2005-08-09 07:14:26 +02:00
|
|
|
/*-------------------------------------------------------------------------
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
1999-02-14 00:22:53 +01:00
|
|
|
* fe-connect.c
|
1996-07-09 08:22:35 +02:00
|
|
|
* functions related to setting up a connection to the backend
|
|
|
|
*
|
2022-01-08 01:04:57 +01:00
|
|
|
* Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
|
2000-01-26 06:58:53 +01:00
|
|
|
* Portions Copyright (c) 1994, Regents of the University of California
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
|
|
|
*
|
|
|
|
* IDENTIFICATION
|
2010-09-20 22:08:53 +02:00
|
|
|
* src/interfaces/libpq/fe-connect.c
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
|
|
|
*-------------------------------------------------------------------------
|
|
|
|
*/
|
|
|
|
|
2001-02-10 03:31:31 +01:00
|
|
|
#include "postgres_fe.h"
|
2000-09-27 17:17:57 +02:00
|
|
|
|
2002-08-30 01:06:32 +02:00
|
|
|
#include <sys/stat.h>
|
1999-07-19 04:27:16 +02:00
|
|
|
#include <fcntl.h>
|
|
|
|
#include <ctype.h>
|
2022-08-13 23:53:28 +02:00
|
|
|
#include <netdb.h>
|
2002-08-18 02:06:01 +02:00
|
|
|
#include <time.h>
|
2004-10-21 22:23:19 +02:00
|
|
|
#include <unistd.h>
|
1999-07-19 04:27:16 +02:00
|
|
|
|
2019-10-23 06:08:53 +02:00
|
|
|
#include "common/ip.h"
|
|
|
|
#include "common/link-canary.h"
|
|
|
|
#include "common/scram-common.h"
|
|
|
|
#include "common/string.h"
|
|
|
|
#include "fe-auth.h"
|
1998-08-17 05:50:43 +02:00
|
|
|
#include "libpq-fe.h"
|
|
|
|
#include "libpq-int.h"
|
2019-10-23 06:08:53 +02:00
|
|
|
#include "mb/pg_wchar.h"
|
2004-05-21 22:56:50 +02:00
|
|
|
#include "pg_config_paths.h"
|
2019-10-23 06:08:53 +02:00
|
|
|
#include "port/pg_bswap.h"
|
1998-08-17 05:50:43 +02:00
|
|
|
|
1998-07-03 06:29:04 +02:00
|
|
|
#ifdef WIN32
|
|
|
|
#include "win32.h"
|
2005-01-10 01:19:51 +01:00
|
|
|
#ifdef _WIN32_IE
|
|
|
|
#undef _WIN32_IE
|
|
|
|
#endif
|
2005-01-26 20:24:03 +01:00
|
|
|
#define _WIN32_IE 0x0500
|
2005-01-10 01:19:51 +01:00
|
|
|
#ifdef near
|
|
|
|
#undef near
|
|
|
|
#endif
|
|
|
|
#define near
|
2005-01-06 19:29:11 +01:00
|
|
|
#include <shlobj.h>
|
2010-07-08 12:20:14 +02:00
|
|
|
#include <mstcpip.h>
|
1998-07-03 06:29:04 +02:00
|
|
|
#else
|
2000-01-18 20:05:31 +01:00
|
|
|
#include <sys/socket.h>
|
1996-07-09 08:22:35 +02:00
|
|
|
#include <netdb.h>
|
2000-05-21 23:19:53 +02:00
|
|
|
#include <netinet/in.h>
|
2000-09-27 17:17:57 +02:00
|
|
|
#include <netinet/tcp.h>
|
|
|
|
#endif
|
1996-11-03 08:14:32 +01:00
|
|
|
|
2004-01-09 03:02:43 +01:00
|
|
|
#ifdef ENABLE_THREAD_SAFETY
|
2005-08-23 23:02:05 +02:00
|
|
|
#ifdef WIN32
|
|
|
|
#include "pthread-win32.h"
|
|
|
|
#else
|
2004-01-09 03:02:43 +01:00
|
|
|
#include <pthread.h>
|
|
|
|
#endif
|
2005-08-23 23:02:05 +02:00
|
|
|
#endif
|
2004-01-09 03:02:43 +01:00
|
|
|
|
2006-07-27 15:20:24 +02:00
|
|
|
#ifdef USE_LDAP
|
|
|
|
#ifdef WIN32
|
|
|
|
#include <winldap.h>
|
|
|
|
#else
|
|
|
|
/* OpenLDAP deprecates RFC 1823, but we want standard conformance */
|
|
|
|
#define LDAP_DEPRECATED 1
|
|
|
|
#include <ldap.h>
|
|
|
|
typedef struct timeval LDAP_TIMEVAL;
|
|
|
|
#endif
|
|
|
|
static int ldapServiceLookup(const char *purl, PQconninfoOption *options,
|
|
|
|
PQExpBuffer errorMessage);
|
|
|
|
#endif
|
|
|
|
|
2005-01-06 19:29:11 +01:00
|
|
|
#ifndef WIN32
|
2003-06-08 19:43:00 +02:00
|
|
|
#define PGPASSFILE ".pgpass"
|
2005-01-06 19:29:11 +01:00
|
|
|
#else
|
2005-01-14 01:25:56 +01:00
|
|
|
#define PGPASSFILE "pgpass.conf"
|
2005-01-06 19:29:11 +01:00
|
|
|
#endif
|
2000-01-18 20:05:31 +01:00
|
|
|
|
2009-12-02 05:38:35 +01:00
|
|
|
/*
|
2010-02-17 05:19:41 +01:00
|
|
|
* Pre-9.0 servers will return this SQLSTATE if asked to set
|
2009-12-02 05:38:35 +01:00
|
|
|
* application_name in a startup packet. We hard-wire the value rather
|
|
|
|
* than looking into errcodes.h since it reflects historical behavior
|
|
|
|
* rather than that of the current code.
|
|
|
|
*/
|
|
|
|
#define ERRCODE_APPNAME_UNKNOWN "42704"
|
|
|
|
|
2010-03-13 15:55:57 +01:00
|
|
|
/* This is part of the protocol so just define it */
|
|
|
|
#define ERRCODE_INVALID_PASSWORD "28P01"
|
2010-11-27 07:30:34 +01:00
|
|
|
/* This too */
|
|
|
|
#define ERRCODE_CANNOT_CONNECT_NOW "57P03"
|
2010-03-13 15:55:57 +01:00
|
|
|
|
2017-06-28 18:30:16 +02:00
|
|
|
/*
|
|
|
|
* Cope with the various platform-specific ways to spell TCP keepalive socket
|
|
|
|
* options. This doesn't cover Windows, which as usual does its own thing.
|
|
|
|
*/
|
|
|
|
#if defined(TCP_KEEPIDLE)
|
|
|
|
/* TCP_KEEPIDLE is the name of this option on Linux and *BSD */
|
|
|
|
#define PG_TCP_KEEPALIVE_IDLE TCP_KEEPIDLE
|
|
|
|
#define PG_TCP_KEEPALIVE_IDLE_STR "TCP_KEEPIDLE"
|
|
|
|
#elif defined(TCP_KEEPALIVE_THRESHOLD)
|
|
|
|
/* TCP_KEEPALIVE_THRESHOLD is the name of this option on Solaris >= 11 */
|
|
|
|
#define PG_TCP_KEEPALIVE_IDLE TCP_KEEPALIVE_THRESHOLD
|
|
|
|
#define PG_TCP_KEEPALIVE_IDLE_STR "TCP_KEEPALIVE_THRESHOLD"
|
|
|
|
#elif defined(TCP_KEEPALIVE) && defined(__darwin__)
|
|
|
|
/* TCP_KEEPALIVE is the name of this option on macOS */
|
|
|
|
/* Caution: Solaris has this symbol but it means something different */
|
|
|
|
#define PG_TCP_KEEPALIVE_IDLE TCP_KEEPALIVE
|
|
|
|
#define PG_TCP_KEEPALIVE_IDLE_STR "TCP_KEEPALIVE"
|
|
|
|
#endif
|
|
|
|
|
2009-12-02 05:38:35 +01:00
|
|
|
/*
|
|
|
|
* fall back options if they are not specified by arguments or defined
|
|
|
|
* by environment variables
|
|
|
|
*/
|
2003-06-08 19:43:00 +02:00
|
|
|
#define DefaultHost "localhost"
|
|
|
|
#define DefaultOption ""
|
2019-09-23 22:45:23 +02:00
|
|
|
#ifdef USE_SSL
|
|
|
|
#define DefaultChannelBinding "prefer"
|
|
|
|
#else
|
|
|
|
#define DefaultChannelBinding "disable"
|
|
|
|
#endif
|
libpq: Add target_session_attrs parameter.
Commit 274bb2b3857cc987cfa21d14775cae9b0dababa5 made it possible to
specify multiple IPs in a connection string, but that's not good
enough for the case where you have a read-write master and a bunch of
read-only standbys and want to connect to whichever server is the
master at the current time. This commit allows that, by making it
possible to specify target_session_attrs=read-write as a connection
parameter.
There was extensive discussion of the best name for the connection
parameter and its values as well as the best way to distinguish master
and standbys. For now, adopt the same solution as JDBC: if the user
wants a read-write connection, issue 'show transaction_read_only' and
reject the connection if the result is 'on'. In the future, we
could add additional values of this new target_session_attrs parameter
that issue different queries; or we might have some way of
distinguishing the server type without resorting to an SQL query; but
right now, we have this, and that's (hopefully) a good start.
Victor Wagner and Mithun Cy. Design review by Álvaro Herrera, Catalin
Iacob, Takayuki Tsunakawa, and Craig Ringer; code review by me. I
changed Mithun's patch to skip all remaining IPs for a host if we
reject a connection based on this new parameter, rewrote the
documentation, and did some other cosmetic cleanup.
Discussion: http://postgr.es/m/CAD__OuhqPRGpcsfwPHz_PDqAGkoqS1UvnUnOnAB-LBWBW=wu4A@mail.gmail.com
2016-11-29 18:18:31 +01:00
|
|
|
#define DefaultTargetSessionAttrs "any"
|
At long last I put together a patch to support 4 client SSL negotiation
modes (and replace the requiressl boolean). The four options were first
spelled out by Magnus Hagander <mha@sollentuna.net> on 2000-08-23 in email
to pgsql-hackers, archived here:
http://archives.postgresql.org/pgsql-hackers/2000-08/msg00639.php
My original less-flexible patch and the ensuing thread are archived at:
http://dbforums.com/t623845.html
Attached is a new patch, including documentation.
To sum up, there's a new client parameter "sslmode" and environment
variable "PGSSLMODE", with these options:
sslmode description
------- -----------
disable Unencrypted non-SSL only
allow Negotiate, prefer non-SSL
prefer Negotiate, prefer SSL (default)
require Require SSL
The only change to the server is a new pg_hba.conf line type,
"hostnossl", for specifying connections that are not allowed to use SSL
(for example, to prevent servers on a local network from accidentally
using SSL and wasting cycles). Thus the 3 pg_hba.conf line types are:
pg_hba.conf line types
----------------------
host applies to either SSL or regular connections
hostssl applies only to SSL connections
hostnossl applies only to regular connections
These client and server options, the postgresql.conf ssl = false option,
and finally the possibility of compiling with no SSL support at all,
make quite a range of combinations to test. I threw together a test
script to try many of them out. It's in a separate tarball with its
config files, a patch to psql so it'll announce SSL connections even in
absence of a tty, and the test output. The test is especially informative
when run on the same tty the postmaster was started on, so the FATAL:
errors during negotiation are interleaved with the psql client output.
I saw Tom write that new submissions for 7.4 have to be in before midnight
local time, and since I'm on the east coast in the US, this just makes it
in before the bell. :)
Jon Jensen
2003-07-26 15:50:02 +02:00
|
|
|
#ifdef USE_SSL
|
2009-04-24 11:43:10 +02:00
|
|
|
#define DefaultSSLMode "prefer"
|
At long last I put together a patch to support 4 client SSL negotiation
modes (and replace the requiressl boolean). The four options were first
spelled out by Magnus Hagander <mha@sollentuna.net> on 2000-08-23 in email
to pgsql-hackers, archived here:
http://archives.postgresql.org/pgsql-hackers/2000-08/msg00639.php
My original less-flexible patch and the ensuing thread are archived at:
http://dbforums.com/t623845.html
Attached is a new patch, including documentation.
To sum up, there's a new client parameter "sslmode" and environment
variable "PGSSLMODE", with these options:
sslmode description
------- -----------
disable Unencrypted non-SSL only
allow Negotiate, prefer non-SSL
prefer Negotiate, prefer SSL (default)
require Require SSL
The only change to the server is a new pg_hba.conf line type,
"hostnossl", for specifying connections that are not allowed to use SSL
(for example, to prevent servers on a local network from accidentally
using SSL and wasting cycles). Thus the 3 pg_hba.conf line types are:
pg_hba.conf line types
----------------------
host applies to either SSL or regular connections
hostssl applies only to SSL connections
hostnossl applies only to regular connections
These client and server options, the postgresql.conf ssl = false option,
and finally the possibility of compiling with no SSL support at all,
make quite a range of combinations to test. I threw together a test
script to try many of them out. It's in a separate tarball with its
config files, a patch to psql so it'll announce SSL connections even in
absence of a tty, and the test output. The test is especially informative
when run on the same tty the postmaster was started on, so the FATAL:
errors during negotiation are interleaved with the psql client output.
I saw Tom write that new submissions for 7.4 have to be in before midnight
local time, and since I'm on the east coast in the US, this just makes it
in before the bell. :)
Jon Jensen
2003-07-26 15:50:02 +02:00
|
|
|
#else
|
|
|
|
#define DefaultSSLMode "disable"
|
|
|
|
#endif
|
GSSAPI encryption support
On both the frontend and backend, prepare for GSSAPI encryption
support by moving common code for error handling into a separate file.
Fix a TODO for handling multiple status messages in the process.
Eliminate the OIDs, which have not been needed for some time.
Add frontend and backend encryption support functions. Keep the
context initiation for authentication-only separate on both the
frontend and backend in order to avoid concerns about changing the
requested flags to include encryption support.
In postmaster, pull GSSAPI authorization checking into a shared
function. Also share the initiator name between the encryption and
non-encryption codepaths.
For HBA, add "hostgssenc" and "hostnogssenc" entries that behave
similarly to their SSL counterparts. "hostgssenc" requires either
"gss", "trust", or "reject" for its authentication.
Similarly, add a "gssencmode" parameter to libpq. Supported values are
"disable", "require", and "prefer". Notably, negotiation will only be
attempted if credentials can be acquired. Move credential acquisition
into its own function to support this behavior.
Add a simple pg_stat_gssapi view similar to pg_stat_ssl, for monitoring
if GSSAPI authentication was used, what principal was used, and if
encryption is being used on the connection.
Finally, add documentation for everything new, and update existing
documentation on connection security.
Thanks to Michael Paquier for the Windows fixes.
Author: Robbie Harwood, with changes to the read/write functions by me.
Reviewed in various forms and at different times by: Michael Paquier,
Andres Freund, David Steele.
Discussion: https://www.postgresql.org/message-id/flat/jlg1tgq1ktm.fsf@thriss.redhat.com
2019-04-03 21:02:33 +02:00
|
|
|
#ifdef ENABLE_GSS
|
|
|
|
#include "fe-gssapi-common.h"
|
|
|
|
#define DefaultGSSMode "prefer"
|
|
|
|
#else
|
|
|
|
#define DefaultGSSMode "disable"
|
|
|
|
#endif
|
2002-06-14 06:09:37 +02:00
|
|
|
|
1996-11-09 11:39:54 +01:00
|
|
|
/* ----------
|
1996-11-14 11:25:54 +01:00
|
|
|
* Definition of the conninfo parameters and their fallback resources.
|
2000-03-11 04:08:37 +01:00
|
|
|
*
|
1996-11-09 11:39:54 +01:00
|
|
|
* If Environment-Var and Compiled-in are specified as NULL, no
|
|
|
|
* fallback is available. If after all no value can be determined
|
|
|
|
* for an option, an error is returned.
|
|
|
|
*
|
2012-03-22 17:08:34 +01:00
|
|
|
* The value for the username is treated specially in conninfo_add_defaults.
|
|
|
|
* If the value is not obtained any other way, the username is determined
|
|
|
|
* by pg_fe_getauthname().
|
1996-11-09 11:39:54 +01:00
|
|
|
*
|
|
|
|
* The Label and Disp-Char entries are provided for applications that
|
|
|
|
* want to use PQconndefaults() to create a generic database connection
|
|
|
|
* dialog. Disp-Char is defined as follows:
|
2000-03-11 04:08:37 +01:00
|
|
|
* "" Normal input field
|
|
|
|
* "*" Password field - hide value
|
|
|
|
* "D" Debug option - don't show by default
|
|
|
|
*
|
|
|
|
* PQconninfoOptions[] is a constant static array that we use to initialize
|
|
|
|
* a dynamically allocated working copy. All the "val" fields in
|
|
|
|
* PQconninfoOptions[] *must* be NULL. In a working copy, non-null "val"
|
|
|
|
* fields point to malloc'd strings that should be freed when the working
|
|
|
|
* array is freed (see PQconninfoFree).
|
2012-11-30 07:09:18 +01:00
|
|
|
*
|
|
|
|
* The first part of each struct is identical to the one in libpq-fe.h,
|
|
|
|
* which is required since we memcpy() data between the two!
|
1996-11-09 11:39:54 +01:00
|
|
|
* ----------
|
|
|
|
*/
|
2012-11-30 07:09:18 +01:00
|
|
|
/*
 * Internal (superset) version of PQconninfoOption: adds connofs so libpq
 * can map each conninfo keyword directly onto its storage slot in PGconn.
 */
typedef struct _internalPQconninfoOption
{
	char	   *keyword;		/* The keyword of the option */
	char	   *envvar;			/* Fallback environment variable name */
	char	   *compiled;		/* Fallback compiled in default value */
	char	   *val;			/* Option's current value, or NULL */
	char	   *label;			/* Label for field in connect dialog */
	char	   *dispchar;		/* Indicates how to display this field in a
								 * connect dialog. Values are: "" Display
								 * entered value as is "*" Password field -
								 * hide value "D" Debug option - don't show
								 * by default */
	int			dispsize;		/* Field size in characters for dialog */
	/* ---
	 * Anything above this comment must be synchronized with
	 * PQconninfoOption in libpq-fe.h, since we memcpy() data
	 * between them!
	 * ---
	 */
	off_t		connofs;		/* Offset into PGconn struct, -1 if not there */
} internalPQconninfoOption;
|
|
|
|
|
|
|
|
static const internalPQconninfoOption PQconninfoOptions[] = {
|
2000-10-17 03:00:58 +02:00
|
|
|
{"service", "PGSERVICE", NULL, NULL,
|
2012-11-30 07:09:18 +01:00
|
|
|
"Database-Service", "", 20, -1},
|
2000-10-17 03:00:58 +02:00
|
|
|
|
1996-11-09 11:39:54 +01:00
|
|
|
{"user", "PGUSER", NULL, NULL,
|
2012-11-30 07:09:18 +01:00
|
|
|
"Database-User", "", 20,
|
|
|
|
offsetof(struct pg_conn, pguser)},
|
1996-11-09 11:39:54 +01:00
|
|
|
|
2003-04-28 06:52:13 +02:00
|
|
|
{"password", "PGPASSWORD", NULL, NULL,
|
2012-11-30 07:09:18 +01:00
|
|
|
"Database-Password", "*", 20,
|
|
|
|
offsetof(struct pg_conn, pgpass)},
|
1997-03-12 22:23:16 +01:00
|
|
|
|
2017-01-24 23:06:21 +01:00
|
|
|
{"passfile", "PGPASSFILE", NULL, NULL,
|
|
|
|
"Database-Password-File", "", 64,
|
|
|
|
offsetof(struct pg_conn, pgpassfile)},
|
|
|
|
|
2020-12-28 18:13:40 +01:00
|
|
|
{"channel_binding", "PGCHANNELBINDING", DefaultChannelBinding, NULL,
|
2020-01-29 07:08:19 +01:00
|
|
|
"Channel-Binding", "", 8, /* sizeof("require") == 8 */
|
2019-09-23 22:45:23 +02:00
|
|
|
offsetof(struct pg_conn, channel_binding)},
|
|
|
|
|
2002-08-18 03:35:40 +02:00
|
|
|
{"connect_timeout", "PGCONNECT_TIMEOUT", NULL, NULL,
|
2012-11-30 07:09:18 +01:00
|
|
|
"Connect-timeout", "", 10, /* strlen(INT32_MAX) == 10 */
|
|
|
|
offsetof(struct pg_conn, connect_timeout)},
|
2002-08-17 14:33:18 +02:00
|
|
|
|
1996-11-09 11:39:54 +01:00
|
|
|
{"dbname", "PGDATABASE", NULL, NULL,
|
2012-11-30 07:09:18 +01:00
|
|
|
"Database-Name", "", 20,
|
|
|
|
offsetof(struct pg_conn, dbName)},
|
1996-11-09 11:39:54 +01:00
|
|
|
|
1997-11-07 21:52:15 +01:00
|
|
|
{"host", "PGHOST", NULL, NULL,
|
2012-11-30 07:09:18 +01:00
|
|
|
"Database-Host", "", 40,
|
|
|
|
offsetof(struct pg_conn, pghost)},
|
1996-11-09 11:39:54 +01:00
|
|
|
|
1999-11-30 04:08:19 +01:00
|
|
|
{"hostaddr", "PGHOSTADDR", NULL, NULL,
|
2012-11-30 07:09:18 +01:00
|
|
|
"Database-Host-IP-Address", "", 45,
|
|
|
|
offsetof(struct pg_conn, pghostaddr)},
|
2003-01-08 17:21:53 +01:00
|
|
|
|
2000-05-31 02:28:42 +02:00
|
|
|
{"port", "PGPORT", DEF_PGPORT_STR, NULL,
|
2012-11-30 07:09:18 +01:00
|
|
|
"Database-Port", "", 6,
|
|
|
|
offsetof(struct pg_conn, pgport)},
|
1996-11-09 11:39:54 +01:00
|
|
|
|
2011-02-19 07:54:58 +01:00
|
|
|
{"client_encoding", "PGCLIENTENCODING", NULL, NULL,
|
2012-11-30 07:09:18 +01:00
|
|
|
"Client-Encoding", "", 10,
|
|
|
|
offsetof(struct pg_conn, client_encoding_initial)},
|
2011-02-19 07:54:58 +01:00
|
|
|
|
1996-11-09 11:39:54 +01:00
|
|
|
{"options", "PGOPTIONS", DefaultOption, NULL,
|
2018-09-07 15:01:25 +02:00
|
|
|
"Backend-Options", "", 40,
|
2012-11-30 07:09:18 +01:00
|
|
|
offsetof(struct pg_conn, pgoptions)},
|
2000-03-11 04:08:37 +01:00
|
|
|
|
2009-11-29 00:38:08 +01:00
|
|
|
{"application_name", "PGAPPNAME", NULL, NULL,
|
2012-11-30 07:09:18 +01:00
|
|
|
"Application-Name", "", 64,
|
|
|
|
offsetof(struct pg_conn, appname)},
|
2009-11-29 00:38:08 +01:00
|
|
|
|
|
|
|
{"fallback_application_name", NULL, NULL, NULL,
|
2012-11-30 07:09:18 +01:00
|
|
|
"Fallback-Application-Name", "", 64,
|
|
|
|
offsetof(struct pg_conn, fbappname)},
|
2009-11-29 00:38:08 +01:00
|
|
|
|
Add TCP keepalive support to libpq.
This adds four additional connection parameters to libpq: keepalives,
keepalives_idle, keepalives_count, and keepalives_interval.
keepalives default to on, per discussion, but can be turned off by
specifying keepalives=0. The remaining parameters, where supported,
can be used to adjust how often keepalives are sent and how many
can be lost before the connection is broken.
The immediate motivation for this patch is to make sure that
walreceiver will eventually notice if the master reboots without
closing the connection cleanly, but it should be helpful in other
cases as well.
Tollef Fog Heen, Fujii Masao, and me.
2010-06-23 23:54:13 +02:00
|
|
|
{"keepalives", NULL, NULL, NULL,
|
2012-11-30 07:09:18 +01:00
|
|
|
"TCP-Keepalives", "", 1, /* should be just '0' or '1' */
|
|
|
|
offsetof(struct pg_conn, keepalives)},
|
Add TCP keepalive support to libpq.
This adds four additional connection parameters to libpq: keepalives,
keepalives_idle, keepalives_count, and keepalives_interval.
keepalives default to on, per discussion, but can be turned off by
specifying keepalives=0. The remaining parameters, where supported,
can be used to adjust how often keepalives are sent and how many
can be lost before the connection is broken.
The immediate motivation for this patch is to make sure that
walreceiver will eventually notice if the master reboots without
closing the connection cleanly, but it should be helpful in other
cases as well.
Tollef Fog Heen, Fujii Masao, and me.
2010-06-23 23:54:13 +02:00
|
|
|
|
|
|
|
{"keepalives_idle", NULL, NULL, NULL,
|
2012-11-30 07:09:18 +01:00
|
|
|
"TCP-Keepalives-Idle", "", 10, /* strlen(INT32_MAX) == 10 */
|
|
|
|
offsetof(struct pg_conn, keepalives_idle)},
|
Add TCP keepalive support to libpq.
This adds four additional connection parameters to libpq: keepalives,
keepalives_idle, keepalives_count, and keepalives_interval.
keepalives default to on, per discussion, but can be turned off by
specifying keepalives=0. The remaining parameters, where supported,
can be used to adjust how often keepalives are sent and how many
can be lost before the connection is broken.
The immediate motivation for this patch is to make sure that
walreceiver will eventually notice if the master reboots without
closing the connection cleanly, but it should be helpful in other
cases as well.
Tollef Fog Heen, Fujii Masao, and me.
2010-06-23 23:54:13 +02:00
|
|
|
|
|
|
|
{"keepalives_interval", NULL, NULL, NULL,
|
2012-11-30 07:09:18 +01:00
|
|
|
"TCP-Keepalives-Interval", "", 10, /* strlen(INT32_MAX) == 10 */
|
|
|
|
offsetof(struct pg_conn, keepalives_interval)},
|
Add TCP keepalive support to libpq.
This adds four additional connection parameters to libpq: keepalives,
keepalives_idle, keepalives_count, and keepalives_interval.
keepalives default to on, per discussion, but can be turned off by
specifying keepalives=0. The remaining parameters, where supported,
can be used to adjust how often keepalives are sent and how many
can be lost before the connection is broken.
The immediate motivation for this patch is to make sure that
walreceiver will eventually notice if the master reboots without
closing the connection cleanly, but it should be helpful in other
cases as well.
Tollef Fog Heen, Fujii Masao, and me.
2010-06-23 23:54:13 +02:00
|
|
|
|
|
|
|
{"keepalives_count", NULL, NULL, NULL,
|
2012-11-30 07:09:18 +01:00
|
|
|
"TCP-Keepalives-Count", "", 10, /* strlen(INT32_MAX) == 10 */
|
|
|
|
offsetof(struct pg_conn, keepalives_count)},
|
2000-08-30 16:54:24 +02:00
|
|
|
|
Add support TCP user timeout in libpq and the backend server
Similarly to the set of parameters for keepalive, a connection parameter
for libpq is added as well as a backend GUC, called tcp_user_timeout.
Increasing the TCP user timeout is useful to allow a connection to
survive extended periods without end-to-end connection, and decreasing
it allows application to fail faster. By default, the parameter is 0,
which makes the connection use the system default, and follows a logic
close to the keepalive parameters in its handling. When connecting
through a Unix-socket domain, the parameters have no effect.
Author: Ryohei Nagaura
Reviewed-by: Fabien Coelho, Robert Haas, Kyotaro Horiguchi, Kirk
Jamison, Mikalai Keida, Takayuki Tsunakawa, Andrei Yahorau
Discussion: https://postgr.es/m/EDA4195584F5064680D8130B1CA91C45367328@G01JPEXMBYT04
2019-04-06 08:23:37 +02:00
|
|
|
{"tcp_user_timeout", NULL, NULL, NULL,
|
|
|
|
"TCP-User-Timeout", "", 10, /* strlen(INT32_MAX) == 10 */
|
|
|
|
offsetof(struct pg_conn, pgtcp_user_timeout)},
|
|
|
|
|
At long last I put together a patch to support 4 client SSL negotiation
modes (and replace the requiressl boolean). The four options were first
spelled out by Magnus Hagander <mha@sollentuna.net> on 2000-08-23 in email
to pgsql-hackers, archived here:
http://archives.postgresql.org/pgsql-hackers/2000-08/msg00639.php
My original less-flexible patch and the ensuing thread are archived at:
http://dbforums.com/t623845.html
Attached is a new patch, including documentation.
To sum up, there's a new client parameter "sslmode" and environment
variable "PGSSLMODE", with these options:
sslmode description
------- -----------
disable Unencrypted non-SSL only
allow Negotiate, prefer non-SSL
prefer Negotiate, prefer SSL (default)
require Require SSL
The only change to the server is a new pg_hba.conf line type,
"hostnossl", for specifying connections that are not allowed to use SSL
(for example, to prevent servers on a local network from accidentally
using SSL and wasting cycles). Thus the 3 pg_hba.conf line types are:
pg_hba.conf line types
----------------------
host applies to either SSL or regular connections
hostssl applies only to SSL connections
hostnossl applies only to regular connections
These client and server options, the postgresql.conf ssl = false option,
and finally the possibility of compiling with no SSL support at all,
make quite a range of combinations to test. I threw together a test
script to try many of them out. It's in a separate tarball with its
config files, a patch to psql so it'll announce SSL connections even in
absence of a tty, and the test output. The test is especially informative
when run on the same tty the postmaster was started on, so the FATAL:
errors during negotiation are interleaved with the psql client output.
I saw Tom write that new submissions for 7.4 have to be in before midnight
local time, and since I'm on the east coast in the US, this just makes it
in before the bell. :)
Jon Jensen
2003-07-26 15:50:02 +02:00
|
|
|
/*
|
2008-12-15 11:28:22 +01:00
|
|
|
* ssl options are allowed even without client SSL support because the
|
|
|
|
* client can still handle SSL modes "disable" and "allow". Other
|
|
|
|
* parameters have no effect on non-SSL connections, so there is no reason
|
|
|
|
* to exclude them since none of them are mandatory.
|
At long last I put together a patch to support 4 client SSL negotiation
modes (and replace the requiressl boolean). The four options were first
spelled out by Magnus Hagander <mha@sollentuna.net> on 2000-08-23 in email
to pgsql-hackers, archived here:
http://archives.postgresql.org/pgsql-hackers/2000-08/msg00639.php
My original less-flexible patch and the ensuing thread are archived at:
http://dbforums.com/t623845.html
Attached is a new patch, including documentation.
To sum up, there's a new client parameter "sslmode" and environment
variable "PGSSLMODE", with these options:
sslmode description
------- -----------
disable Unencrypted non-SSL only
allow Negotiate, prefer non-SSL
prefer Negotiate, prefer SSL (default)
require Require SSL
The only change to the server is a new pg_hba.conf line type,
"hostnossl", for specifying connections that are not allowed to use SSL
(for example, to prevent servers on a local network from accidentally
using SSL and wasting cycles). Thus the 3 pg_hba.conf line types are:
pg_hba.conf line types
----------------------
host applies to either SSL or regular connections
hostssl applies only to SSL connections
hostnossl applies only to regular connections
These client and server options, the postgresql.conf ssl = false option,
and finally the possibility of compiling with no SSL support at all,
make quite a range of combinations to test. I threw together a test
script to try many of them out. It's in a separate tarball with its
config files, a patch to psql so it'll announce SSL connections even in
absence of a tty, and the test output. The test is especially informative
when run on the same tty the postmaster was started on, so the FATAL:
errors during negotiation are interleaved with the psql client output.
I saw Tom write that new submissions for 7.4 have to be in before midnight
local time, and since I'm on the east coast in the US, this just makes it
in before the bell. :)
Jon Jensen
2003-07-26 15:50:02 +02:00
|
|
|
*/
|
|
|
|
{"sslmode", "PGSSLMODE", DefaultSSLMode, NULL,
|
2014-03-17 02:43:40 +01:00
|
|
|
"SSL-Mode", "", 12, /* sizeof("verify-full") == 12 */
|
2012-11-30 07:09:18 +01:00
|
|
|
offsetof(struct pg_conn, sslmode)},
|
At long last I put together a patch to support 4 client SSL negotiation
modes (and replace the requiressl boolean). The four options were first
spelled out by Magnus Hagander <mha@sollentuna.net> on 2000-08-23 in email
to pgsql-hackers, archived here:
http://archives.postgresql.org/pgsql-hackers/2000-08/msg00639.php
My original less-flexible patch and the ensuing thread are archived at:
http://dbforums.com/t623845.html
Attached is a new patch, including documentation.
To sum up, there's a new client parameter "sslmode" and environment
variable "PGSSLMODE", with these options:
sslmode description
------- -----------
disable Unencrypted non-SSL only
allow Negotiate, prefer non-SSL
prefer Negotiate, prefer SSL (default)
require Require SSL
The only change to the server is a new pg_hba.conf line type,
"hostnossl", for specifying connections that are not allowed to use SSL
(for example, to prevent servers on a local network from accidentally
using SSL and wasting cycles). Thus the 3 pg_hba.conf line types are:
pg_hba.conf line types
----------------------
host applies to either SSL or regular connections
hostssl applies only to SSL connections
hostnossl applies only to regular connections
These client and server options, the postgresql.conf ssl = false option,
and finally the possibility of compiling with no SSL support at all,
make quite a range of combinations to test. I threw together a test
script to try many of them out. It's in a separate tarball with its
config files, a patch to psql so it'll announce SSL connections even in
absence of a tty, and the test output. The test is especially informative
when run on the same tty the postmaster was started on, so the FATAL:
errors during negotiation are interleaved with the psql client output.
I saw Tom write that new submissions for 7.4 have to be in before midnight
local time, and since I'm on the east coast in the US, this just makes it
in before the bell. :)
Jon Jensen
2003-07-26 15:50:02 +02:00
|
|
|
|
2021-03-10 01:35:42 +01:00
|
|
|
{"sslcompression", "PGSSLCOMPRESSION", "0", NULL,
|
|
|
|
"SSL-Compression", "", 1,
|
|
|
|
offsetof(struct pg_conn, sslcompression)},
|
2011-11-28 13:13:42 +01:00
|
|
|
|
2008-12-15 11:28:22 +01:00
|
|
|
{"sslcert", "PGSSLCERT", NULL, NULL,
|
2012-11-30 07:09:18 +01:00
|
|
|
"SSL-Client-Cert", "", 64,
|
|
|
|
offsetof(struct pg_conn, sslcert)},
|
2008-12-15 11:28:22 +01:00
|
|
|
|
|
|
|
{"sslkey", "PGSSLKEY", NULL, NULL,
|
2012-11-30 07:09:18 +01:00
|
|
|
"SSL-Client-Key", "", 64,
|
|
|
|
offsetof(struct pg_conn, sslkey)},
|
2008-12-15 11:28:22 +01:00
|
|
|
|
2019-12-20 21:34:07 +01:00
|
|
|
{"sslpassword", NULL, NULL, NULL,
|
|
|
|
"SSL-Client-Key-Password", "*", 20,
|
|
|
|
offsetof(struct pg_conn, sslpassword)},
|
|
|
|
|
2008-12-15 11:28:22 +01:00
|
|
|
{"sslrootcert", "PGSSLROOTCERT", NULL, NULL,
|
2012-11-30 07:09:18 +01:00
|
|
|
"SSL-Root-Certificate", "", 64,
|
|
|
|
offsetof(struct pg_conn, sslrootcert)},
|
2008-12-15 11:28:22 +01:00
|
|
|
|
|
|
|
{"sslcrl", "PGSSLCRL", NULL, NULL,
|
2012-11-30 07:09:18 +01:00
|
|
|
"SSL-Revocation-List", "", 64,
|
|
|
|
offsetof(struct pg_conn, sslcrl)},
|
2008-12-15 11:28:22 +01:00
|
|
|
|
2021-02-18 07:59:10 +01:00
|
|
|
{"sslcrldir", "PGSSLCRLDIR", NULL, NULL,
|
|
|
|
"SSL-Revocation-List-Dir", "", 64,
|
|
|
|
offsetof(struct pg_conn, sslcrldir)},
|
|
|
|
|
2021-04-07 15:11:41 +02:00
|
|
|
{"sslsni", "PGSSLSNI", "1", NULL,
|
|
|
|
"SSL-SNI", "", 1,
|
|
|
|
offsetof(struct pg_conn, sslsni)},
|
|
|
|
|
2010-07-18 13:37:26 +02:00
|
|
|
{"requirepeer", "PGREQUIREPEER", NULL, NULL,
|
2012-11-30 07:09:18 +01:00
|
|
|
"Require-Peer", "", 10,
|
|
|
|
offsetof(struct pg_conn, requirepeer)},
|
2010-07-18 13:37:26 +02:00
|
|
|
|
2020-06-27 18:20:33 +02:00
|
|
|
{"ssl_min_protocol_version", "PGSSLMINPROTOCOLVERSION", "TLSv1.2", NULL,
|
2020-01-28 02:40:48 +01:00
|
|
|
"SSL-Minimum-Protocol-Version", "", 8, /* sizeof("TLSv1.x") == 8 */
|
2020-04-30 06:39:10 +02:00
|
|
|
offsetof(struct pg_conn, ssl_min_protocol_version)},
|
2020-01-28 02:40:48 +01:00
|
|
|
|
2020-04-30 06:39:10 +02:00
|
|
|
{"ssl_max_protocol_version", "PGSSLMAXPROTOCOLVERSION", NULL, NULL,
|
2020-01-28 02:40:48 +01:00
|
|
|
"SSL-Maximum-Protocol-Version", "", 8, /* sizeof("TLSv1.x") == 8 */
|
2020-04-30 06:39:10 +02:00
|
|
|
offsetof(struct pg_conn, ssl_max_protocol_version)},
|
2020-01-28 02:40:48 +01:00
|
|
|
|
GSSAPI encryption support
On both the frontend and backend, prepare for GSSAPI encryption
support by moving common code for error handling into a separate file.
Fix a TODO for handling multiple status messages in the process.
Eliminate the OIDs, which have not been needed for some time.
Add frontend and backend encryption support functions. Keep the
context initiation for authentication-only separate on both the
frontend and backend in order to avoid concerns about changing the
requested flags to include encryption support.
In postmaster, pull GSSAPI authorization checking into a shared
function. Also share the initiator name between the encryption and
non-encryption codepaths.
For HBA, add "hostgssenc" and "hostnogssenc" entries that behave
similarly to their SSL counterparts. "hostgssenc" requires either
"gss", "trust", or "reject" for its authentication.
Similarly, add a "gssencmode" parameter to libpq. Supported values are
"disable", "require", and "prefer". Notably, negotiation will only be
attempted if credentials can be acquired. Move credential acquisition
into its own function to support this behavior.
Add a simple pg_stat_gssapi view similar to pg_stat_ssl, for monitoring
if GSSAPI authentication was used, what principal was used, and if
encryption is being used on the connection.
Finally, add documentation for everything new, and update existing
documentation on connection security.
Thanks to Michael Paquier for the Windows fixes.
Author: Robbie Harwood, with changes to the read/write functions by me.
Reviewed in various forms and at different times by: Michael Paquier,
Andres Freund, David Steele.
Discussion: https://www.postgresql.org/message-id/flat/jlg1tgq1ktm.fsf@thriss.redhat.com
2019-04-03 21:02:33 +02:00
|
|
|
/*
|
2019-12-20 21:34:07 +01:00
|
|
|
* As with SSL, all GSS options are exposed even in builds that don't have
|
|
|
|
* support.
|
GSSAPI encryption support
On both the frontend and backend, prepare for GSSAPI encryption
support by moving common code for error handling into a separate file.
Fix a TODO for handling multiple status messages in the process.
Eliminate the OIDs, which have not been needed for some time.
Add frontend and backend encryption support functions. Keep the
context initiation for authentication-only separate on both the
frontend and backend in order to avoid concerns about changing the
requested flags to include encryption support.
In postmaster, pull GSSAPI authorization checking into a shared
function. Also share the initiator name between the encryption and
non-encryption codepaths.
For HBA, add "hostgssenc" and "hostnogssenc" entries that behave
similarly to their SSL counterparts. "hostgssenc" requires either
"gss", "trust", or "reject" for its authentication.
Similarly, add a "gssencmode" parameter to libpq. Supported values are
"disable", "require", and "prefer". Notably, negotiation will only be
attempted if credentials can be acquired. Move credential acquisition
into its own function to support this behavior.
Add a simple pg_stat_gssapi view similar to pg_stat_ssl, for monitoring
if GSSAPI authentication was used, what principal was used, and if
encryption is being used on the connection.
Finally, add documentation for everything new, and update existing
documentation on connection security.
Thanks to Michael Paquier for the Windows fixes.
Author: Robbie Harwood, with changes to the read/write functions by me.
Reviewed in various forms and at different times by: Michael Paquier,
Andres Freund, David Steele.
Discussion: https://www.postgresql.org/message-id/flat/jlg1tgq1ktm.fsf@thriss.redhat.com
2019-04-03 21:02:33 +02:00
|
|
|
*/
|
2019-04-05 04:52:42 +02:00
|
|
|
{"gssencmode", "PGGSSENCMODE", DefaultGSSMode, NULL,
|
2020-01-29 07:08:19 +01:00
|
|
|
"GSSENC-Mode", "", 8, /* sizeof("disable") == 8 */
|
GSSAPI encryption support
On both the frontend and backend, prepare for GSSAPI encryption
support by moving common code for error handling into a separate file.
Fix a TODO for handling multiple status messages in the process.
Eliminate the OIDs, which have not been needed for some time.
Add frontend and backend encryption support functions. Keep the
context initiation for authentication-only separate on both the
frontend and backend in order to avoid concerns about changing the
requested flags to include encryption support.
In postmaster, pull GSSAPI authorization checking into a shared
function. Also share the initiator name between the encryption and
non-encryption codepaths.
For HBA, add "hostgssenc" and "hostnogssenc" entries that behave
similarly to their SSL counterparts. "hostgssenc" requires either
"gss", "trust", or "reject" for its authentication.
Similarly, add a "gssencmode" parameter to libpq. Supported values are
"disable", "require", and "prefer". Notably, negotiation will only be
attempted if credentials can be acquired. Move credential acquisition
into its own function to support this behavior.
Add a simple pg_stat_gssapi view similar to pg_stat_ssl, for monitoring
if GSSAPI authentication was used, what principal was used, and if
encryption is being used on the connection.
Finally, add documentation for everything new, and update existing
documentation on connection security.
Thanks to Michael Paquier for the Windows fixes.
Author: Robbie Harwood, with changes to the read/write functions by me.
Reviewed in various forms and at different times by: Michael Paquier,
Andres Freund, David Steele.
Discussion: https://www.postgresql.org/message-id/flat/jlg1tgq1ktm.fsf@thriss.redhat.com
2019-04-03 21:02:33 +02:00
|
|
|
offsetof(struct pg_conn, gssencmode)},
|
|
|
|
|
2007-07-10 15:14:22 +02:00
|
|
|
/* Kerberos and GSSAPI authentication support specifying the service name */
|
2005-06-04 22:42:43 +02:00
|
|
|
{"krbsrvname", "PGKRBSRVNAME", PG_KRB_SRVNAM, NULL,
|
2012-11-30 07:09:18 +01:00
|
|
|
"Kerberos-service-name", "", 20,
|
|
|
|
offsetof(struct pg_conn, krbsrvname)},
|
2007-11-15 22:14:46 +01:00
|
|
|
|
2007-07-23 12:16:54 +02:00
|
|
|
{"gsslib", "PGGSSLIB", NULL, NULL,
|
2020-01-29 07:08:19 +01:00
|
|
|
"GSS-library", "", 7, /* sizeof("gssapi") == 7 */
|
2012-11-30 07:09:18 +01:00
|
|
|
offsetof(struct pg_conn, gsslib)},
|
2007-07-23 12:16:54 +02:00
|
|
|
|
2010-01-15 10:19:10 +01:00
|
|
|
{"replication", NULL, NULL, NULL,
|
2012-11-30 07:09:18 +01:00
|
|
|
"Replication", "D", 5,
|
|
|
|
offsetof(struct pg_conn, replication)},
|
2010-01-15 10:19:10 +01:00
|
|
|
|
libpq: Add target_session_attrs parameter.
Commit 274bb2b3857cc987cfa21d14775cae9b0dababa5 made it possible to
specify multiple IPs in a connection string, but that's not good
enough for the case where you have a read-write master and a bunch of
read-only standbys and want to connect to whichever server is the
master at the current time. This commit allows that, by making it
possible to specify target_session_attrs=read-write as a connection
parameter.
There was extensive discussion of the best name for the connection
parameter and its values as well as the best way to distinguish master
and standbys. For now, adopt the same solution as JDBC: if the user
wants a read-write connection, issue 'show transaction_read_only' and
reject the connection if the result is 'on'. In the future, we
could add additional values of this new target_session_attrs parameter
that issue different queries; or we might have some way of
distinguishing the server type without resorting to an SQL query; but
right now, we have this, and that's (hopefully) a good start.
Victor Wagner and Mithun Cy. Design review by Álvaro Herrera, Catalin
Iacob, Takayuki Tsunakawa, and Craig Ringer; code review by me. I
changed Mithun's patch to skip all remaining IPs for a host if we
reject a connection based on this new parameter, rewrote the
documentation, and did some other cosmetic cleanup.
Discussion: http://postgr.es/m/CAD__OuhqPRGpcsfwPHz_PDqAGkoqS1UvnUnOnAB-LBWBW=wu4A@mail.gmail.com
2016-11-29 18:18:31 +01:00
|
|
|
{"target_session_attrs", "PGTARGETSESSIONATTRS",
|
|
|
|
DefaultTargetSessionAttrs, NULL,
|
Extend the abilities of libpq's target_session_attrs parameter.
In addition to the existing options of "any" and "read-write", we
now support "read-only", "primary", "standby", and "prefer-standby".
"read-write" retains its previous meaning of "transactions are
read-write by default", and "read-only" inverts that. The other
three modes test specifically for hot-standby status, which is not
quite the same thing. (Setting default_transaction_read_only on
a primary server renders it read-only to this logic, but not a
standby.)
Furthermore, if talking to a v14 or later server, no extra network
round trip is needed to detect the session's status; the GUC_REPORT
variables delivered by the server are enough. When talking to an
older server, a SHOW or SELECT query is issued to detect session
read-only-ness or server hot-standby state, as needed.
Haribabu Kommi, Greg Nancarrow, Vignesh C, Tom Lane; reviewed at
various times by Laurenz Albe, Takayuki Tsunakawa, Peter Smith.
Discussion: https://postgr.es/m/CAF3+xM+8-ztOkaV9gHiJ3wfgENTq97QcjXQt+rbFQ6F7oNzt9A@mail.gmail.com
2021-03-03 02:17:45 +01:00
|
|
|
"Target-Session-Attrs", "", 15, /* sizeof("prefer-standby") = 15 */
|
libpq: Add target_session_attrs parameter.
Commit 274bb2b3857cc987cfa21d14775cae9b0dababa5 made it possible to
specify multiple IPs in a connection string, but that's not good
enough for the case where you have a read-write master and a bunch of
read-only standbys and want to connect to whichever server is the
master at the current time. This commit allows that, by making it
possible to specify target_session_attrs=read-write as a connection
parameter.
There was extensive discussion of the best name for the connection
parameter and its values as well as the best way to distinguish master
and standbys. For now, adopt the same solution as JDBC: if the user
wants a read-write connection, issue 'show transaction_read_only' and
reject the connection if the result is 'on'. In the future, we
could add additional values of this new target_session_attrs parameter
that issue different queries; or we might have some way of
distinguishing the server type without resorting to an SQL query; but
right now, we have this, and that's (hopefully) a good start.
Victor Wagner and Mithun Cy. Design review by Álvaro Herrera, Catalin
Iacob, Takayuki Tsunakawa, and Craig Ringer; code review by me. I
changed Mithun's patch to skip all remaining IPs for a host if we
reject a connection based on this new parameter, rewrote the
documentation, and did some other cosmetic cleanup.
Discussion: http://postgr.es/m/CAD__OuhqPRGpcsfwPHz_PDqAGkoqS1UvnUnOnAB-LBWBW=wu4A@mail.gmail.com
2016-11-29 18:18:31 +01:00
|
|
|
offsetof(struct pg_conn, target_session_attrs)},
|
|
|
|
|
2000-03-11 04:08:37 +01:00
|
|
|
/* Terminating entry --- MUST BE LAST */
|
1996-11-09 11:39:54 +01:00
|
|
|
{NULL, NULL, NULL, NULL,
|
|
|
|
NULL, NULL, 0}
|
|
|
|
};
|
|
|
|
|
2003-06-08 19:43:00 +02:00
|
|
|
static const PQEnvironmentOption EnvironmentOptions[] =
|
1997-09-07 07:04:48 +02:00
|
|
|
{
|
1997-11-14 16:38:31 +01:00
|
|
|
/* common user-interface settings */
|
|
|
|
{
|
|
|
|
"PGDATESTYLE", "datestyle"
|
|
|
|
},
|
|
|
|
{
|
|
|
|
"PGTZ", "timezone"
|
|
|
|
},
|
|
|
|
/* internal performance-related settings */
|
|
|
|
{
|
|
|
|
"PGGEQO", "geqo"
|
|
|
|
},
|
1997-11-10 16:41:58 +01:00
|
|
|
{
|
In the spirit of TODO item
* Add use of 'const' for varibles in source tree
(which is misspelled, btw.)
I went through the front-end libpq code and did so. This affects in
particular the various accessor functions (such as PQdb() and
PQgetvalue()) as well as, by necessity, the internal helpers they use.
I have been really thorough in that regard, perhaps some people will find
it annoying that things like
char * foo = PQgetvalue(res, 0, 0)
will generate a warning. On the other hand it _should_ generate one. This
is no real compatibility break, although a few clients will have to be
fixed to suppress warnings. (Which again would be in the spirit of the
above TODO.)
In addition I replaced some int's by size_t's and removed some warnings
(and generated some new ones -- grmpf!). Also I rewrote PQoidStatus (so it
actually honors the const!) and supplied a new function PQoidValue that
returns a proper Oid type. This is only front-end stuff, none of the
communication stuff was touched.
The psql patch also adds some new consts to honor the new libpq situation,
as well as fixes a fatal condition that resulted when using the -V
(--version) option and there is no database listening.
So, to summarize, the psql you should definitely put in (with or without
the libpq). If you think I went too far with the const-mania in libpq, let
me know and I'll make adjustments. If you approve it, I will also update
the docs.
-Peter
--
Peter Eisentraut Sernanders vaeg 10:115
1999-11-11 01:10:14 +01:00
|
|
|
NULL, NULL
|
1997-11-10 16:41:58 +01:00
|
|
|
}
|
1997-03-18 21:15:39 +01:00
|
|
|
};
|
1997-09-07 07:04:48 +02:00
|
|
|
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
/*
 * Recognized URI prefixes: a connection string is treated as a URI
 * only if it begins with one of these two designators.
 */
static const char uri_designator[] = "postgresql://";
static const char short_uri_designator[] = "postgres://";
|
1999-11-30 04:08:19 +01:00
|
|
|
|
2003-04-28 06:29:12 +02:00
|
|
|
static bool connectOptions1(PGconn *conn, const char *conninfo);
|
|
|
|
static bool connectOptions2(PGconn *conn);
|
2000-03-11 04:08:37 +01:00
|
|
|
static int connectDBStart(PGconn *conn);
|
|
|
|
static int connectDBComplete(PGconn *conn);
|
2010-11-25 19:09:38 +01:00
|
|
|
static PGPing internal_ping(PGconn *conn);
|
2000-03-11 04:08:37 +01:00
|
|
|
static PGconn *makeEmptyPGconn(void);
|
2022-05-12 18:42:29 +02:00
|
|
|
static void pqFreeCommandQueue(PGcmdQueueEntry *queue);
|
2014-11-25 11:55:00 +01:00
|
|
|
static bool fillPGconn(PGconn *conn, PQconninfoOption *connOptions);
|
2000-03-11 04:08:37 +01:00
|
|
|
static void freePGconn(PGconn *conn);
|
|
|
|
static void closePGconn(PGconn *conn);
|
In libpq, don't look up all the hostnames at once.
Historically, we looked up the target hostname in connectDBStart, so that
PQconnectPoll did not need to do DNS name resolution. The patches that
added multiple-target-host support to libpq preserved this division of
labor; but it's really nonsensical now, because it means that if any one
of the target hosts fails to resolve in DNS, the connection fails. That
negates the no-single-point-of-failure goal of the feature. Additionally,
DNS lookups aren't exactly cheap, but the code did them all even if the
first connection attempt succeeds.
Hence, rearrange so that PQconnectPoll does the lookups, and only looks
up a hostname when it's time to try that host. This does mean that
PQconnectPoll could block on a DNS lookup --- but if you wanted to avoid
that, you should be using hostaddr, as the documentation has always
specified. It seems fairly unlikely that any applications would really
care whether the lookup occurs inside PQconnectStart or PQconnectPoll.
In addition to calling out that fact explicitly, do some other minor
wordsmithing in the docs around the multiple-target-host feature.
Since this seems like a bug in the multiple-target-host feature,
backpatch to v10 where that was introduced. In the back branches,
avoid moving any existing fields of struct pg_conn, just in case
any third-party code is looking into that struct.
Tom Lane, reviewed by Fabien Coelho
Discussion: https://postgr.es/m/4913.1533827102@sss.pgh.pa.us
2018-08-23 22:39:19 +02:00
|
|
|
static void release_conn_addrinfo(PGconn *conn);
|
libpq: Add target_session_attrs parameter.
Commit 274bb2b3857cc987cfa21d14775cae9b0dababa5 made it possible to
specify multiple IPs in a connection string, but that's not good
enough for the case where you have a read-write master and a bunch of
read-only standbys and want to connect to whichever server is the
master at the current time. This commit allows that, by making it
possible to specify target_session_attrs=read-write as a connection
parameter.
There was extensive discussion of the best name for the connection
parameter and its values as well as the best way to distinguish master
and standbys. For now, adopt the same solution as JDBC: if the user
wants a read-write connection, issue 'show transaction_read_only' and
reject the connection if the result is 'on'. In the future, we
could add additional values of this new target_session_attrs parameter
that issue different queries; or we might have some way of
distinguishing the server type without resorting to an SQL query; but
right now, we have this, and that's (hopefully) a good start.
Victor Wagner and Mithun Cy. Design review by Álvaro Herrera, Catalin
Iacob, Takayuki Tsunakawa, and Craig Ringer; code review by me. I
changed Mithun's patch to skip all remaining IPs for a host if we
reject a connection based on this new parameter, rewrote the
documentation, and did some other cosmetic cleanup.
Discussion: http://postgr.es/m/CAD__OuhqPRGpcsfwPHz_PDqAGkoqS1UvnUnOnAB-LBWBW=wu4A@mail.gmail.com
2016-11-29 18:18:31 +01:00
|
|
|
static void sendTerminateConn(PGconn *conn);
|
2012-03-22 17:08:34 +01:00
|
|
|
static PQconninfoOption *conninfo_init(PQExpBuffer errorMessage);
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
static PQconninfoOption *parse_connection_string(const char *conninfo,
|
|
|
|
PQExpBuffer errorMessage, bool use_defaults);
|
2015-04-02 16:10:22 +02:00
|
|
|
static int uri_prefix_length(const char *connstr);
|
|
|
|
static bool recognized_connection_string(const char *connstr);
|
2000-03-11 04:08:37 +01:00
|
|
|
static PQconninfoOption *conninfo_parse(const char *conninfo,
|
2008-09-22 16:21:44 +02:00
|
|
|
PQExpBuffer errorMessage, bool use_defaults);
|
2011-09-26 00:52:48 +02:00
|
|
|
static PQconninfoOption *conninfo_array_parse(const char *const *keywords,
|
|
|
|
const char *const *values, PQExpBuffer errorMessage,
|
2010-02-05 04:09:05 +01:00
|
|
|
bool use_defaults, int expand_dbname);
|
2012-03-22 17:08:34 +01:00
|
|
|
static bool conninfo_add_defaults(PQconninfoOption *options,
|
|
|
|
PQExpBuffer errorMessage);
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
static PQconninfoOption *conninfo_uri_parse(const char *uri,
|
|
|
|
PQExpBuffer errorMessage, bool use_defaults);
|
|
|
|
static bool conninfo_uri_parse_options(PQconninfoOption *options,
|
|
|
|
const char *uri, PQExpBuffer errorMessage);
|
|
|
|
static bool conninfo_uri_parse_params(char *params,
|
|
|
|
PQconninfoOption *connOptions,
|
|
|
|
PQExpBuffer errorMessage);
|
|
|
|
static char *conninfo_uri_decode(const char *str, PQExpBuffer errorMessage);
|
|
|
|
static bool get_hexdigit(char digit, int *value);
|
|
|
|
static const char *conninfo_getval(PQconninfoOption *connOptions,
|
2000-03-11 04:08:37 +01:00
|
|
|
const char *keyword);
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
/* Forward declarations of static helper functions defined later in this file */
static PQconninfoOption *conninfo_storeval(PQconninfoOption *connOptions,
										   const char *keyword, const char *value,
										   PQExpBuffer errorMessage, bool ignoreMissing, bool uri_decode);
static PQconninfoOption *conninfo_find(PQconninfoOption *connOptions,
									   const char *keyword);
/* Default notice receiver/processor installed on new connections */
static void defaultNoticeReceiver(void *arg, const PGresult *res);
static void defaultNoticeProcessor(void *arg, const char *message);
/* pg_service.conf lookup support */
static int	parseServiceInfo(PQconninfoOption *options,
							 PQExpBuffer errorMessage);
static int	parseServiceFile(const char *serviceFile,
							 const char *service,
							 PQconninfoOption *options,
							 PQExpBuffer errorMessage,
							 bool *group_found);
/* ~/.pgpass (password file) lookup support */
static char *pwdfMatchesString(char *buf, const char *token);
static char *passwordFromFile(const char *hostname, const char *port, const char *dbname,
							  const char *username, const char *pgpassfile);
static void pgpassfileWarning(PGconn *conn);
static void default_threadlock(int acquire);
/* Validation of ssl_min/max_protocol_version connection options */
static bool sslVerifyProtocolVersion(const char *version);
static bool sslVerifyProtocolRange(const char *min, const char *max);
|
/*
 * global variable because fe-auth.c needs to access it
 *
 * NOTE(review): presumably this is the hook used to serialize access to
 * non-thread-safe library code (see default_threadlock, taking an
 * acquire/release flag) -- confirm against PQregisterThreadLock's docs.
 */
pgthreadlock_t pg_g_threadlock = default_threadlock;
|
2000-03-11 04:08:37 +01:00
|
|
|
|
2004-01-09 03:02:43 +01:00
|
|
|
|
2012-09-07 22:02:23 +02:00
|
|
|
/*
 *		pqDropConnection
 *
 * Close any physical connection to the server, and reset associated
 * state inside the connection object.  We don't release state that
 * would be needed to reconnect, though, nor local state that might still
 * be useful later.
 *
 * We can always flush the output buffer, since there's no longer any hope
 * of sending that data.  However, unprocessed input data might still be
 * valuable, so the caller must tell us whether to flush that or not.
 *
 * Authentication/encryption state (GSS, SSPI, SASL) is cleared here rather
 * than only at final connection close, so that it is also reset when we
 * drop and retry a connection -- e.g. the sslmode=prefer non-SSL fallback,
 * or moving on to another host in a multi-host connection string.
 * Otherwise a second attempt could fail with "duplicate authentication
 * request" errors (per the CVE-era fixes recorded in this file's history).
 */
void
pqDropConnection(PGconn *conn, bool flushInput)
{
	/* Drop any SSL state */
	pqsecure_close(conn);

	/* Close the socket itself */
	if (conn->sock != PGINVALID_SOCKET)
		closesocket(conn->sock);
	conn->sock = PGINVALID_SOCKET;

	/* Optionally discard any unread data */
	if (flushInput)
		conn->inStart = conn->inCursor = conn->inEnd = 0;

	/* Always discard any unsent data */
	conn->outCount = 0;

	/* Likewise, discard any pending pipelined commands */
	pqFreeCommandQueue(conn->cmd_queue_head);
	conn->cmd_queue_head = conn->cmd_queue_tail = NULL;
	pqFreeCommandQueue(conn->cmd_queue_recycle);
	conn->cmd_queue_recycle = NULL;

	/* Free authentication/encryption state */
#ifdef ENABLE_GSS
	{
		OM_uint32	min_s;

		/* Release any GSSAPI credential we acquired */
		if (conn->gcred != GSS_C_NO_CREDENTIAL)
		{
			gss_release_cred(&min_s, &conn->gcred);
			conn->gcred = GSS_C_NO_CREDENTIAL;
		}
		/* Discard any (possibly partially-established) security context */
		if (conn->gctx)
			gss_delete_sec_context(&min_s, &conn->gctx, GSS_C_NO_BUFFER);
		if (conn->gtarg_nam)
			gss_release_name(&min_s, &conn->gtarg_nam);
		/* Free buffers used for GSSAPI-encrypted transport */
		if (conn->gss_SendBuffer)
		{
			free(conn->gss_SendBuffer);
			conn->gss_SendBuffer = NULL;
		}
		if (conn->gss_RecvBuffer)
		{
			free(conn->gss_RecvBuffer);
			conn->gss_RecvBuffer = NULL;
		}
		if (conn->gss_ResultBuffer)
		{
			free(conn->gss_ResultBuffer);
			conn->gss_ResultBuffer = NULL;
		}
		/* No longer talking over a GSSAPI-encrypted connection */
		conn->gssenc = false;
	}
#endif
#ifdef ENABLE_SSPI
	if (conn->sspitarget)
	{
		free(conn->sspitarget);
		conn->sspitarget = NULL;
	}
	if (conn->sspicred)
	{
		/* Release the Windows credentials handle before freeing its holder */
		FreeCredentialsHandle(conn->sspicred);
		free(conn->sspicred);
		conn->sspicred = NULL;
	}
	if (conn->sspictx)
	{
		/* Likewise, tear down the SSPI security context */
		DeleteSecurityContext(conn->sspictx);
		free(conn->sspictx);
		conn->sspictx = NULL;
	}
	conn->usesspi = 0;
#endif
	if (conn->sasl_state)
	{
		/* Let the active SASL mechanism free its own opaque state */
		conn->sasl->free(conn->sasl_state);
		conn->sasl_state = NULL;
	}
}
|
|
|
|
|
2021-03-15 22:13:42 +01:00
|
|
|
/*
|
|
|
|
* pqFreeCommandQueue
|
|
|
|
* Free all the entries of PGcmdQueueEntry queue passed.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
pqFreeCommandQueue(PGcmdQueueEntry *queue)
|
|
|
|
{
|
|
|
|
while (queue != NULL)
|
|
|
|
{
|
|
|
|
PGcmdQueueEntry *cur = queue;
|
|
|
|
|
|
|
|
queue = cur->next;
|
2022-06-16 21:50:56 +02:00
|
|
|
free(cur->query);
|
2021-03-15 22:13:42 +01:00
|
|
|
free(cur);
|
|
|
|
}
|
|
|
|
}
|
2012-09-07 22:02:23 +02:00
|
|
|
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
/*
|
|
|
|
* pqDropServerData
|
|
|
|
*
|
|
|
|
* Clear all connection state data that was received from (or deduced about)
|
|
|
|
* the server. This is essential to do between connection attempts to
|
|
|
|
* different servers, else we may incorrectly hold over some data from the
|
|
|
|
* old server.
|
|
|
|
*
|
|
|
|
* It would be better to merge this into pqDropConnection, perhaps, but
|
|
|
|
* right now we cannot because that function is called immediately on
|
|
|
|
* detection of connection loss (cf. pqReadData, for instance). This data
|
|
|
|
* should be kept until we are actually starting a new connection.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
pqDropServerData(PGconn *conn)
|
|
|
|
{
|
|
|
|
PGnotify *notify;
|
|
|
|
pgParameterStatus *pstatus;
|
|
|
|
|
|
|
|
/* Forget pending notifies */
|
|
|
|
notify = conn->notifyHead;
|
|
|
|
while (notify != NULL)
|
|
|
|
{
|
|
|
|
PGnotify *prev = notify;
|
|
|
|
|
|
|
|
notify = notify->next;
|
|
|
|
free(prev);
|
|
|
|
}
|
|
|
|
conn->notifyHead = conn->notifyTail = NULL;
|
|
|
|
|
|
|
|
/* Reset ParameterStatus data, as well as variables deduced from it */
|
|
|
|
pstatus = conn->pstatus;
|
|
|
|
while (pstatus != NULL)
|
|
|
|
{
|
|
|
|
pgParameterStatus *prev = pstatus;
|
|
|
|
|
|
|
|
pstatus = pstatus->next;
|
|
|
|
free(prev);
|
|
|
|
}
|
|
|
|
conn->pstatus = NULL;
|
|
|
|
conn->client_encoding = PG_SQL_ASCII;
|
|
|
|
conn->std_strings = false;
|
Extend the abilities of libpq's target_session_attrs parameter.
In addition to the existing options of "any" and "read-write", we
now support "read-only", "primary", "standby", and "prefer-standby".
"read-write" retains its previous meaning of "transactions are
read-write by default", and "read-only" inverts that. The other
three modes test specifically for hot-standby status, which is not
quite the same thing. (Setting default_transaction_read_only on
a primary server renders it read-only to this logic, but not a
standby.)
Furthermore, if talking to a v14 or later server, no extra network
round trip is needed to detect the session's status; the GUC_REPORT
variables delivered by the server are enough. When talking to an
older server, a SHOW or SELECT query is issued to detect session
read-only-ness or server hot-standby state, as needed.
Haribabu Kommi, Greg Nancarrow, Vignesh C, Tom Lane; reviewed at
various times by Laurenz Albe, Takayuki Tsunakawa, Peter Smith.
Discussion: https://postgr.es/m/CAF3+xM+8-ztOkaV9gHiJ3wfgENTq97QcjXQt+rbFQ6F7oNzt9A@mail.gmail.com
2021-03-03 02:17:45 +01:00
|
|
|
conn->default_transaction_read_only = PG_BOOL_UNKNOWN;
|
|
|
|
conn->in_hot_standby = PG_BOOL_UNKNOWN;
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
conn->sversion = 0;
|
|
|
|
|
|
|
|
/* Drop large-object lookup data */
|
2022-06-16 21:50:56 +02:00
|
|
|
free(conn->lobjfuncs);
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
conn->lobjfuncs = NULL;
|
|
|
|
|
|
|
|
/* Reset assorted other per-connection state */
|
|
|
|
conn->last_sqlstate[0] = '\0';
|
|
|
|
conn->auth_req_received = false;
|
|
|
|
conn->password_needed = false;
|
Restructure libpq's handling of send failures.
Originally, if libpq got a failure (e.g., ECONNRESET) while trying to
send data to the server, it would just report that and wash its hands
of the matter. It was soon found that that wasn't a very pleasant way
of coping with server-initiated disconnections, so we introduced a hack
(pqHandleSendFailure) in the code that sends queries to make it peek
ahead for server error reports before reporting the send failure.
It now emerges that related cases can occur during connection setup;
in particular, as of TLS 1.3 it's unsafe to assume that SSL connection
failures will be reported by SSL_connect rather than during our first
send attempt. We could have fixed that in a hacky way by applying
pqHandleSendFailure after a startup packet send failure, but
(a) pqHandleSendFailure explicitly disclaims suitability for use in any
state except query startup, and (b) the problem still potentially exists
for other send attempts in libpq.
Instead, let's fix this in a more general fashion by eliminating
pqHandleSendFailure altogether, and instead arranging to postpone
all reports of send failures in libpq until after we've made an
attempt to read and process server messages. The send failure won't
be reported at all if we find a server message or detect input EOF.
(Note: this removes one of the reasons why libpq typically overwrites,
rather than appending to, conn->errorMessage: pqHandleSendFailure needed
that behavior so that the send failure report would be replaced if we
got a server message or read failure report. Eventually I'd like to get
rid of that overwrite behavior altogether, but today is not that day.
For the moment, pqSendSome is assuming that its callees will overwrite
not append to conn->errorMessage.)
Possibly this change should get back-patched someday; but it needs
testing first, so let's not consider that till after v12 beta.
Discussion: https://postgr.es/m/CAEepm=2n6Nv+5tFfe8YnkUm1fXgvxR0Mm1FoD+QKG-vLNGLyKg@mail.gmail.com
2019-03-19 21:20:20 +01:00
|
|
|
conn->write_failed = false;
|
2022-06-16 21:50:56 +02:00
|
|
|
free(conn->write_err_msg);
|
Restructure libpq's handling of send failures.
Originally, if libpq got a failure (e.g., ECONNRESET) while trying to
send data to the server, it would just report that and wash its hands
of the matter. It was soon found that that wasn't a very pleasant way
of coping with server-initiated disconnections, so we introduced a hack
(pqHandleSendFailure) in the code that sends queries to make it peek
ahead for server error reports before reporting the send failure.
It now emerges that related cases can occur during connection setup;
in particular, as of TLS 1.3 it's unsafe to assume that SSL connection
failures will be reported by SSL_connect rather than during our first
send attempt. We could have fixed that in a hacky way by applying
pqHandleSendFailure after a startup packet send failure, but
(a) pqHandleSendFailure explicitly disclaims suitability for use in any
state except query startup, and (b) the problem still potentially exists
for other send attempts in libpq.
Instead, let's fix this in a more general fashion by eliminating
pqHandleSendFailure altogether, and instead arranging to postpone
all reports of send failures in libpq until after we've made an
attempt to read and process server messages. The send failure won't
be reported at all if we find a server message or detect input EOF.
(Note: this removes one of the reasons why libpq typically overwrites,
rather than appending to, conn->errorMessage: pqHandleSendFailure needed
that behavior so that the send failure report would be replaced if we
got a server message or read failure report. Eventually I'd like to get
rid of that overwrite behavior altogether, but today is not that day.
For the moment, pqSendSome is assuming that its callees will overwrite
not append to conn->errorMessage.)
Possibly this change should get back-patched someday; but it needs
testing first, so let's not consider that till after v12 beta.
Discussion: https://postgr.es/m/CAEepm=2n6Nv+5tFfe8YnkUm1fXgvxR0Mm1FoD+QKG-vLNGLyKg@mail.gmail.com
2019-03-19 21:20:20 +01:00
|
|
|
conn->write_err_msg = NULL;
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
conn->be_pid = 0;
|
|
|
|
conn->be_key = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
/*
 *		Connecting to a Database
 *
 * There are now six different ways a user of this API can connect to the
 * database.  Two are not recommended for use in new code, because of their
 * lack of extensibility with respect to the passing of options to the
 * backend.  These are PQsetdb and PQsetdbLogin (the former now being a macro
 * to the latter).
 *
 * If it is desired to connect in a synchronous (blocking) manner, use the
 * function PQconnectdb or PQconnectdbParams. The former accepts a string of
 * option = value pairs (or a URI) which must be parsed; the latter takes two
 * NULL terminated arrays instead.
 *
 * To connect in an asynchronous (non-blocking) manner, use the functions
 * PQconnectStart or PQconnectStartParams (which differ in the same way as
 * PQconnectdb and PQconnectdbParams) and PQconnectPoll.
 *
 * Internally, the static functions connectDBStart, connectDBComplete
 * are part of the connection procedure.
 */
|
|
|
|
|
2010-01-28 07:28:26 +01:00
|
|
|
/*
|
|
|
|
* PQconnectdbParams
|
|
|
|
*
|
|
|
|
* establishes a connection to a postgres backend through the postmaster
|
|
|
|
* using connection information in two arrays.
|
|
|
|
*
|
|
|
|
* The keywords array is defined as
|
|
|
|
*
|
|
|
|
* const char *params[] = {"option1", "option2", NULL}
|
|
|
|
*
|
|
|
|
* The values array is defined as
|
|
|
|
*
|
|
|
|
* const char *values[] = {"value1", "value2", NULL}
|
|
|
|
*
|
|
|
|
* Returns a PGconn* which is needed for all subsequent libpq calls, or NULL
|
|
|
|
* if a memory allocation failed.
|
|
|
|
* If the status field of the connection returned is CONNECTION_BAD,
|
|
|
|
* then some fields may be null'ed out instead of having valid values.
|
|
|
|
*
|
|
|
|
* You should call PQfinish (if conn is not NULL) regardless of whether this
|
|
|
|
* call succeeded.
|
|
|
|
*/
|
|
|
|
PGconn *
|
2011-09-26 00:52:48 +02:00
|
|
|
PQconnectdbParams(const char *const *keywords,
|
|
|
|
const char *const *values,
|
2010-02-05 04:09:05 +01:00
|
|
|
int expand_dbname)
|
2010-01-28 07:28:26 +01:00
|
|
|
{
|
2010-02-05 04:09:05 +01:00
|
|
|
PGconn *conn = PQconnectStartParams(keywords, values, expand_dbname);
|
2010-01-28 07:28:26 +01:00
|
|
|
|
|
|
|
if (conn && conn->status != CONNECTION_BAD)
|
|
|
|
(void) connectDBComplete(conn);
|
|
|
|
|
|
|
|
return conn;
|
|
|
|
}
|
|
|
|
|
2010-11-27 07:30:34 +01:00
|
|
|
/*
|
|
|
|
* PQpingParams
|
|
|
|
*
|
|
|
|
* check server status, accepting parameters identical to PQconnectdbParams
|
|
|
|
*/
|
2010-11-25 19:09:38 +01:00
|
|
|
PGPing
|
2011-09-26 00:52:48 +02:00
|
|
|
PQpingParams(const char *const *keywords,
|
|
|
|
const char *const *values,
|
2010-11-27 07:30:34 +01:00
|
|
|
int expand_dbname)
|
2010-11-25 19:09:38 +01:00
|
|
|
{
|
|
|
|
PGconn *conn = PQconnectStartParams(keywords, values, expand_dbname);
|
|
|
|
PGPing ret;
|
|
|
|
|
|
|
|
ret = internal_ping(conn);
|
|
|
|
PQfinish(conn);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2001-08-17 17:11:15 +02:00
|
|
|
/*
|
1996-11-09 11:39:54 +01:00
|
|
|
* PQconnectdb
|
|
|
|
*
|
1997-11-10 16:41:58 +01:00
|
|
|
* establishes a connection to a postgres backend through the postmaster
|
1996-11-09 11:39:54 +01:00
|
|
|
* using connection information in a string.
|
|
|
|
*
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
* The conninfo string is either a whitespace-separated list of
|
1996-11-09 11:39:54 +01:00
|
|
|
*
|
|
|
|
* option = value
|
|
|
|
*
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
* definitions or a URI (refer to the documentation for details.) Value
|
|
|
|
* might be a single value containing no whitespaces or a single quoted
|
|
|
|
* string. If a single quote should appear anywhere in the value, it must be
|
|
|
|
* escaped with a backslash like \'
|
1996-11-09 11:39:54 +01:00
|
|
|
*
|
1999-11-30 04:08:19 +01:00
|
|
|
* Returns a PGconn* which is needed for all subsequent libpq calls, or NULL
|
|
|
|
* if a memory allocation failed.
|
|
|
|
* If the status field of the connection returned is CONNECTION_BAD,
|
|
|
|
* then some fields may be null'ed out instead of having valid values.
|
|
|
|
*
|
|
|
|
* You should call PQfinish (if conn is not NULL) regardless of whether this
|
|
|
|
* call succeeded.
|
2000-01-14 06:33:15 +01:00
|
|
|
*/
|
1997-11-10 16:41:58 +01:00
|
|
|
PGconn *
|
1996-11-09 11:39:54 +01:00
|
|
|
PQconnectdb(const char *conninfo)
|
1999-11-30 04:08:19 +01:00
|
|
|
{
|
|
|
|
PGconn *conn = PQconnectStart(conninfo);
|
2000-01-14 06:33:15 +01:00
|
|
|
|
|
|
|
if (conn && conn->status != CONNECTION_BAD)
|
|
|
|
(void) connectDBComplete(conn);
|
1999-11-30 04:08:19 +01:00
|
|
|
|
|
|
|
return conn;
|
|
|
|
}
|
|
|
|
|
2010-11-27 07:30:34 +01:00
|
|
|
/*
|
|
|
|
* PQping
|
|
|
|
*
|
|
|
|
* check server status, accepting parameters identical to PQconnectdb
|
|
|
|
*/
|
2010-11-25 19:09:38 +01:00
|
|
|
PGPing
|
|
|
|
PQping(const char *conninfo)
|
|
|
|
{
|
|
|
|
PGconn *conn = PQconnectStart(conninfo);
|
|
|
|
PGPing ret;
|
|
|
|
|
|
|
|
ret = internal_ping(conn);
|
|
|
|
PQfinish(conn);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2001-08-17 17:11:15 +02:00
|
|
|
/*
|
2010-01-28 07:28:26 +01:00
|
|
|
* PQconnectStartParams
|
1999-11-30 04:08:19 +01:00
|
|
|
*
|
|
|
|
* Begins the establishment of a connection to a postgres backend through the
|
2010-01-28 07:28:26 +01:00
|
|
|
* postmaster using connection information in a struct.
|
1999-11-30 04:08:19 +01:00
|
|
|
*
|
2010-01-28 07:28:26 +01:00
|
|
|
* See comment for PQconnectdbParams for the definition of the string format.
|
1999-11-30 04:08:19 +01:00
|
|
|
*
|
|
|
|
* Returns a PGconn*. If NULL is returned, a malloc error has occurred, and
|
|
|
|
* you should not attempt to proceed with this connection. If the status
|
|
|
|
* field of the connection returned is CONNECTION_BAD, an error has
|
|
|
|
* occurred. In this case you should call PQfinish on the result, (perhaps
|
|
|
|
* inspecting the error message first). Other fields of the structure may not
|
|
|
|
* be valid if that occurs. If the status field is not CONNECTION_BAD, then
|
|
|
|
* this stage has succeeded - call PQconnectPoll, using select(2) to see when
|
|
|
|
* this is necessary.
|
|
|
|
*
|
|
|
|
* See PQconnectPoll for more info.
|
2000-01-14 06:33:15 +01:00
|
|
|
*/
|
1999-11-30 04:08:19 +01:00
|
|
|
PGconn *
|
2011-09-26 00:52:48 +02:00
|
|
|
PQconnectStartParams(const char *const *keywords,
|
|
|
|
const char *const *values,
|
2010-02-05 04:09:05 +01:00
|
|
|
int expand_dbname)
|
1996-11-09 11:39:54 +01:00
|
|
|
{
|
2010-01-28 07:28:26 +01:00
|
|
|
PGconn *conn;
|
|
|
|
PQconninfoOption *connOptions;
|
1998-02-26 05:46:47 +01:00
|
|
|
|
1996-11-09 11:39:54 +01:00
|
|
|
/*
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
* Allocate memory for the conn structure. Note that we also expect this
|
|
|
|
* to initialize conn->errorMessage to empty. All subsequent steps during
|
|
|
|
* connection initialization will only append to that buffer.
|
1996-11-09 11:39:54 +01:00
|
|
|
*/
|
1998-05-07 01:51:16 +02:00
|
|
|
conn = makeEmptyPGconn();
|
1996-11-09 11:39:54 +01:00
|
|
|
if (conn == NULL)
|
2004-01-07 19:56:30 +01:00
|
|
|
return NULL;
|
1996-11-09 11:39:54 +01:00
|
|
|
|
2003-04-28 06:29:12 +02:00
|
|
|
/*
|
2010-01-28 07:28:26 +01:00
|
|
|
* Parse the conninfo arrays
|
2003-04-28 06:29:12 +02:00
|
|
|
*/
|
2010-01-28 07:28:26 +01:00
|
|
|
connOptions = conninfo_array_parse(keywords, values,
|
2010-02-05 04:09:05 +01:00
|
|
|
&conn->errorMessage,
|
|
|
|
true, expand_dbname);
|
2010-01-28 07:28:26 +01:00
|
|
|
if (connOptions == NULL)
|
|
|
|
{
|
|
|
|
conn->status = CONNECTION_BAD;
|
|
|
|
/* errorMessage is already set */
|
2011-04-03 00:05:42 +02:00
|
|
|
return conn;
|
2010-01-28 07:28:26 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Move option values into conn structure
|
|
|
|
*/
|
2014-11-25 11:55:00 +01:00
|
|
|
if (!fillPGconn(conn, connOptions))
|
|
|
|
{
|
|
|
|
PQconninfoFree(connOptions);
|
|
|
|
return conn;
|
|
|
|
}
|
2010-01-28 07:28:26 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Free the option info - all is in conn now
|
|
|
|
*/
|
|
|
|
PQconninfoFree(connOptions);
|
2003-04-28 06:29:12 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Compute derived options
|
|
|
|
*/
|
|
|
|
if (!connectOptions2(conn))
|
|
|
|
return conn;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Connect to the database
|
|
|
|
*/
|
|
|
|
if (!connectDBStart(conn))
|
|
|
|
{
|
|
|
|
/* Just in case we failed to set it in connectDBStart */
|
|
|
|
conn->status = CONNECTION_BAD;
|
|
|
|
}
|
|
|
|
|
|
|
|
return conn;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2010-01-28 07:28:26 +01:00
|
|
|
* PQconnectStart
|
2003-04-28 06:29:12 +02:00
|
|
|
*
|
2010-01-28 07:28:26 +01:00
|
|
|
* Begins the establishment of a connection to a postgres backend through the
|
|
|
|
* postmaster using connection information in a string.
|
2003-04-28 06:29:12 +02:00
|
|
|
*
|
2010-01-28 07:28:26 +01:00
|
|
|
* See comment for PQconnectdb for the definition of the string format.
|
|
|
|
*
|
|
|
|
* Returns a PGconn*. If NULL is returned, a malloc error has occurred, and
|
|
|
|
* you should not attempt to proceed with this connection. If the status
|
|
|
|
* field of the connection returned is CONNECTION_BAD, an error has
|
|
|
|
* occurred. In this case you should call PQfinish on the result, (perhaps
|
|
|
|
* inspecting the error message first). Other fields of the structure may not
|
|
|
|
* be valid if that occurs. If the status field is not CONNECTION_BAD, then
|
|
|
|
* this stage has succeeded - call PQconnectPoll, using select(2) to see when
|
|
|
|
* this is necessary.
|
|
|
|
*
|
|
|
|
* See PQconnectPoll for more info.
|
2003-04-28 06:29:12 +02:00
|
|
|
*/
|
2010-01-28 07:28:26 +01:00
|
|
|
PGconn *
|
|
|
|
PQconnectStart(const char *conninfo)
|
2003-04-28 06:29:12 +02:00
|
|
|
{
|
2010-01-28 07:28:26 +01:00
|
|
|
PGconn *conn;
|
|
|
|
|
|
|
|
/*
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
* Allocate memory for the conn structure. Note that we also expect this
|
|
|
|
* to initialize conn->errorMessage to empty. All subsequent steps during
|
|
|
|
* connection initialization will only append to that buffer.
|
2010-01-28 07:28:26 +01:00
|
|
|
*/
|
|
|
|
conn = makeEmptyPGconn();
|
|
|
|
if (conn == NULL)
|
|
|
|
return NULL;
|
2003-04-28 06:29:12 +02:00
|
|
|
|
1996-11-09 11:39:54 +01:00
|
|
|
/*
|
2000-03-11 04:08:37 +01:00
|
|
|
* Parse the conninfo string
|
1996-11-09 11:39:54 +01:00
|
|
|
*/
|
2010-01-28 07:28:26 +01:00
|
|
|
if (!connectOptions1(conn, conninfo))
|
|
|
|
return conn;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Compute derived options
|
|
|
|
*/
|
|
|
|
if (!connectOptions2(conn))
|
|
|
|
return conn;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Connect to the database
|
|
|
|
*/
|
|
|
|
if (!connectDBStart(conn))
|
1997-09-07 07:04:48 +02:00
|
|
|
{
|
2010-01-28 07:28:26 +01:00
|
|
|
/* Just in case we failed to set it in connectDBStart */
|
1996-11-09 11:39:54 +01:00
|
|
|
conn->status = CONNECTION_BAD;
|
|
|
|
}
|
2000-03-11 04:08:37 +01:00
|
|
|
|
2010-01-28 07:28:26 +01:00
|
|
|
return conn;
|
|
|
|
}
|
|
|
|
|
2014-11-25 11:55:00 +01:00
|
|
|
/*
|
|
|
|
* Move option values into conn structure
|
|
|
|
*
|
|
|
|
* Don't put anything cute here --- intelligence should be in
|
|
|
|
* connectOptions2 ...
|
|
|
|
*
|
|
|
|
* Returns true on success. On failure, returns false and sets error message.
|
|
|
|
*/
|
|
|
|
static bool
|
2010-01-28 07:28:26 +01:00
|
|
|
fillPGconn(PGconn *conn, PQconninfoOption *connOptions)
|
|
|
|
{
|
2012-11-30 07:09:18 +01:00
|
|
|
const internalPQconninfoOption *option;
|
2010-01-28 07:28:26 +01:00
|
|
|
|
2012-11-30 07:09:18 +01:00
|
|
|
for (option = PQconninfoOptions; option->keyword; option++)
|
At long last I put together a patch to support 4 client SSL negotiation
modes (and replace the requiressl boolean). The four options were first
spelled out by Magnus Hagander <mha@sollentuna.net> on 2000-08-23 in email
to pgsql-hackers, archived here:
http://archives.postgresql.org/pgsql-hackers/2000-08/msg00639.php
My original less-flexible patch and the ensuing thread are archived at:
http://dbforums.com/t623845.html
Attached is a new patch, including documentation.
To sum up, there's a new client parameter "sslmode" and environment
variable "PGSSLMODE", with these options:
sslmode description
------- -----------
disable Unencrypted non-SSL only
allow Negotiate, prefer non-SSL
prefer Negotiate, prefer SSL (default)
require Require SSL
The only change to the server is a new pg_hba.conf line type,
"hostnossl", for specifying connections that are not allowed to use SSL
(for example, to prevent servers on a local network from accidentally
using SSL and wasting cycles). Thus the 3 pg_hba.conf line types are:
pg_hba.conf line types
----------------------
host applies to either SSL or regular connections
hostssl applies only to SSL connections
hostnossl applies only to regular connections
These client and server options, the postgresql.conf ssl = false option,
and finally the possibility of compiling with no SSL support at all,
make quite a range of combinations to test. I threw together a test
script to try many of them out. It's in a separate tarball with its
config files, a patch to psql so it'll announce SSL connections even in
absence of a tty, and the test output. The test is especially informative
when run on the same tty the postmaster was started on, so the FATAL:
errors during negotiation are interleaved with the psql client output.
I saw Tom write that new submissions for 7.4 have to be in before midnight
local time, and since I'm on the east coast in the US, this just makes it
in before the bell. :)
Jon Jensen
2003-07-26 15:50:02 +02:00
|
|
|
{
|
2014-11-30 18:20:44 +01:00
|
|
|
if (option->connofs >= 0)
|
2012-11-30 07:09:18 +01:00
|
|
|
{
|
2014-11-30 18:20:44 +01:00
|
|
|
const char *tmp = conninfo_getval(connOptions, option->keyword);
|
2012-11-30 07:09:18 +01:00
|
|
|
|
2014-11-25 11:55:00 +01:00
|
|
|
if (tmp)
|
|
|
|
{
|
2014-11-30 18:20:44 +01:00
|
|
|
char **connmember = (char **) ((char *) conn + option->connofs);
|
|
|
|
|
2022-06-16 21:50:56 +02:00
|
|
|
free(*connmember);
|
2014-11-25 11:55:00 +01:00
|
|
|
*connmember = strdup(tmp);
|
|
|
|
if (*connmember == NULL)
|
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBufferStr(&conn->errorMessage,
|
|
|
|
libpq_gettext("out of memory\n"));
|
2014-11-25 11:55:00 +01:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
2012-11-30 07:09:18 +01:00
|
|
|
}
|
At long last I put together a patch to support 4 client SSL negotiation
modes (and replace the requiressl boolean). The four options were first
spelled out by Magnus Hagander <mha@sollentuna.net> on 2000-08-23 in email
to pgsql-hackers, archived here:
http://archives.postgresql.org/pgsql-hackers/2000-08/msg00639.php
My original less-flexible patch and the ensuing thread are archived at:
http://dbforums.com/t623845.html
Attached is a new patch, including documentation.
To sum up, there's a new client parameter "sslmode" and environment
variable "PGSSLMODE", with these options:
sslmode description
------- -----------
disable Unencrypted non-SSL only
allow Negotiate, prefer non-SSL
prefer Negotiate, prefer SSL (default)
require Require SSL
The only change to the server is a new pg_hba.conf line type,
"hostnossl", for specifying connections that are not allowed to use SSL
(for example, to prevent servers on a local network from accidentally
using SSL and wasting cycles). Thus the 3 pg_hba.conf line types are:
pg_hba.conf line types
----------------------
host applies to either SSL or regular connections
hostssl applies only to SSL connections
hostnossl applies only to regular connections
These client and server options, the postgresql.conf ssl = false option,
and finally the possibility of compiling with no SSL support at all,
make quite a range of combinations to test. I threw together a test
script to try many of them out. It's in a separate tarball with its
config files, a patch to psql so it'll announce SSL connections even in
absence of a tty, and the test output. The test is especially informative
when run on the same tty the postmaster was started on, so the FATAL:
errors during negotiation are interleaved with the psql client output.
I saw Tom write that new submissions for 7.4 have to be in before midnight
local time, and since I'm on the east coast in the US, this just makes it
in before the bell. :)
Jon Jensen
2003-07-26 15:50:02 +02:00
|
|
|
}
|
2014-11-25 11:55:00 +01:00
|
|
|
|
|
|
|
return true;
|
2010-01-28 07:28:26 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* connectOptions1
|
|
|
|
*
|
|
|
|
* Internal subroutine to set up connection parameters given an already-
|
|
|
|
* created PGconn and a conninfo string. Derived settings should be
|
|
|
|
* processed by calling connectOptions2 next. (We split them because
|
|
|
|
* PQsetdbLogin overrides defaults in between.)
|
|
|
|
*
|
|
|
|
* Returns true if OK, false if trouble (in which case errorMessage is set
|
|
|
|
* and so is conn->status).
|
|
|
|
*/
|
|
|
|
static bool
|
|
|
|
connectOptions1(PGconn *conn, const char *conninfo)
|
|
|
|
{
|
|
|
|
PQconninfoOption *connOptions;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Parse the conninfo string
|
|
|
|
*/
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
connOptions = parse_connection_string(conninfo, &conn->errorMessage, true);
|
2010-01-28 07:28:26 +01:00
|
|
|
if (connOptions == NULL)
|
|
|
|
{
|
|
|
|
conn->status = CONNECTION_BAD;
|
|
|
|
/* errorMessage is already set */
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Move option values into conn structure
|
|
|
|
*/
|
2014-11-25 11:55:00 +01:00
|
|
|
if (!fillPGconn(conn, connOptions))
|
|
|
|
{
|
|
|
|
conn->status = CONNECTION_BAD;
|
|
|
|
PQconninfoFree(connOptions);
|
|
|
|
return false;
|
|
|
|
}
|
1997-09-07 07:04:48 +02:00
|
|
|
|
1996-11-09 11:39:54 +01:00
|
|
|
/*
|
2000-03-11 04:08:37 +01:00
|
|
|
* Free the option info - all is in conn now
|
1997-09-07 07:04:48 +02:00
|
|
|
*/
|
2000-03-11 04:08:37 +01:00
|
|
|
PQconninfoFree(connOptions);
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2003-04-28 06:29:12 +02:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2017-07-10 11:28:57 +02:00
|
|
|
/*
 * Count the number of elements in a simple comma-separated list.
 *
 * An empty string still counts as one (empty) element, so the result is
 * always the number of commas plus one.
 */
static int
count_comma_separated_elems(const char *input)
{
	int			count = 1;
	const char *p = input;

	/* each comma found delimits one additional element */
	while ((p = strchr(p, ',')) != NULL)
	{
		count++;
		p++;					/* resume scanning just past the comma */
	}

	return count;
}
|
|
|
|
|
|
|
|
/*
 * Parse a simple comma-separated list.
 *
 * On each call, returns a malloc'd copy of the next element, and sets *more
 * to indicate whether there are any more elements in the list after this,
 * and updates *startptr to point to the next element, if any.
 *
 * On out of memory, returns NULL.
 */
static char *
parse_comma_separated_list(char **startptr, bool *more)
{
	char	   *element = *startptr;
	size_t		elem_len;
	char	   *copy;

	/*
	 * The current element runs up to the next comma or the end of the
	 * string, whichever comes first.
	 */
	elem_len = strcspn(element, ",");
	*more = (element[elem_len] == ',');

	copy = malloc(elem_len + 1);
	if (copy != NULL)
	{
		memcpy(copy, element, elem_len);
		copy[elem_len] = '\0';
	}

	/* step past the terminator (comma, or the NUL if this was the last) */
	*startptr = element + elem_len + 1;

	return copy;
}
|
|
|
|
|
2003-04-28 06:29:12 +02:00
|
|
|
/*
|
|
|
|
* connectOptions2
|
|
|
|
*
|
|
|
|
* Compute derived connection options after absorbing all user-supplied info.
|
|
|
|
*
|
|
|
|
* Returns true if OK, false if trouble (in which case errorMessage is set
|
|
|
|
* and so is conn->status).
|
|
|
|
*/
|
|
|
|
static bool
|
|
|
|
connectOptions2(PGconn *conn)
|
|
|
|
{
|
Fix libpq's code for searching .pgpass; rationalize empty-list-item cases.
Before v10, we always searched ~/.pgpass using the host parameter,
and nothing else, to match to the "hostname" field of ~/.pgpass.
(However, null host or host matching DEFAULT_PGSOCKET_DIR was replaced by
"localhost".) In v10, this got broken by commit 274bb2b38, repaired by
commit bdac9836d, and broken again by commit 7b02ba62e; in the code
actually shipped, we'd search with hostaddr if both that and host were
specified --- though oddly, *not* if only hostaddr were specified.
Since this is directly contrary to the documentation, and not
backwards-compatible, it's clearly a bug.
However, the change wasn't totally without justification, even though it
wasn't done quite right, because the pre-v10 behavior has arguably been
buggy since we added hostaddr. If hostaddr is specified and host isn't,
the pre-v10 code will search ~/.pgpass for "localhost", and ship that
password off to a server that most likely isn't local at all. That's
unhelpful at best, and could be a security breach at worst.
Therefore, rather than just revert to that old behavior, let's define
the behavior as "search with host if provided, else with hostaddr if
provided, else search for localhost". (As before, a host name matching
DEFAULT_PGSOCKET_DIR is replaced by localhost.) This matches the
behavior of the actual connection code, so that we don't pick up an
inappropriate password; and it allows useful searches to happen when
only hostaddr is given.
While we're messing around here, ensure that empty elements within a
host or hostaddr list select the same behavior as a totally-empty
field would; for instance "host=a,,b" is equivalent to "host=a,/tmp,b"
if DEFAULT_PGSOCKET_DIR is /tmp. Things worked that way in some cases
already, but not consistently so, which contributed to the confusion
about what key ~/.pgpass would get searched with.
Update documentation accordingly, and also clarify some nearby text.
Back-patch to v10 where the host/hostaddr list functionality was
introduced.
Discussion: https://postgr.es/m/30805.1532749137@sss.pgh.pa.us
2018-08-01 18:30:36 +02:00
|
|
|
int i;
|
|
|
|
|
2016-11-03 14:25:20 +01:00
|
|
|
/*
|
|
|
|
* Allocate memory for details about each host to which we might possibly
|
2017-07-10 11:28:57 +02:00
|
|
|
* try to connect. For that, count the number of elements in the hostaddr
|
|
|
|
* or host options. If neither is given, assume one host.
|
2016-11-03 14:25:20 +01:00
|
|
|
*/
|
|
|
|
conn->whichhost = 0;
|
2017-07-10 11:28:57 +02:00
|
|
|
if (conn->pghostaddr && conn->pghostaddr[0] != '\0')
|
|
|
|
conn->nconnhost = count_comma_separated_elems(conn->pghostaddr);
|
|
|
|
else if (conn->pghost && conn->pghost[0] != '\0')
|
|
|
|
conn->nconnhost = count_comma_separated_elems(conn->pghost);
|
|
|
|
else
|
|
|
|
conn->nconnhost = 1;
|
2016-11-03 14:25:20 +01:00
|
|
|
conn->connhost = (pg_conn_host *)
|
|
|
|
calloc(conn->nconnhost, sizeof(pg_conn_host));
|
|
|
|
if (conn->connhost == NULL)
|
|
|
|
goto oom_error;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We now have one pg_conn_host structure per possible host. Fill in the
|
Fix libpq's code for searching .pgpass; rationalize empty-list-item cases.
Before v10, we always searched ~/.pgpass using the host parameter,
and nothing else, to match to the "hostname" field of ~/.pgpass.
(However, null host or host matching DEFAULT_PGSOCKET_DIR was replaced by
"localhost".) In v10, this got broken by commit 274bb2b38, repaired by
commit bdac9836d, and broken again by commit 7b02ba62e; in the code
actually shipped, we'd search with hostaddr if both that and host were
specified --- though oddly, *not* if only hostaddr were specified.
Since this is directly contrary to the documentation, and not
backwards-compatible, it's clearly a bug.
However, the change wasn't totally without justification, even though it
wasn't done quite right, because the pre-v10 behavior has arguably been
buggy since we added hostaddr. If hostaddr is specified and host isn't,
the pre-v10 code will search ~/.pgpass for "localhost", and ship that
password off to a server that most likely isn't local at all. That's
unhelpful at best, and could be a security breach at worst.
Therefore, rather than just revert to that old behavior, let's define
the behavior as "search with host if provided, else with hostaddr if
provided, else search for localhost". (As before, a host name matching
DEFAULT_PGSOCKET_DIR is replaced by localhost.) This matches the
behavior of the actual connection code, so that we don't pick up an
inappropriate password; and it allows useful searches to happen when
only hostaddr is given.
While we're messing around here, ensure that empty elements within a
host or hostaddr list select the same behavior as a totally-empty
field would; for instance "host=a,,b" is equivalent to "host=a,/tmp,b"
if DEFAULT_PGSOCKET_DIR is /tmp. Things worked that way in some cases
already, but not consistently so, which contributed to the confusion
about what key ~/.pgpass would get searched with.
Update documentation accordingly, and also clarify some nearby text.
Back-patch to v10 where the host/hostaddr list functionality was
introduced.
Discussion: https://postgr.es/m/30805.1532749137@sss.pgh.pa.us
2018-08-01 18:30:36 +02:00
|
|
|
* host and hostaddr fields for each, by splitting the parameter strings.
|
2016-11-03 14:25:20 +01:00
|
|
|
*/
|
|
|
|
if (conn->pghostaddr != NULL && conn->pghostaddr[0] != '\0')
|
|
|
|
{
|
2017-07-10 11:28:57 +02:00
|
|
|
char *s = conn->pghostaddr;
|
|
|
|
bool more = true;
|
|
|
|
|
|
|
|
for (i = 0; i < conn->nconnhost && more; i++)
|
|
|
|
{
|
|
|
|
conn->connhost[i].hostaddr = parse_comma_separated_list(&s, &more);
|
|
|
|
if (conn->connhost[i].hostaddr == NULL)
|
|
|
|
goto oom_error;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If hostaddr was given, the array was allocated according to the
|
|
|
|
* number of elements in the hostaddr list, so it really should be the
|
|
|
|
* right size.
|
|
|
|
*/
|
|
|
|
Assert(!more);
|
|
|
|
Assert(i == conn->nconnhost);
|
2016-11-03 14:25:20 +01:00
|
|
|
}
|
2017-07-10 11:28:57 +02:00
|
|
|
|
|
|
|
if (conn->pghost != NULL && conn->pghost[0] != '\0')
|
2016-11-03 14:25:20 +01:00
|
|
|
{
|
|
|
|
char *s = conn->pghost;
|
2017-07-10 11:28:57 +02:00
|
|
|
bool more = true;
|
2016-11-03 14:25:20 +01:00
|
|
|
|
2017-07-10 11:28:57 +02:00
|
|
|
for (i = 0; i < conn->nconnhost && more; i++)
|
2016-11-03 14:25:20 +01:00
|
|
|
{
|
2017-07-10 11:28:57 +02:00
|
|
|
conn->connhost[i].host = parse_comma_separated_list(&s, &more);
|
2016-11-03 14:25:20 +01:00
|
|
|
if (conn->connhost[i].host == NULL)
|
|
|
|
goto oom_error;
|
2017-07-10 11:28:57 +02:00
|
|
|
}
|
Fix libpq's code for searching .pgpass; rationalize empty-list-item cases.
Before v10, we always searched ~/.pgpass using the host parameter,
and nothing else, to match to the "hostname" field of ~/.pgpass.
(However, null host or host matching DEFAULT_PGSOCKET_DIR was replaced by
"localhost".) In v10, this got broken by commit 274bb2b38, repaired by
commit bdac9836d, and broken again by commit 7b02ba62e; in the code
actually shipped, we'd search with hostaddr if both that and host were
specified --- though oddly, *not* if only hostaddr were specified.
Since this is directly contrary to the documentation, and not
backwards-compatible, it's clearly a bug.
However, the change wasn't totally without justification, even though it
wasn't done quite right, because the pre-v10 behavior has arguably been
buggy since we added hostaddr. If hostaddr is specified and host isn't,
the pre-v10 code will search ~/.pgpass for "localhost", and ship that
password off to a server that most likely isn't local at all. That's
unhelpful at best, and could be a security breach at worst.
Therefore, rather than just revert to that old behavior, let's define
the behavior as "search with host if provided, else with hostaddr if
provided, else search for localhost". (As before, a host name matching
DEFAULT_PGSOCKET_DIR is replaced by localhost.) This matches the
behavior of the actual connection code, so that we don't pick up an
inappropriate password; and it allows useful searches to happen when
only hostaddr is given.
While we're messing around here, ensure that empty elements within a
host or hostaddr list select the same behavior as a totally-empty
field would; for instance "host=a,,b" is equivalent to "host=a,/tmp,b"
if DEFAULT_PGSOCKET_DIR is /tmp. Things worked that way in some cases
already, but not consistently so, which contributed to the confusion
about what key ~/.pgpass would get searched with.
Update documentation accordingly, and also clarify some nearby text.
Back-patch to v10 where the host/hostaddr list functionality was
introduced.
Discussion: https://postgr.es/m/30805.1532749137@sss.pgh.pa.us
2018-08-01 18:30:36 +02:00
|
|
|
|
|
|
|
/* Check for wrong number of host items. */
|
2017-07-10 11:28:57 +02:00
|
|
|
if (more || i != conn->nconnhost)
|
|
|
|
{
|
|
|
|
conn->status = CONNECTION_BAD;
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
2018-01-21 13:40:55 +01:00
|
|
|
libpq_gettext("could not match %d host names to %d hostaddr values\n"),
|
2017-07-10 14:29:36 +02:00
|
|
|
count_comma_separated_elems(conn->pghost), conn->nconnhost);
|
2017-07-10 11:28:57 +02:00
|
|
|
return false;
|
2016-11-03 14:25:20 +01:00
|
|
|
}
|
|
|
|
}
|
2017-07-10 11:28:57 +02:00
|
|
|
|
|
|
|
/*
|
Fix libpq's code for searching .pgpass; rationalize empty-list-item cases.
Before v10, we always searched ~/.pgpass using the host parameter,
and nothing else, to match to the "hostname" field of ~/.pgpass.
(However, null host or host matching DEFAULT_PGSOCKET_DIR was replaced by
"localhost".) In v10, this got broken by commit 274bb2b38, repaired by
commit bdac9836d, and broken again by commit 7b02ba62e; in the code
actually shipped, we'd search with hostaddr if both that and host were
specified --- though oddly, *not* if only hostaddr were specified.
Since this is directly contrary to the documentation, and not
backwards-compatible, it's clearly a bug.
However, the change wasn't totally without justification, even though it
wasn't done quite right, because the pre-v10 behavior has arguably been
buggy since we added hostaddr. If hostaddr is specified and host isn't,
the pre-v10 code will search ~/.pgpass for "localhost", and ship that
password off to a server that most likely isn't local at all. That's
unhelpful at best, and could be a security breach at worst.
Therefore, rather than just revert to that old behavior, let's define
the behavior as "search with host if provided, else with hostaddr if
provided, else search for localhost". (As before, a host name matching
DEFAULT_PGSOCKET_DIR is replaced by localhost.) This matches the
behavior of the actual connection code, so that we don't pick up an
inappropriate password; and it allows useful searches to happen when
only hostaddr is given.
While we're messing around here, ensure that empty elements within a
host or hostaddr list select the same behavior as a totally-empty
field would; for instance "host=a,,b" is equivalent to "host=a,/tmp,b"
if DEFAULT_PGSOCKET_DIR is /tmp. Things worked that way in some cases
already, but not consistently so, which contributed to the confusion
about what key ~/.pgpass would get searched with.
Update documentation accordingly, and also clarify some nearby text.
Back-patch to v10 where the host/hostaddr list functionality was
introduced.
Discussion: https://postgr.es/m/30805.1532749137@sss.pgh.pa.us
2018-08-01 18:30:36 +02:00
|
|
|
* Now, for each host slot, identify the type of address spec, and fill in
|
|
|
|
* the default address if nothing was given.
|
2017-07-10 11:28:57 +02:00
|
|
|
*/
|
Fix libpq's code for searching .pgpass; rationalize empty-list-item cases.
Before v10, we always searched ~/.pgpass using the host parameter,
and nothing else, to match to the "hostname" field of ~/.pgpass.
(However, null host or host matching DEFAULT_PGSOCKET_DIR was replaced by
"localhost".) In v10, this got broken by commit 274bb2b38, repaired by
commit bdac9836d, and broken again by commit 7b02ba62e; in the code
actually shipped, we'd search with hostaddr if both that and host were
specified --- though oddly, *not* if only hostaddr were specified.
Since this is directly contrary to the documentation, and not
backwards-compatible, it's clearly a bug.
However, the change wasn't totally without justification, even though it
wasn't done quite right, because the pre-v10 behavior has arguably been
buggy since we added hostaddr. If hostaddr is specified and host isn't,
the pre-v10 code will search ~/.pgpass for "localhost", and ship that
password off to a server that most likely isn't local at all. That's
unhelpful at best, and could be a security breach at worst.
Therefore, rather than just revert to that old behavior, let's define
the behavior as "search with host if provided, else with hostaddr if
provided, else search for localhost". (As before, a host name matching
DEFAULT_PGSOCKET_DIR is replaced by localhost.) This matches the
behavior of the actual connection code, so that we don't pick up an
inappropriate password; and it allows useful searches to happen when
only hostaddr is given.
While we're messing around here, ensure that empty elements within a
host or hostaddr list select the same behavior as a totally-empty
field would; for instance "host=a,,b" is equivalent to "host=a,/tmp,b"
if DEFAULT_PGSOCKET_DIR is /tmp. Things worked that way in some cases
already, but not consistently so, which contributed to the confusion
about what key ~/.pgpass would get searched with.
Update documentation accordingly, and also clarify some nearby text.
Back-patch to v10 where the host/hostaddr list functionality was
introduced.
Discussion: https://postgr.es/m/30805.1532749137@sss.pgh.pa.us
2018-08-01 18:30:36 +02:00
|
|
|
for (i = 0; i < conn->nconnhost; i++)
|
2016-11-03 14:25:20 +01:00
|
|
|
{
|
Fix libpq's code for searching .pgpass; rationalize empty-list-item cases.
Before v10, we always searched ~/.pgpass using the host parameter,
and nothing else, to match to the "hostname" field of ~/.pgpass.
(However, null host or host matching DEFAULT_PGSOCKET_DIR was replaced by
"localhost".) In v10, this got broken by commit 274bb2b38, repaired by
commit bdac9836d, and broken again by commit 7b02ba62e; in the code
actually shipped, we'd search with hostaddr if both that and host were
specified --- though oddly, *not* if only hostaddr were specified.
Since this is directly contrary to the documentation, and not
backwards-compatible, it's clearly a bug.
However, the change wasn't totally without justification, even though it
wasn't done quite right, because the pre-v10 behavior has arguably been
buggy since we added hostaddr. If hostaddr is specified and host isn't,
the pre-v10 code will search ~/.pgpass for "localhost", and ship that
password off to a server that most likely isn't local at all. That's
unhelpful at best, and could be a security breach at worst.
Therefore, rather than just revert to that old behavior, let's define
the behavior as "search with host if provided, else with hostaddr if
provided, else search for localhost". (As before, a host name matching
DEFAULT_PGSOCKET_DIR is replaced by localhost.) This matches the
behavior of the actual connection code, so that we don't pick up an
inappropriate password; and it allows useful searches to happen when
only hostaddr is given.
While we're messing around here, ensure that empty elements within a
host or hostaddr list select the same behavior as a totally-empty
field would; for instance "host=a,,b" is equivalent to "host=a,/tmp,b"
if DEFAULT_PGSOCKET_DIR is /tmp. Things worked that way in some cases
already, but not consistently so, which contributed to the confusion
about what key ~/.pgpass would get searched with.
Update documentation accordingly, and also clarify some nearby text.
Back-patch to v10 where the host/hostaddr list functionality was
introduced.
Discussion: https://postgr.es/m/30805.1532749137@sss.pgh.pa.us
2018-08-01 18:30:36 +02:00
|
|
|
pg_conn_host *ch = &conn->connhost[i];
|
|
|
|
|
|
|
|
if (ch->hostaddr != NULL && ch->hostaddr[0] != '\0')
|
|
|
|
ch->type = CHT_HOST_ADDRESS;
|
|
|
|
else if (ch->host != NULL && ch->host[0] != '\0')
|
|
|
|
{
|
|
|
|
ch->type = CHT_HOST_NAME;
|
2020-11-25 08:14:23 +01:00
|
|
|
if (is_unixsock_path(ch->host))
|
Fix libpq's code for searching .pgpass; rationalize empty-list-item cases.
Before v10, we always searched ~/.pgpass using the host parameter,
and nothing else, to match to the "hostname" field of ~/.pgpass.
(However, null host or host matching DEFAULT_PGSOCKET_DIR was replaced by
"localhost".) In v10, this got broken by commit 274bb2b38, repaired by
commit bdac9836d, and broken again by commit 7b02ba62e; in the code
actually shipped, we'd search with hostaddr if both that and host were
specified --- though oddly, *not* if only hostaddr were specified.
Since this is directly contrary to the documentation, and not
backwards-compatible, it's clearly a bug.
However, the change wasn't totally without justification, even though it
wasn't done quite right, because the pre-v10 behavior has arguably been
buggy since we added hostaddr. If hostaddr is specified and host isn't,
the pre-v10 code will search ~/.pgpass for "localhost", and ship that
password off to a server that most likely isn't local at all. That's
unhelpful at best, and could be a security breach at worst.
Therefore, rather than just revert to that old behavior, let's define
the behavior as "search with host if provided, else with hostaddr if
provided, else search for localhost". (As before, a host name matching
DEFAULT_PGSOCKET_DIR is replaced by localhost.) This matches the
behavior of the actual connection code, so that we don't pick up an
inappropriate password; and it allows useful searches to happen when
only hostaddr is given.
While we're messing around here, ensure that empty elements within a
host or hostaddr list select the same behavior as a totally-empty
field would; for instance "host=a,,b" is equivalent to "host=a,/tmp,b"
if DEFAULT_PGSOCKET_DIR is /tmp. Things worked that way in some cases
already, but not consistently so, which contributed to the confusion
about what key ~/.pgpass would get searched with.
Update documentation accordingly, and also clarify some nearby text.
Back-patch to v10 where the host/hostaddr list functionality was
introduced.
Discussion: https://postgr.es/m/30805.1532749137@sss.pgh.pa.us
2018-08-01 18:30:36 +02:00
|
|
|
ch->type = CHT_UNIX_SOCKET;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2022-06-16 21:50:56 +02:00
|
|
|
free(ch->host);
|
2022-03-04 19:23:58 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* This bit selects the default host location. If you change
|
|
|
|
* this, see also pg_regress.
|
|
|
|
*/
|
2020-01-31 16:26:12 +01:00
|
|
|
if (DEFAULT_PGSOCKET_DIR[0])
|
|
|
|
{
|
|
|
|
ch->host = strdup(DEFAULT_PGSOCKET_DIR);
|
|
|
|
ch->type = CHT_UNIX_SOCKET;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
ch->host = strdup(DefaultHost);
|
|
|
|
ch->type = CHT_HOST_NAME;
|
|
|
|
}
|
Fix libpq's code for searching .pgpass; rationalize empty-list-item cases.
Before v10, we always searched ~/.pgpass using the host parameter,
and nothing else, to match to the "hostname" field of ~/.pgpass.
(However, null host or host matching DEFAULT_PGSOCKET_DIR was replaced by
"localhost".) In v10, this got broken by commit 274bb2b38, repaired by
commit bdac9836d, and broken again by commit 7b02ba62e; in the code
actually shipped, we'd search with hostaddr if both that and host were
specified --- though oddly, *not* if only hostaddr were specified.
Since this is directly contrary to the documentation, and not
backwards-compatible, it's clearly a bug.
However, the change wasn't totally without justification, even though it
wasn't done quite right, because the pre-v10 behavior has arguably been
buggy since we added hostaddr. If hostaddr is specified and host isn't,
the pre-v10 code will search ~/.pgpass for "localhost", and ship that
password off to a server that most likely isn't local at all. That's
unhelpful at best, and could be a security breach at worst.
Therefore, rather than just revert to that old behavior, let's define
the behavior as "search with host if provided, else with hostaddr if
provided, else search for localhost". (As before, a host name matching
DEFAULT_PGSOCKET_DIR is replaced by localhost.) This matches the
behavior of the actual connection code, so that we don't pick up an
inappropriate password; and it allows useful searches to happen when
only hostaddr is given.
While we're messing around here, ensure that empty elements within a
host or hostaddr list select the same behavior as a totally-empty
field would; for instance "host=a,,b" is equivalent to "host=a,/tmp,b"
if DEFAULT_PGSOCKET_DIR is /tmp. Things worked that way in some cases
already, but not consistently so, which contributed to the confusion
about what key ~/.pgpass would get searched with.
Update documentation accordingly, and also clarify some nearby text.
Back-patch to v10 where the host/hostaddr list functionality was
introduced.
Discussion: https://postgr.es/m/30805.1532749137@sss.pgh.pa.us
2018-08-01 18:30:36 +02:00
|
|
|
if (ch->host == NULL)
|
|
|
|
goto oom_error;
|
|
|
|
}
|
2016-11-03 14:25:20 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Next, work out the port number corresponding to each host name.
|
Fix libpq's code for searching .pgpass; rationalize empty-list-item cases.
Before v10, we always searched ~/.pgpass using the host parameter,
and nothing else, to match to the "hostname" field of ~/.pgpass.
(However, null host or host matching DEFAULT_PGSOCKET_DIR was replaced by
"localhost".) In v10, this got broken by commit 274bb2b38, repaired by
commit bdac9836d, and broken again by commit 7b02ba62e; in the code
actually shipped, we'd search with hostaddr if both that and host were
specified --- though oddly, *not* if only hostaddr were specified.
Since this is directly contrary to the documentation, and not
backwards-compatible, it's clearly a bug.
However, the change wasn't totally without justification, even though it
wasn't done quite right, because the pre-v10 behavior has arguably been
buggy since we added hostaddr. If hostaddr is specified and host isn't,
the pre-v10 code will search ~/.pgpass for "localhost", and ship that
password off to a server that most likely isn't local at all. That's
unhelpful at best, and could be a security breach at worst.
Therefore, rather than just revert to that old behavior, let's define
the behavior as "search with host if provided, else with hostaddr if
provided, else search for localhost". (As before, a host name matching
DEFAULT_PGSOCKET_DIR is replaced by localhost.) This matches the
behavior of the actual connection code, so that we don't pick up an
inappropriate password; and it allows useful searches to happen when
only hostaddr is given.
While we're messing around here, ensure that empty elements within a
host or hostaddr list select the same behavior as a totally-empty
field would; for instance "host=a,,b" is equivalent to "host=a,/tmp,b"
if DEFAULT_PGSOCKET_DIR is /tmp. Things worked that way in some cases
already, but not consistently so, which contributed to the confusion
about what key ~/.pgpass would get searched with.
Update documentation accordingly, and also clarify some nearby text.
Back-patch to v10 where the host/hostaddr list functionality was
introduced.
Discussion: https://postgr.es/m/30805.1532749137@sss.pgh.pa.us
2018-08-01 18:30:36 +02:00
|
|
|
*
|
|
|
|
* Note: unlike the above for host names, this could leave the port fields
|
|
|
|
* as null or empty strings. We will substitute DEF_PGPORT whenever we
|
|
|
|
* read such a port field.
|
2016-11-03 14:25:20 +01:00
|
|
|
*/
|
|
|
|
if (conn->pgport != NULL && conn->pgport[0] != '\0')
|
|
|
|
{
|
|
|
|
char *s = conn->pgport;
|
2017-07-10 11:28:57 +02:00
|
|
|
bool more = true;
|
2016-11-03 14:25:20 +01:00
|
|
|
|
2017-07-10 11:28:57 +02:00
|
|
|
for (i = 0; i < conn->nconnhost && more; i++)
|
2016-11-03 14:25:20 +01:00
|
|
|
{
|
2017-07-10 11:28:57 +02:00
|
|
|
conn->connhost[i].port = parse_comma_separated_list(&s, &more);
|
|
|
|
if (conn->connhost[i].port == NULL)
|
|
|
|
goto oom_error;
|
|
|
|
}
|
2016-11-03 14:25:20 +01:00
|
|
|
|
2017-07-10 11:28:57 +02:00
|
|
|
/*
|
|
|
|
* If exactly one port was given, use it for every host. Otherwise,
|
|
|
|
* there must be exactly as many ports as there were hosts.
|
|
|
|
*/
|
|
|
|
if (i == 1 && !more)
|
|
|
|
{
|
|
|
|
for (i = 1; i < conn->nconnhost; i++)
|
2016-11-03 14:25:20 +01:00
|
|
|
{
|
2017-07-10 11:28:57 +02:00
|
|
|
conn->connhost[i].port = strdup(conn->connhost[0].port);
|
2016-11-03 14:25:20 +01:00
|
|
|
if (conn->connhost[i].port == NULL)
|
|
|
|
goto oom_error;
|
|
|
|
}
|
|
|
|
}
|
2017-07-10 11:28:57 +02:00
|
|
|
else if (more || i != conn->nconnhost)
|
2016-11-03 14:25:20 +01:00
|
|
|
{
|
|
|
|
conn->status = CONNECTION_BAD;
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
2016-11-03 14:25:20 +01:00
|
|
|
libpq_gettext("could not match %d port numbers to %d hosts\n"),
|
2017-07-10 11:28:57 +02:00
|
|
|
count_comma_separated_elems(conn->pgport), conn->nconnhost);
|
2016-11-03 14:25:20 +01:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
Fix libpq's behavior when /etc/passwd isn't readable.
Some users run their applications in chroot environments that lack an
/etc/passwd file. This means that the current UID's user name and home
directory are not obtainable. libpq used to be all right with that,
so long as the database role name to use was specified explicitly.
But commit a4c8f14364c27508233f8a31ac4b10a4c90235a9 broke such cases by
causing any failure of pg_fe_getauthname() to be treated as a hard error.
In any case it did little to advance its nominal goal of causing errors
in pg_fe_getauthname() to be reported better. So revert that and instead
put some real error-reporting code in place. This requires changes to the
APIs of pg_fe_getauthname() and pqGetpwuid(), since the latter had
departed from the POSIX-specified API of getpwuid_r() in a way that made
it impossible to distinguish actual lookup errors from "no such user".
To allow such failures to be reported, while not failing if the caller
supplies a role name, add a second call of pg_fe_getauthname() in
connectOptions2(). This is a tad ugly, and could perhaps be avoided with
some refactoring of PQsetdbLogin(), but I'll leave that idea for later.
(Note that the complained-of misbehavior only occurs in PQsetdbLogin,
not when using the PQconnect functions, because in the latter we will
never bother to call pg_fe_getauthname() if the user gives a role name.)
In passing also clean up the Windows-side usage of GetUserName(): the
recommended buffer size is 257 bytes, the passed buffer length should
be the buffer size not buffer size less 1, and any error is reported
by GetLastError() not errno.
Per report from Christoph Berg. Back-patch to 9.4 where the chroot
failure case was introduced. The generally poor reporting of errors
here is of very long standing, of course, but given the lack of field
complaints about it we won't risk changing these APIs further back
(even though they're theoretically internal to libpq).
2015-01-11 18:35:44 +01:00
|
|
|
/*
|
|
|
|
* If user name was not given, fetch it. (Most likely, the fetch will
|
|
|
|
* fail, since the only way we get here is if pg_fe_getauthname() failed
|
|
|
|
* during conninfo_add_defaults(). But now we want an error message.)
|
|
|
|
*/
|
|
|
|
if (conn->pguser == NULL || conn->pguser[0] == '\0')
|
|
|
|
{
|
2022-06-16 21:50:56 +02:00
|
|
|
free(conn->pguser);
|
Fix libpq's behavior when /etc/passwd isn't readable.
Some users run their applications in chroot environments that lack an
/etc/passwd file. This means that the current UID's user name and home
directory are not obtainable. libpq used to be all right with that,
so long as the database role name to use was specified explicitly.
But commit a4c8f14364c27508233f8a31ac4b10a4c90235a9 broke such cases by
causing any failure of pg_fe_getauthname() to be treated as a hard error.
In any case it did little to advance its nominal goal of causing errors
in pg_fe_getauthname() to be reported better. So revert that and instead
put some real error-reporting code in place. This requires changes to the
APIs of pg_fe_getauthname() and pqGetpwuid(), since the latter had
departed from the POSIX-specified API of getpwuid_r() in a way that made
it impossible to distinguish actual lookup errors from "no such user".
To allow such failures to be reported, while not failing if the caller
supplies a role name, add a second call of pg_fe_getauthname() in
connectOptions2(). This is a tad ugly, and could perhaps be avoided with
some refactoring of PQsetdbLogin(), but I'll leave that idea for later.
(Note that the complained-of misbehavior only occurs in PQsetdbLogin,
not when using the PQconnect functions, because in the latter we will
never bother to call pg_fe_getauthname() if the user gives a role name.)
In passing also clean up the Windows-side usage of GetUserName(): the
recommended buffer size is 257 bytes, the passed buffer length should
be the buffer size not buffer size less 1, and any error is reported
by GetLastError() not errno.
Per report from Christoph Berg. Back-patch to 9.4 where the chroot
failure case was introduced. The generally poor reporting of errors
here is of very long standing, of course, but given the lack of field
complaints about it we won't risk changing these APIs further back
(even though they're theoretically internal to libpq).
2015-01-11 18:35:44 +01:00
|
|
|
conn->pguser = pg_fe_getauthname(&conn->errorMessage);
|
|
|
|
if (!conn->pguser)
|
|
|
|
{
|
|
|
|
conn->status = CONNECTION_BAD;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2003-05-05 02:44:56 +02:00
|
|
|
/*
|
|
|
|
* If database name was not given, default it to equal user name
|
|
|
|
*/
|
Fix libpq's behavior when /etc/passwd isn't readable.
Some users run their applications in chroot environments that lack an
/etc/passwd file. This means that the current UID's user name and home
directory are not obtainable. libpq used to be all right with that,
so long as the database role name to use was specified explicitly.
But commit a4c8f14364c27508233f8a31ac4b10a4c90235a9 broke such cases by
causing any failure of pg_fe_getauthname() to be treated as a hard error.
In any case it did little to advance its nominal goal of causing errors
in pg_fe_getauthname() to be reported better. So revert that and instead
put some real error-reporting code in place. This requires changes to the
APIs of pg_fe_getauthname() and pqGetpwuid(), since the latter had
departed from the POSIX-specified API of getpwuid_r() in a way that made
it impossible to distinguish actual lookup errors from "no such user".
To allow such failures to be reported, while not failing if the caller
supplies a role name, add a second call of pg_fe_getauthname() in
connectOptions2(). This is a tad ugly, and could perhaps be avoided with
some refactoring of PQsetdbLogin(), but I'll leave that idea for later.
(Note that the complained-of misbehavior only occurs in PQsetdbLogin,
not when using the PQconnect functions, because in the latter we will
never bother to call pg_fe_getauthname() if the user gives a role name.)
In passing also clean up the Windows-side usage of GetUserName(): the
recommended buffer size is 257 bytes, the passed buffer length should
be the buffer size not buffer size less 1, and any error is reported
by GetLastError() not errno.
Per report from Christoph Berg. Back-patch to 9.4 where the chroot
failure case was introduced. The generally poor reporting of errors
here is of very long standing, of course, but given the lack of field
complaints about it we won't risk changing these APIs further back
(even though they're theoretically internal to libpq).
2015-01-11 18:35:44 +01:00
|
|
|
if (conn->dbName == NULL || conn->dbName[0] == '\0')
|
2003-05-05 02:44:56 +02:00
|
|
|
{
|
2022-06-16 21:50:56 +02:00
|
|
|
free(conn->dbName);
|
2003-05-05 02:44:56 +02:00
|
|
|
conn->dbName = strdup(conn->pguser);
|
2014-11-25 11:55:00 +01:00
|
|
|
if (!conn->dbName)
|
|
|
|
goto oom_error;
|
2003-05-05 02:44:56 +02:00
|
|
|
}
|
|
|
|
|
2003-04-28 06:29:12 +02:00
|
|
|
/*
|
Fix libpq's code for searching .pgpass; rationalize empty-list-item cases.
Before v10, we always searched ~/.pgpass using the host parameter,
and nothing else, to match to the "hostname" field of ~/.pgpass.
(However, null host or host matching DEFAULT_PGSOCKET_DIR was replaced by
"localhost".) In v10, this got broken by commit 274bb2b38, repaired by
commit bdac9836d, and broken again by commit 7b02ba62e; in the code
actually shipped, we'd search with hostaddr if both that and host were
specified --- though oddly, *not* if only hostaddr were specified.
Since this is directly contrary to the documentation, and not
backwards-compatible, it's clearly a bug.
However, the change wasn't totally without justification, even though it
wasn't done quite right, because the pre-v10 behavior has arguably been
buggy since we added hostaddr. If hostaddr is specified and host isn't,
the pre-v10 code will search ~/.pgpass for "localhost", and ship that
password off to a server that most likely isn't local at all. That's
unhelpful at best, and could be a security breach at worst.
Therefore, rather than just revert to that old behavior, let's define
the behavior as "search with host if provided, else with hostaddr if
provided, else search for localhost". (As before, a host name matching
DEFAULT_PGSOCKET_DIR is replaced by localhost.) This matches the
behavior of the actual connection code, so that we don't pick up an
inappropriate password; and it allows useful searches to happen when
only hostaddr is given.
While we're messing around here, ensure that empty elements within a
host or hostaddr list select the same behavior as a totally-empty
field would; for instance "host=a,,b" is equivalent to "host=a,/tmp,b"
if DEFAULT_PGSOCKET_DIR is /tmp. Things worked that way in some cases
already, but not consistently so, which contributed to the confusion
about what key ~/.pgpass would get searched with.
Update documentation accordingly, and also clarify some nearby text.
Back-patch to v10 where the host/hostaddr list functionality was
introduced.
Discussion: https://postgr.es/m/30805.1532749137@sss.pgh.pa.us
2018-08-01 18:30:36 +02:00
|
|
|
* If password was not given, try to look it up in password file. Note
|
|
|
|
* that the result might be different for each host/port pair.
|
2003-04-28 06:29:12 +02:00
|
|
|
*/
|
|
|
|
if (conn->pgpass == NULL || conn->pgpass[0] == '\0')
|
|
|
|
{
|
Fix libpq to not require user's home directory to exist.
Some people like to run libpq-using applications in environments where
there's no home directory. We've broken that scenario before (cf commits
5b4067798 and bd58d9d88), and commit ba005f193 broke it again, by making
it a hard error if we fail to get the home directory name while looking
for ~/.pgpass. The previous precedent is that if we can't get the home
directory name, we should just silently act as though the file we hoped
to find there doesn't exist. Rearrange the new code to honor that.
Looking around, the service-file code added by commit 41a4e4595 had the
same disease. Apparently, that escaped notice because it only runs when
a service name has been specified, which I guess the people who use this
scenario don't do. Nonetheless, it's wrong too, so fix that case as well.
Add a comment about this policy to pqGetHomeDirectory, in the probably
vain hope of forestalling the same error in future. And upgrade the
rather miserable commenting in parseServiceInfo, too.
In passing, also back off parseServiceInfo's assumption that only ENOENT
is an ignorable error from stat() when checking a service file. We would
need to ignore at least ENOTDIR as well (cf 5b4067798), and seeing that
the far-better-tested code for ~/.pgpass treats all stat() failures alike,
I think this code ought to as well.
Per bug #14872 from Dan Watson. Back-patch the .pgpass change to v10
where ba005f193 came in. The service-file bugs are far older, so
back-patch the other changes to all supported branches.
Discussion: https://postgr.es/m/20171025200457.1471.34504@wrigleys.postgresql.org
2017-10-26 01:32:24 +02:00
|
|
|
/* If password file wasn't specified, use ~/PGPASSFILE */
|
2017-01-24 23:06:21 +01:00
|
|
|
if (conn->pgpassfile == NULL || conn->pgpassfile[0] == '\0')
|
|
|
|
{
|
|
|
|
char homedir[MAXPGPATH];
|
|
|
|
|
Fix libpq to not require user's home directory to exist.
Some people like to run libpq-using applications in environments where
there's no home directory. We've broken that scenario before (cf commits
5b4067798 and bd58d9d88), and commit ba005f193 broke it again, by making
it a hard error if we fail to get the home directory name while looking
for ~/.pgpass. The previous precedent is that if we can't get the home
directory name, we should just silently act as though the file we hoped
to find there doesn't exist. Rearrange the new code to honor that.
Looking around, the service-file code added by commit 41a4e4595 had the
same disease. Apparently, that escaped notice because it only runs when
a service name has been specified, which I guess the people who use this
scenario don't do. Nonetheless, it's wrong too, so fix that case as well.
Add a comment about this policy to pqGetHomeDirectory, in the probably
vain hope of forestalling the same error in future. And upgrade the
rather miserable commenting in parseServiceInfo, too.
In passing, also back off parseServiceInfo's assumption that only ENOENT
is an ignorable error from stat() when checking a service file. We would
need to ignore at least ENOTDIR as well (cf 5b4067798), and seeing that
the far-better-tested code for ~/.pgpass treats all stat() failures alike,
I think this code ought to as well.
Per bug #14872 from Dan Watson. Back-patch the .pgpass change to v10
where ba005f193 came in. The service-file bugs are far older, so
back-patch the other changes to all supported branches.
Discussion: https://postgr.es/m/20171025200457.1471.34504@wrigleys.postgresql.org
2017-10-26 01:32:24 +02:00
|
|
|
if (pqGetHomeDirectory(homedir, sizeof(homedir)))
|
2017-01-24 23:06:21 +01:00
|
|
|
{
|
2022-06-16 21:50:56 +02:00
|
|
|
free(conn->pgpassfile);
|
Fix libpq to not require user's home directory to exist.
Some people like to run libpq-using applications in environments where
there's no home directory. We've broken that scenario before (cf commits
5b4067798 and bd58d9d88), and commit ba005f193 broke it again, by making
it a hard error if we fail to get the home directory name while looking
for ~/.pgpass. The previous precedent is that if we can't get the home
directory name, we should just silently act as though the file we hoped
to find there doesn't exist. Rearrange the new code to honor that.
Looking around, the service-file code added by commit 41a4e4595 had the
same disease. Apparently, that escaped notice because it only runs when
a service name has been specified, which I guess the people who use this
scenario don't do. Nonetheless, it's wrong too, so fix that case as well.
Add a comment about this policy to pqGetHomeDirectory, in the probably
vain hope of forestalling the same error in future. And upgrade the
rather miserable commenting in parseServiceInfo, too.
In passing, also back off parseServiceInfo's assumption that only ENOENT
is an ignorable error from stat() when checking a service file. We would
need to ignore at least ENOTDIR as well (cf 5b4067798), and seeing that
the far-better-tested code for ~/.pgpass treats all stat() failures alike,
I think this code ought to as well.
Per bug #14872 from Dan Watson. Back-patch the .pgpass change to v10
where ba005f193 came in. The service-file bugs are far older, so
back-patch the other changes to all supported branches.
Discussion: https://postgr.es/m/20171025200457.1471.34504@wrigleys.postgresql.org
2017-10-26 01:32:24 +02:00
|
|
|
conn->pgpassfile = malloc(MAXPGPATH);
|
|
|
|
if (!conn->pgpassfile)
|
|
|
|
goto oom_error;
|
|
|
|
snprintf(conn->pgpassfile, MAXPGPATH, "%s/%s",
|
|
|
|
homedir, PGPASSFILE);
|
2017-01-24 23:06:21 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
Fix libpq to not require user's home directory to exist.
Some people like to run libpq-using applications in environments where
there's no home directory. We've broken that scenario before (cf commits
5b4067798 and bd58d9d88), and commit ba005f193 broke it again, by making
it a hard error if we fail to get the home directory name while looking
for ~/.pgpass. The previous precedent is that if we can't get the home
directory name, we should just silently act as though the file we hoped
to find there doesn't exist. Rearrange the new code to honor that.
Looking around, the service-file code added by commit 41a4e4595 had the
same disease. Apparently, that escaped notice because it only runs when
a service name has been specified, which I guess the people who use this
scenario don't do. Nonetheless, it's wrong too, so fix that case as well.
Add a comment about this policy to pqGetHomeDirectory, in the probably
vain hope of forestalling the same error in future. And upgrade the
rather miserable commenting in parseServiceInfo, too.
In passing, also back off parseServiceInfo's assumption that only ENOENT
is an ignorable error from stat() when checking a service file. We would
need to ignore at least ENOTDIR as well (cf 5b4067798), and seeing that
the far-better-tested code for ~/.pgpass treats all stat() failures alike,
I think this code ought to as well.
Per bug #14872 from Dan Watson. Back-patch the .pgpass change to v10
where ba005f193 came in. The service-file bugs are far older, so
back-patch the other changes to all supported branches.
Discussion: https://postgr.es/m/20171025200457.1471.34504@wrigleys.postgresql.org
2017-10-26 01:32:24 +02:00
|
|
|
if (conn->pgpassfile != NULL && conn->pgpassfile[0] != '\0')
|
2014-11-25 11:55:00 +01:00
|
|
|
{
|
Fix libpq to not require user's home directory to exist.
Some people like to run libpq-using applications in environments where
there's no home directory. We've broken that scenario before (cf commits
5b4067798 and bd58d9d88), and commit ba005f193 broke it again, by making
it a hard error if we fail to get the home directory name while looking
for ~/.pgpass. The previous precedent is that if we can't get the home
directory name, we should just silently act as though the file we hoped
to find there doesn't exist. Rearrange the new code to honor that.
Looking around, the service-file code added by commit 41a4e4595 had the
same disease. Apparently, that escaped notice because it only runs when
a service name has been specified, which I guess the people who use this
scenario don't do. Nonetheless, it's wrong too, so fix that case as well.
Add a comment about this policy to pqGetHomeDirectory, in the probably
vain hope of forestalling the same error in future. And upgrade the
rather miserable commenting in parseServiceInfo, too.
In passing, also back off parseServiceInfo's assumption that only ENOENT
is an ignorable error from stat() when checking a service file. We would
need to ignore at least ENOTDIR as well (cf 5b4067798), and seeing that
the far-better-tested code for ~/.pgpass treats all stat() failures alike,
I think this code ought to as well.
Per bug #14872 from Dan Watson. Back-patch the .pgpass change to v10
where ba005f193 came in. The service-file bugs are far older, so
back-patch the other changes to all supported branches.
Discussion: https://postgr.es/m/20171025200457.1471.34504@wrigleys.postgresql.org
2017-10-26 01:32:24 +02:00
|
|
|
for (i = 0; i < conn->nconnhost; i++)
|
|
|
|
{
|
|
|
|
/*
|
Fix libpq's code for searching .pgpass; rationalize empty-list-item cases.
Before v10, we always searched ~/.pgpass using the host parameter,
and nothing else, to match to the "hostname" field of ~/.pgpass.
(However, null host or host matching DEFAULT_PGSOCKET_DIR was replaced by
"localhost".) In v10, this got broken by commit 274bb2b38, repaired by
commit bdac9836d, and broken again by commit 7b02ba62e; in the code
actually shipped, we'd search with hostaddr if both that and host were
specified --- though oddly, *not* if only hostaddr were specified.
Since this is directly contrary to the documentation, and not
backwards-compatible, it's clearly a bug.
However, the change wasn't totally without justification, even though it
wasn't done quite right, because the pre-v10 behavior has arguably been
buggy since we added hostaddr. If hostaddr is specified and host isn't,
the pre-v10 code will search ~/.pgpass for "localhost", and ship that
password off to a server that most likely isn't local at all. That's
unhelpful at best, and could be a security breach at worst.
Therefore, rather than just revert to that old behavior, let's define
the behavior as "search with host if provided, else with hostaddr if
provided, else search for localhost". (As before, a host name matching
DEFAULT_PGSOCKET_DIR is replaced by localhost.) This matches the
behavior of the actual connection code, so that we don't pick up an
inappropriate password; and it allows useful searches to happen when
only hostaddr is given.
While we're messing around here, ensure that empty elements within a
host or hostaddr list select the same behavior as a totally-empty
field would; for instance "host=a,,b" is equivalent to "host=a,/tmp,b"
if DEFAULT_PGSOCKET_DIR is /tmp. Things worked that way in some cases
already, but not consistently so, which contributed to the confusion
about what key ~/.pgpass would get searched with.
Update documentation accordingly, and also clarify some nearby text.
Back-patch to v10 where the host/hostaddr list functionality was
introduced.
Discussion: https://postgr.es/m/30805.1532749137@sss.pgh.pa.us
2018-08-01 18:30:36 +02:00
|
|
|
* Try to get a password for this host from file. We use host
|
|
|
|
* for the hostname search key if given, else hostaddr (at
|
|
|
|
* least one of them is guaranteed nonempty by now).
|
Fix libpq to not require user's home directory to exist.
Some people like to run libpq-using applications in environments where
there's no home directory. We've broken that scenario before (cf commits
5b4067798 and bd58d9d88), and commit ba005f193 broke it again, by making
it a hard error if we fail to get the home directory name while looking
for ~/.pgpass. The previous precedent is that if we can't get the home
directory name, we should just silently act as though the file we hoped
to find there doesn't exist. Rearrange the new code to honor that.
Looking around, the service-file code added by commit 41a4e4595 had the
same disease. Apparently, that escaped notice because it only runs when
a service name has been specified, which I guess the people who use this
scenario don't do. Nonetheless, it's wrong too, so fix that case as well.
Add a comment about this policy to pqGetHomeDirectory, in the probably
vain hope of forestalling the same error in future. And upgrade the
rather miserable commenting in parseServiceInfo, too.
In passing, also back off parseServiceInfo's assumption that only ENOENT
is an ignorable error from stat() when checking a service file. We would
need to ignore at least ENOTDIR as well (cf 5b4067798), and seeing that
the far-better-tested code for ~/.pgpass treats all stat() failures alike,
I think this code ought to as well.
Per bug #14872 from Dan Watson. Back-patch the .pgpass change to v10
where ba005f193 came in. The service-file bugs are far older, so
back-patch the other changes to all supported branches.
Discussion: https://postgr.es/m/20171025200457.1471.34504@wrigleys.postgresql.org
2017-10-26 01:32:24 +02:00
|
|
|
*/
|
Fix libpq's code for searching .pgpass; rationalize empty-list-item cases.
Before v10, we always searched ~/.pgpass using the host parameter,
and nothing else, to match to the "hostname" field of ~/.pgpass.
(However, null host or host matching DEFAULT_PGSOCKET_DIR was replaced by
"localhost".) In v10, this got broken by commit 274bb2b38, repaired by
commit bdac9836d, and broken again by commit 7b02ba62e; in the code
actually shipped, we'd search with hostaddr if both that and host were
specified --- though oddly, *not* if only hostaddr were specified.
Since this is directly contrary to the documentation, and not
backwards-compatible, it's clearly a bug.
However, the change wasn't totally without justification, even though it
wasn't done quite right, because the pre-v10 behavior has arguably been
buggy since we added hostaddr. If hostaddr is specified and host isn't,
the pre-v10 code will search ~/.pgpass for "localhost", and ship that
password off to a server that most likely isn't local at all. That's
unhelpful at best, and could be a security breach at worst.
Therefore, rather than just revert to that old behavior, let's define
the behavior as "search with host if provided, else with hostaddr if
provided, else search for localhost". (As before, a host name matching
DEFAULT_PGSOCKET_DIR is replaced by localhost.) This matches the
behavior of the actual connection code, so that we don't pick up an
inappropriate password; and it allows useful searches to happen when
only hostaddr is given.
While we're messing around here, ensure that empty elements within a
host or hostaddr list select the same behavior as a totally-empty
field would; for instance "host=a,,b" is equivalent to "host=a,/tmp,b"
if DEFAULT_PGSOCKET_DIR is /tmp. Things worked that way in some cases
already, but not consistently so, which contributed to the confusion
about what key ~/.pgpass would get searched with.
Update documentation accordingly, and also clarify some nearby text.
Back-patch to v10 where the host/hostaddr list functionality was
introduced.
Discussion: https://postgr.es/m/30805.1532749137@sss.pgh.pa.us
2018-08-01 18:30:36 +02:00
|
|
|
const char *pwhost = conn->connhost[i].host;
|
Fix libpq to not require user's home directory to exist.
Some people like to run libpq-using applications in environments where
there's no home directory. We've broken that scenario before (cf commits
5b4067798 and bd58d9d88), and commit ba005f193 broke it again, by making
it a hard error if we fail to get the home directory name while looking
for ~/.pgpass. The previous precedent is that if we can't get the home
directory name, we should just silently act as though the file we hoped
to find there doesn't exist. Rearrange the new code to honor that.
Looking around, the service-file code added by commit 41a4e4595 had the
same disease. Apparently, that escaped notice because it only runs when
a service name has been specified, which I guess the people who use this
scenario don't do. Nonetheless, it's wrong too, so fix that case as well.
Add a comment about this policy to pqGetHomeDirectory, in the probably
vain hope of forestalling the same error in future. And upgrade the
rather miserable commenting in parseServiceInfo, too.
In passing, also back off parseServiceInfo's assumption that only ENOENT
is an ignorable error from stat() when checking a service file. We would
need to ignore at least ENOTDIR as well (cf 5b4067798), and seeing that
the far-better-tested code for ~/.pgpass treats all stat() failures alike,
I think this code ought to as well.
Per bug #14872 from Dan Watson. Back-patch the .pgpass change to v10
where ba005f193 came in. The service-file bugs are far older, so
back-patch the other changes to all supported branches.
Discussion: https://postgr.es/m/20171025200457.1471.34504@wrigleys.postgresql.org
2017-10-26 01:32:24 +02:00
|
|
|
|
Fix libpq's code for searching .pgpass; rationalize empty-list-item cases.
Before v10, we always searched ~/.pgpass using the host parameter,
and nothing else, to match to the "hostname" field of ~/.pgpass.
(However, null host or host matching DEFAULT_PGSOCKET_DIR was replaced by
"localhost".) In v10, this got broken by commit 274bb2b38, repaired by
commit bdac9836d, and broken again by commit 7b02ba62e; in the code
actually shipped, we'd search with hostaddr if both that and host were
specified --- though oddly, *not* if only hostaddr were specified.
Since this is directly contrary to the documentation, and not
backwards-compatible, it's clearly a bug.
However, the change wasn't totally without justification, even though it
wasn't done quite right, because the pre-v10 behavior has arguably been
buggy since we added hostaddr. If hostaddr is specified and host isn't,
the pre-v10 code will search ~/.pgpass for "localhost", and ship that
password off to a server that most likely isn't local at all. That's
unhelpful at best, and could be a security breach at worst.
Therefore, rather than just revert to that old behavior, let's define
the behavior as "search with host if provided, else with hostaddr if
provided, else search for localhost". (As before, a host name matching
DEFAULT_PGSOCKET_DIR is replaced by localhost.) This matches the
behavior of the actual connection code, so that we don't pick up an
inappropriate password; and it allows useful searches to happen when
only hostaddr is given.
While we're messing around here, ensure that empty elements within a
host or hostaddr list select the same behavior as a totally-empty
field would; for instance "host=a,,b" is equivalent to "host=a,/tmp,b"
if DEFAULT_PGSOCKET_DIR is /tmp. Things worked that way in some cases
already, but not consistently so, which contributed to the confusion
about what key ~/.pgpass would get searched with.
Update documentation accordingly, and also clarify some nearby text.
Back-patch to v10 where the host/hostaddr list functionality was
introduced.
Discussion: https://postgr.es/m/30805.1532749137@sss.pgh.pa.us
2018-08-01 18:30:36 +02:00
|
|
|
if (pwhost == NULL || pwhost[0] == '\0')
|
Fix libpq to not require user's home directory to exist.
Some people like to run libpq-using applications in environments where
there's no home directory. We've broken that scenario before (cf commits
5b4067798 and bd58d9d88), and commit ba005f193 broke it again, by making
it a hard error if we fail to get the home directory name while looking
for ~/.pgpass. The previous precedent is that if we can't get the home
directory name, we should just silently act as though the file we hoped
to find there doesn't exist. Rearrange the new code to honor that.
Looking around, the service-file code added by commit 41a4e4595 had the
same disease. Apparently, that escaped notice because it only runs when
a service name has been specified, which I guess the people who use this
scenario don't do. Nonetheless, it's wrong too, so fix that case as well.
Add a comment about this policy to pqGetHomeDirectory, in the probably
vain hope of forestalling the same error in future. And upgrade the
rather miserable commenting in parseServiceInfo, too.
In passing, also back off parseServiceInfo's assumption that only ENOENT
is an ignorable error from stat() when checking a service file. We would
need to ignore at least ENOTDIR as well (cf 5b4067798), and seeing that
the far-better-tested code for ~/.pgpass treats all stat() failures alike,
I think this code ought to as well.
Per bug #14872 from Dan Watson. Back-patch the .pgpass change to v10
where ba005f193 came in. The service-file bugs are far older, so
back-patch the other changes to all supported branches.
Discussion: https://postgr.es/m/20171025200457.1471.34504@wrigleys.postgresql.org
2017-10-26 01:32:24 +02:00
|
|
|
pwhost = conn->connhost[i].hostaddr;
|
|
|
|
|
|
|
|
conn->connhost[i].password =
|
|
|
|
passwordFromFile(pwhost,
|
|
|
|
conn->connhost[i].port,
|
|
|
|
conn->dbName,
|
|
|
|
conn->pguser,
|
|
|
|
conn->pgpassfile);
|
|
|
|
}
|
2014-11-25 11:55:00 +01:00
|
|
|
}
|
2000-11-14 00:37:54 +01:00
|
|
|
}
|
|
|
|
|
2019-09-23 22:45:23 +02:00
|
|
|
/*
|
|
|
|
* validate channel_binding option
|
|
|
|
*/
|
|
|
|
if (conn->channel_binding)
|
|
|
|
{
|
|
|
|
if (strcmp(conn->channel_binding, "disable") != 0
|
|
|
|
&& strcmp(conn->channel_binding, "prefer") != 0
|
|
|
|
&& strcmp(conn->channel_binding, "require") != 0)
|
|
|
|
{
|
|
|
|
conn->status = CONNECTION_BAD;
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
2020-06-15 08:22:52 +02:00
|
|
|
libpq_gettext("invalid %s value: \"%s\"\n"),
|
|
|
|
"channel_binding", conn->channel_binding);
|
2019-09-23 22:45:23 +02:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
conn->channel_binding = strdup(DefaultChannelBinding);
|
|
|
|
if (!conn->channel_binding)
|
|
|
|
goto oom_error;
|
|
|
|
}
|
|
|
|
|
At long last I put together a patch to support 4 client SSL negotiation
modes (and replace the requiressl boolean). The four options were first
spelled out by Magnus Hagander <mha@sollentuna.net> on 2000-08-23 in email
to pgsql-hackers, archived here:
http://archives.postgresql.org/pgsql-hackers/2000-08/msg00639.php
My original less-flexible patch and the ensuing thread are archived at:
http://dbforums.com/t623845.html
Attached is a new patch, including documentation.
To sum up, there's a new client parameter "sslmode" and environment
variable "PGSSLMODE", with these options:
sslmode description
------- -----------
disable Unencrypted non-SSL only
allow Negotiate, prefer non-SSL
prefer Negotiate, prefer SSL (default)
require Require SSL
The only change to the server is a new pg_hba.conf line type,
"hostnossl", for specifying connections that are not allowed to use SSL
(for example, to prevent servers on a local network from accidentally
using SSL and wasting cycles). Thus the 3 pg_hba.conf line types are:
pg_hba.conf line types
----------------------
host applies to either SSL or regular connections
hostssl applies only to SSL connections
hostnossl applies only to regular connections
These client and server options, the postgresql.conf ssl = false option,
and finally the possibility of compiling with no SSL support at all,
make quite a range of combinations to test. I threw together a test
script to try many of them out. It's in a separate tarball with its
config files, a patch to psql so it'll announce SSL connections even in
absence of a tty, and the test output. The test is especially informative
when run on the same tty the postmaster was started on, so the FATAL:
errors during negotiation are interleaved with the psql client output.
I saw Tom write that new submissions for 7.4 have to be in before midnight
local time, and since I'm on the east coast in the US, this just makes it
in before the bell. :)
Jon Jensen
2003-07-26 15:50:02 +02:00
|
|
|
/*
|
|
|
|
* validate sslmode option
|
|
|
|
*/
|
|
|
|
if (conn->sslmode)
|
|
|
|
{
|
|
|
|
if (strcmp(conn->sslmode, "disable") != 0
|
|
|
|
&& strcmp(conn->sslmode, "allow") != 0
|
|
|
|
&& strcmp(conn->sslmode, "prefer") != 0
|
2009-04-24 11:43:10 +02:00
|
|
|
&& strcmp(conn->sslmode, "require") != 0
|
|
|
|
&& strcmp(conn->sslmode, "verify-ca") != 0
|
|
|
|
&& strcmp(conn->sslmode, "verify-full") != 0)
|
At long last I put together a patch to support 4 client SSL negotiation
modes (and replace the requiressl boolean). The four options were first
spelled out by Magnus Hagander <mha@sollentuna.net> on 2000-08-23 in email
to pgsql-hackers, archived here:
http://archives.postgresql.org/pgsql-hackers/2000-08/msg00639.php
My original less-flexible patch and the ensuing thread are archived at:
http://dbforums.com/t623845.html
Attached is a new patch, including documentation.
To sum up, there's a new client parameter "sslmode" and environment
variable "PGSSLMODE", with these options:
sslmode description
------- -----------
disable Unencrypted non-SSL only
allow Negotiate, prefer non-SSL
prefer Negotiate, prefer SSL (default)
require Require SSL
The only change to the server is a new pg_hba.conf line type,
"hostnossl", for specifying connections that are not allowed to use SSL
(for example, to prevent servers on a local network from accidentally
using SSL and wasting cycles). Thus the 3 pg_hba.conf line types are:
pg_hba.conf line types
----------------------
host applies to either SSL or regular connections
hostssl applies only to SSL connections
hostnossl applies only to regular connections
These client and server options, the postgresql.conf ssl = false option,
and finally the possibility of compiling with no SSL support at all,
make quite a range of combinations to test. I threw together a test
script to try many of them out. It's in a separate tarball with its
config files, a patch to psql so it'll announce SSL connections even in
absence of a tty, and the test output. The test is especially informative
when run on the same tty the postmaster was started on, so the FATAL:
errors during negotiation are interleaved with the psql client output.
I saw Tom write that new submissions for 7.4 have to be in before midnight
local time, and since I'm on the east coast in the US, this just makes it
in before the bell. :)
Jon Jensen
2003-07-26 15:50:02 +02:00
|
|
|
{
|
|
|
|
conn->status = CONNECTION_BAD;
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
2020-06-15 08:22:52 +02:00
|
|
|
libpq_gettext("invalid %s value: \"%s\"\n"),
|
|
|
|
"sslmode", conn->sslmode);
|
At long last I put together a patch to support 4 client SSL negotiation
modes (and replace the requiressl boolean). The four options were first
spelled out by Magnus Hagander <mha@sollentuna.net> on 2000-08-23 in email
to pgsql-hackers, archived here:
http://archives.postgresql.org/pgsql-hackers/2000-08/msg00639.php
My original less-flexible patch and the ensuing thread are archived at:
http://dbforums.com/t623845.html
Attached is a new patch, including documentation.
To sum up, there's a new client parameter "sslmode" and environment
variable "PGSSLMODE", with these options:
sslmode description
------- -----------
disable Unencrypted non-SSL only
allow Negotiate, prefer non-SSL
prefer Negotiate, prefer SSL (default)
require Require SSL
The only change to the server is a new pg_hba.conf line type,
"hostnossl", for specifying connections that are not allowed to use SSL
(for example, to prevent servers on a local network from accidentally
using SSL and wasting cycles). Thus the 3 pg_hba.conf line types are:
pg_hba.conf line types
----------------------
host applies to either SSL or regular connections
hostssl applies only to SSL connections
hostnossl applies only to regular connections
These client and server options, the postgresql.conf ssl = false option,
and finally the possibility of compiling with no SSL support at all,
make quite a range of combinations to test. I threw together a test
script to try many of them out. It's in a separate tarball with its
config files, a patch to psql so it'll announce SSL connections even in
absence of a tty, and the test output. The test is especially informative
when run on the same tty the postmaster was started on, so the FATAL:
errors during negotiation are interleaved with the psql client output.
I saw Tom write that new submissions for 7.4 have to be in before midnight
local time, and since I'm on the east coast in the US, this just makes it
in before the bell. :)
Jon Jensen
2003-07-26 15:50:02 +02:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifndef USE_SSL
|
|
|
|
switch (conn->sslmode[0])
|
|
|
|
{
|
|
|
|
case 'a': /* "allow" */
|
|
|
|
case 'p': /* "prefer" */
|
2003-08-04 02:43:34 +02:00
|
|
|
|
At long last I put together a patch to support 4 client SSL negotiation
modes (and replace the requiressl boolean). The four options were first
spelled out by Magnus Hagander <mha@sollentuna.net> on 2000-08-23 in email
to pgsql-hackers, archived here:
http://archives.postgresql.org/pgsql-hackers/2000-08/msg00639.php
My original less-flexible patch and the ensuing thread are archived at:
http://dbforums.com/t623845.html
Attached is a new patch, including documentation.
To sum up, there's a new client parameter "sslmode" and environment
variable "PGSSLMODE", with these options:
sslmode description
------- -----------
disable Unencrypted non-SSL only
allow Negotiate, prefer non-SSL
prefer Negotiate, prefer SSL (default)
require Require SSL
The only change to the server is a new pg_hba.conf line type,
"hostnossl", for specifying connections that are not allowed to use SSL
(for example, to prevent servers on a local network from accidentally
using SSL and wasting cycles). Thus the 3 pg_hba.conf line types are:
pg_hba.conf line types
----------------------
host applies to either SSL or regular connections
hostssl applies only to SSL connections
hostnossl applies only to regular connections
These client and server options, the postgresql.conf ssl = false option,
and finally the possibility of compiling with no SSL support at all,
make quite a range of combinations to test. I threw together a test
script to try many of them out. It's in a separate tarball with its
config files, a patch to psql so it'll announce SSL connections even in
absence of a tty, and the test output. The test is especially informative
when run on the same tty the postmaster was started on, so the FATAL:
errors during negotiation are interleaved with the psql client output.
I saw Tom write that new submissions for 7.4 have to be in before midnight
local time, and since I'm on the east coast in the US, this just makes it
in before the bell. :)
Jon Jensen
2003-07-26 15:50:02 +02:00
|
|
|
/*
|
|
|
|
* warn user that an SSL connection will never be negotiated
|
|
|
|
* since SSL was not compiled in?
|
|
|
|
*/
|
|
|
|
break;
|
|
|
|
|
|
|
|
case 'r': /* "require" */
|
2009-04-24 11:43:10 +02:00
|
|
|
case 'v': /* "verify-ca" or "verify-full" */
|
At long last I put together a patch to support 4 client SSL negotiation
modes (and replace the requiressl boolean). The four options were first
spelled out by Magnus Hagander <mha@sollentuna.net> on 2000-08-23 in email
to pgsql-hackers, archived here:
http://archives.postgresql.org/pgsql-hackers/2000-08/msg00639.php
My original less-flexible patch and the ensuing thread are archived at:
http://dbforums.com/t623845.html
Attached is a new patch, including documentation.
To sum up, there's a new client parameter "sslmode" and environment
variable "PGSSLMODE", with these options:
sslmode description
------- -----------
disable Unencrypted non-SSL only
allow Negotiate, prefer non-SSL
prefer Negotiate, prefer SSL (default)
require Require SSL
The only change to the server is a new pg_hba.conf line type,
"hostnossl", for specifying connections that are not allowed to use SSL
(for example, to prevent servers on a local network from accidentally
using SSL and wasting cycles). Thus the 3 pg_hba.conf line types are:
pg_hba.conf line types
----------------------
host applies to either SSL or regular connections
hostssl applies only to SSL connections
hostnossl applies only to regular connections
These client and server options, the postgresql.conf ssl = false option,
and finally the possibility of compiling with no SSL support at all,
make quite a range of combinations to test. I threw together a test
script to try many of them out. It's in a separate tarball with its
config files, a patch to psql so it'll announce SSL connections even in
absence of a tty, and the test output. The test is especially informative
when run on the same tty the postmaster was started on, so the FATAL:
errors during negotiation are interleaved with the psql client output.
I saw Tom write that new submissions for 7.4 have to be in before midnight
local time, and since I'm on the east coast in the US, this just makes it
in before the bell. :)
Jon Jensen
2003-07-26 15:50:02 +02:00
|
|
|
conn->status = CONNECTION_BAD;
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
2003-09-22 02:23:35 +02:00
|
|
|
libpq_gettext("sslmode value \"%s\" invalid when SSL support is not compiled in\n"),
|
At long last I put together a patch to support 4 client SSL negotiation
modes (and replace the requiressl boolean). The four options were first
spelled out by Magnus Hagander <mha@sollentuna.net> on 2000-08-23 in email
to pgsql-hackers, archived here:
http://archives.postgresql.org/pgsql-hackers/2000-08/msg00639.php
My original less-flexible patch and the ensuing thread are archived at:
http://dbforums.com/t623845.html
Attached is a new patch, including documentation.
To sum up, there's a new client parameter "sslmode" and environment
variable "PGSSLMODE", with these options:
sslmode description
------- -----------
disable Unencrypted non-SSL only
allow Negotiate, prefer non-SSL
prefer Negotiate, prefer SSL (default)
require Require SSL
The only change to the server is a new pg_hba.conf line type,
"hostnossl", for specifying connections that are not allowed to use SSL
(for example, to prevent servers on a local network from accidentally
using SSL and wasting cycles). Thus the 3 pg_hba.conf line types are:
pg_hba.conf line types
----------------------
host applies to either SSL or regular connections
hostssl applies only to SSL connections
hostnossl applies only to regular connections
These client and server options, the postgresql.conf ssl = false option,
and finally the possibility of compiling with no SSL support at all,
make quite a range of combinations to test. I threw together a test
script to try many of them out. It's in a separate tarball with its
config files, a patch to psql so it'll announce SSL connections even in
absence of a tty, and the test output. The test is especially informative
when run on the same tty the postmaster was started on, so the FATAL:
errors during negotiation are interleaved with the psql client output.
I saw Tom write that new submissions for 7.4 have to be in before midnight
local time, and since I'm on the east coast in the US, this just makes it
in before the bell. :)
Jon Jensen
2003-07-26 15:50:02 +02:00
|
|
|
conn->sslmode);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
else
|
2014-11-25 11:55:00 +01:00
|
|
|
{
|
2003-08-01 23:27:27 +02:00
|
|
|
conn->sslmode = strdup(DefaultSSLMode);
|
2014-11-25 11:55:00 +01:00
|
|
|
if (!conn->sslmode)
|
|
|
|
goto oom_error;
|
|
|
|
}
|
At long last I put together a patch to support 4 client SSL negotiation
modes (and replace the requiressl boolean). The four options were first
spelled out by Magnus Hagander <mha@sollentuna.net> on 2000-08-23 in email
to pgsql-hackers, archived here:
http://archives.postgresql.org/pgsql-hackers/2000-08/msg00639.php
My original less-flexible patch and the ensuing thread are archived at:
http://dbforums.com/t623845.html
Attached is a new patch, including documentation.
To sum up, there's a new client parameter "sslmode" and environment
variable "PGSSLMODE", with these options:
sslmode description
------- -----------
disable Unencrypted non-SSL only
allow Negotiate, prefer non-SSL
prefer Negotiate, prefer SSL (default)
require Require SSL
The only change to the server is a new pg_hba.conf line type,
"hostnossl", for specifying connections that are not allowed to use SSL
(for example, to prevent servers on a local network from accidentally
using SSL and wasting cycles). Thus the 3 pg_hba.conf line types are:
pg_hba.conf line types
----------------------
host applies to either SSL or regular connections
hostssl applies only to SSL connections
hostnossl applies only to regular connections
These client and server options, the postgresql.conf ssl = false option,
and finally the possibility of compiling with no SSL support at all,
make quite a range of combinations to test. I threw together a test
script to try many of them out. It's in a separate tarball with its
config files, a patch to psql so it'll announce SSL connections even in
absence of a tty, and the test output. The test is especially informative
when run on the same tty the postmaster was started on, so the FATAL:
errors during negotiation are interleaved with the psql client output.
I saw Tom write that new submissions for 7.4 have to be in before midnight
local time, and since I'm on the east coast in the US, this just makes it
in before the bell. :)
Jon Jensen
2003-07-26 15:50:02 +02:00
|
|
|
|
2020-01-28 02:40:48 +01:00
|
|
|
/*
|
2020-04-30 06:39:10 +02:00
|
|
|
* Validate TLS protocol versions for ssl_min_protocol_version and
|
|
|
|
* ssl_max_protocol_version.
|
2020-01-28 02:40:48 +01:00
|
|
|
*/
|
2020-04-30 06:39:10 +02:00
|
|
|
if (!sslVerifyProtocolVersion(conn->ssl_min_protocol_version))
|
2020-01-28 02:40:48 +01:00
|
|
|
{
|
2020-02-02 19:09:33 +01:00
|
|
|
conn->status = CONNECTION_BAD;
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
2020-06-15 08:22:52 +02:00
|
|
|
libpq_gettext("invalid %s value: \"%s\"\n"),
|
|
|
|
"ssl_min_protocol_version",
|
2020-04-30 06:39:10 +02:00
|
|
|
conn->ssl_min_protocol_version);
|
2020-01-28 02:40:48 +01:00
|
|
|
return false;
|
|
|
|
}
|
2020-04-30 06:39:10 +02:00
|
|
|
if (!sslVerifyProtocolVersion(conn->ssl_max_protocol_version))
|
2020-01-28 02:40:48 +01:00
|
|
|
{
|
2020-02-02 19:09:33 +01:00
|
|
|
conn->status = CONNECTION_BAD;
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
2020-06-15 08:22:52 +02:00
|
|
|
libpq_gettext("invalid %s value: \"%s\"\n"),
|
|
|
|
"ssl_max_protocol_version",
|
2020-04-30 06:39:10 +02:00
|
|
|
conn->ssl_max_protocol_version);
|
2020-01-28 02:40:48 +01:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check if the range of SSL protocols defined is correct. This is done
|
|
|
|
* at this early step because this is independent of the SSL
|
|
|
|
* implementation used, and this avoids unnecessary cycles with an
|
|
|
|
* already-built SSL context when the connection is being established, as
|
|
|
|
* it would be doomed anyway.
|
|
|
|
*/
|
2020-04-30 06:39:10 +02:00
|
|
|
if (!sslVerifyProtocolRange(conn->ssl_min_protocol_version,
|
|
|
|
conn->ssl_max_protocol_version))
|
2020-01-28 02:40:48 +01:00
|
|
|
{
|
2020-02-02 19:09:33 +01:00
|
|
|
conn->status = CONNECTION_BAD;
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBufferStr(&conn->errorMessage,
|
|
|
|
libpq_gettext("invalid SSL protocol version range\n"));
|
2020-01-28 02:40:48 +01:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
GSSAPI encryption support
On both the frontend and backend, prepare for GSSAPI encryption
support by moving common code for error handling into a separate file.
Fix a TODO for handling multiple status messages in the process.
Eliminate the OIDs, which have not been needed for some time.
Add frontend and backend encryption support functions. Keep the
context initiation for authentication-only separate on both the
frontend and backend in order to avoid concerns about changing the
requested flags to include encryption support.
In postmaster, pull GSSAPI authorization checking into a shared
function. Also share the initiator name between the encryption and
non-encryption codepaths.
For HBA, add "hostgssenc" and "hostnogssenc" entries that behave
similarly to their SSL counterparts. "hostgssenc" requires either
"gss", "trust", or "reject" for its authentication.
Similarly, add a "gssencmode" parameter to libpq. Supported values are
"disable", "require", and "prefer". Notably, negotiation will only be
attempted if credentials can be acquired. Move credential acquisition
into its own function to support this behavior.
Add a simple pg_stat_gssapi view similar to pg_stat_ssl, for monitoring
if GSSAPI authentication was used, what principal was used, and if
encryption is being used on the connection.
Finally, add documentation for everything new, and update existing
documentation on connection security.
Thanks to Michael Paquier for the Windows fixes.
Author: Robbie Harwood, with changes to the read/write functions by me.
Reviewed in various forms and at different times by: Michael Paquier,
Andres Freund, David Steele.
Discussion: https://www.postgresql.org/message-id/flat/jlg1tgq1ktm.fsf@thriss.redhat.com
2019-04-03 21:02:33 +02:00
|
|
|
/*
|
|
|
|
* validate gssencmode option
|
|
|
|
*/
|
|
|
|
if (conn->gssencmode)
|
|
|
|
{
|
|
|
|
if (strcmp(conn->gssencmode, "disable") != 0 &&
|
|
|
|
strcmp(conn->gssencmode, "prefer") != 0 &&
|
|
|
|
strcmp(conn->gssencmode, "require") != 0)
|
|
|
|
{
|
|
|
|
conn->status = CONNECTION_BAD;
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
2020-06-15 08:22:52 +02:00
|
|
|
libpq_gettext("invalid %s value: \"%s\"\n"),
|
|
|
|
"gssencmode",
|
GSSAPI encryption support
On both the frontend and backend, prepare for GSSAPI encryption
support by moving common code for error handling into a separate file.
Fix a TODO for handling multiple status messages in the process.
Eliminate the OIDs, which have not been needed for some time.
Add frontend and backend encryption support functions. Keep the
context initiation for authentication-only separate on both the
frontend and backend in order to avoid concerns about changing the
requested flags to include encryption support.
In postmaster, pull GSSAPI authorization checking into a shared
function. Also share the initiator name between the encryption and
non-encryption codepaths.
For HBA, add "hostgssenc" and "hostnogssenc" entries that behave
similarly to their SSL counterparts. "hostgssenc" requires either
"gss", "trust", or "reject" for its authentication.
Similarly, add a "gssencmode" parameter to libpq. Supported values are
"disable", "require", and "prefer". Notably, negotiation will only be
attempted if credentials can be acquired. Move credential acquisition
into its own function to support this behavior.
Add a simple pg_stat_gssapi view similar to pg_stat_ssl, for monitoring
if GSSAPI authentication was used, what principal was used, and if
encryption is being used on the connection.
Finally, add documentation for everything new, and update existing
documentation on connection security.
Thanks to Michael Paquier for the Windows fixes.
Author: Robbie Harwood, with changes to the read/write functions by me.
Reviewed in various forms and at different times by: Michael Paquier,
Andres Freund, David Steele.
Discussion: https://www.postgresql.org/message-id/flat/jlg1tgq1ktm.fsf@thriss.redhat.com
2019-04-03 21:02:33 +02:00
|
|
|
conn->gssencmode);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
#ifndef ENABLE_GSS
|
|
|
|
if (strcmp(conn->gssencmode, "require") == 0)
|
|
|
|
{
|
|
|
|
conn->status = CONNECTION_BAD;
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
2019-09-06 16:12:28 +02:00
|
|
|
libpq_gettext("gssencmode value \"%s\" invalid when GSSAPI support is not compiled in\n"),
|
|
|
|
conn->gssencmode);
|
GSSAPI encryption support
On both the frontend and backend, prepare for GSSAPI encryption
support by moving common code for error handling into a separate file.
Fix a TODO for handling multiple status messages in the process.
Eliminate the OIDs, which have not been needed for some time.
Add frontend and backend encryption support functions. Keep the
context initiation for authentication-only separate on both the
frontend and backend in order to avoid concerns about changing the
requested flags to include encryption support.
In postmaster, pull GSSAPI authorization checking into a shared
function. Also share the initiator name between the encryption and
non-encryption codepaths.
For HBA, add "hostgssenc" and "hostnogssenc" entries that behave
similarly to their SSL counterparts. "hostgssenc" requires either
"gss", "trust", or "reject" for its authentication.
Similarly, add a "gssencmode" parameter to libpq. Supported values are
"disable", "require", and "prefer". Notably, negotiation will only be
attempted if credentials can be acquired. Move credential acquisition
into its own function to support this behavior.
Add a simple pg_stat_gssapi view similar to pg_stat_ssl, for monitoring
if GSSAPI authentication was used, what principal was used, and if
encryption is being used on the connection.
Finally, add documentation for everything new, and update existing
documentation on connection security.
Thanks to Michael Paquier for the Windows fixes.
Author: Robbie Harwood, with changes to the read/write functions by me.
Reviewed in various forms and at different times by: Michael Paquier,
Andres Freund, David Steele.
Discussion: https://www.postgresql.org/message-id/flat/jlg1tgq1ktm.fsf@thriss.redhat.com
2019-04-03 21:02:33 +02:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
conn->gssencmode = strdup(DefaultGSSMode);
|
|
|
|
if (!conn->gssencmode)
|
|
|
|
goto oom_error;
|
|
|
|
}
|
|
|
|
|
2011-02-19 07:54:58 +01:00
|
|
|
/*
|
Extend the abilities of libpq's target_session_attrs parameter.
In addition to the existing options of "any" and "read-write", we
now support "read-only", "primary", "standby", and "prefer-standby".
"read-write" retains its previous meaning of "transactions are
read-write by default", and "read-only" inverts that. The other
three modes test specifically for hot-standby status, which is not
quite the same thing. (Setting default_transaction_read_only on
a primary server renders it read-only to this logic, but not a
standby.)
Furthermore, if talking to a v14 or later server, no extra network
round trip is needed to detect the session's status; the GUC_REPORT
variables delivered by the server are enough. When talking to an
older server, a SHOW or SELECT query is issued to detect session
read-only-ness or server hot-standby state, as needed.
Haribabu Kommi, Greg Nancarrow, Vignesh C, Tom Lane; reviewed at
various times by Laurenz Albe, Takayuki Tsunakawa, Peter Smith.
Discussion: https://postgr.es/m/CAF3+xM+8-ztOkaV9gHiJ3wfgENTq97QcjXQt+rbFQ6F7oNzt9A@mail.gmail.com
2021-03-03 02:17:45 +01:00
|
|
|
* validate target_session_attrs option, and set target_server_type
|
libpq: Add target_session_attrs parameter.
Commit 274bb2b3857cc987cfa21d14775cae9b0dababa5 made it possible to
specify multiple IPs in a connection string, but that's not good
enough for the case where you have a read-write master and a bunch of
read-only standbys and want to connect to whichever server is the
master at the current time. This commit allows that, by making it
possible to specify target_session_attrs=read-write as a connection
parameter.
There was extensive discussion of the best name for the connection
parameter and its values as well as the best way to distinguish master
and standbys. For now, adopt the same solution as JDBC: if the user
wants a read-write connection, issue 'show transaction_read_only' and
rejection the connection if the result is 'on'. In the future, we
could add additional values of this new target_session_attrs parameter
that issue different queries; or we might have some way of
distinguishing the server type without resorting to an SQL query; but
right now, we have this, and that's (hopefully) a good start.
Victor Wagner and Mithun Cy. Design review by Álvaro Herrera, Catalin
Iacob, Takayuki Tsunakawa, and Craig Ringer; code review by me. I
changed Mithun's patch to skip all remaining IPs for a host if we
reject a connection based on this new parameter, rewrote the
documentation, and did some other cosmetic cleanup.
Discussion: http://postgr.es/m/CAD__OuhqPRGpcsfwPHz_PDqAGkoqS1UvnUnOnAB-LBWBW=wu4A@mail.gmail.com
2016-11-29 18:18:31 +01:00
|
|
|
*/
|
|
|
|
if (conn->target_session_attrs)
|
|
|
|
{
|
Extend the abilities of libpq's target_session_attrs parameter.
In addition to the existing options of "any" and "read-write", we
now support "read-only", "primary", "standby", and "prefer-standby".
"read-write" retains its previous meaning of "transactions are
read-write by default", and "read-only" inverts that. The other
three modes test specifically for hot-standby status, which is not
quite the same thing. (Setting default_transaction_read_only on
a primary server renders it read-only to this logic, but not a
standby.)
Furthermore, if talking to a v14 or later server, no extra network
round trip is needed to detect the session's status; the GUC_REPORT
variables delivered by the server are enough. When talking to an
older server, a SHOW or SELECT query is issued to detect session
read-only-ness or server hot-standby state, as needed.
Haribabu Kommi, Greg Nancarrow, Vignesh C, Tom Lane; reviewed at
various times by Laurenz Albe, Takayuki Tsunakawa, Peter Smith.
Discussion: https://postgr.es/m/CAF3+xM+8-ztOkaV9gHiJ3wfgENTq97QcjXQt+rbFQ6F7oNzt9A@mail.gmail.com
2021-03-03 02:17:45 +01:00
|
|
|
if (strcmp(conn->target_session_attrs, "any") == 0)
|
|
|
|
conn->target_server_type = SERVER_TYPE_ANY;
|
|
|
|
else if (strcmp(conn->target_session_attrs, "read-write") == 0)
|
|
|
|
conn->target_server_type = SERVER_TYPE_READ_WRITE;
|
|
|
|
else if (strcmp(conn->target_session_attrs, "read-only") == 0)
|
|
|
|
conn->target_server_type = SERVER_TYPE_READ_ONLY;
|
|
|
|
else if (strcmp(conn->target_session_attrs, "primary") == 0)
|
|
|
|
conn->target_server_type = SERVER_TYPE_PRIMARY;
|
|
|
|
else if (strcmp(conn->target_session_attrs, "standby") == 0)
|
|
|
|
conn->target_server_type = SERVER_TYPE_STANDBY;
|
|
|
|
else if (strcmp(conn->target_session_attrs, "prefer-standby") == 0)
|
|
|
|
conn->target_server_type = SERVER_TYPE_PREFER_STANDBY;
|
|
|
|
else
|
libpq: Add target_session_attrs parameter.
Commit 274bb2b3857cc987cfa21d14775cae9b0dababa5 made it possible to
specify multiple IPs in a connection string, but that's not good
enough for the case where you have a read-write master and a bunch of
read-only standbys and want to connect to whichever server is the
master at the current time. This commit allows that, by making it
possible to specify target_session_attrs=read-write as a connection
parameter.
There was extensive discussion of the best name for the connection
parameter and its values as well as the best way to distinguish master
and standbys. For now, adopt the same solution as JDBC: if the user
wants a read-write connection, issue 'show transaction_read_only' and
rejection the connection if the result is 'on'. In the future, we
could add additional values of this new target_session_attrs parameter
that issue different queries; or we might have some way of
distinguishing the server type without resorting to an SQL query; but
right now, we have this, and that's (hopefully) a good start.
Victor Wagner and Mithun Cy. Design review by Álvaro Herrera, Catalin
Iacob, Takayuki Tsunakawa, and Craig Ringer; code review by me. I
changed Mithun's patch to skip all remaining IPs for a host if we
reject a connection based on this new parameter, rewrote the
documentation, and did some other cosmetic cleanup.
Discussion: http://postgr.es/m/CAD__OuhqPRGpcsfwPHz_PDqAGkoqS1UvnUnOnAB-LBWBW=wu4A@mail.gmail.com
2016-11-29 18:18:31 +01:00
|
|
|
{
|
|
|
|
conn->status = CONNECTION_BAD;
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
2020-06-15 08:22:52 +02:00
|
|
|
libpq_gettext("invalid %s value: \"%s\"\n"),
|
Extend the abilities of libpq's target_session_attrs parameter.
In addition to the existing options of "any" and "read-write", we
now support "read-only", "primary", "standby", and "prefer-standby".
"read-write" retains its previous meaning of "transactions are
read-write by default", and "read-only" inverts that. The other
three modes test specifically for hot-standby status, which is not
quite the same thing. (Setting default_transaction_read_only on
a primary server renders it read-only to this logic, but not a
standby.)
Furthermore, if talking to a v14 or later server, no extra network
round trip is needed to detect the session's status; the GUC_REPORT
variables delivered by the server are enough. When talking to an
older server, a SHOW or SELECT query is issued to detect session
read-only-ness or server hot-standby state, as needed.
Haribabu Kommi, Greg Nancarrow, Vignesh C, Tom Lane; reviewed at
various times by Laurenz Albe, Takayuki Tsunakawa, Peter Smith.
Discussion: https://postgr.es/m/CAF3+xM+8-ztOkaV9gHiJ3wfgENTq97QcjXQt+rbFQ6F7oNzt9A@mail.gmail.com
2021-03-03 02:17:45 +01:00
|
|
|
"target_session_attrs",
|
libpq: Add target_session_attrs parameter.
Commit 274bb2b3857cc987cfa21d14775cae9b0dababa5 made it possible to
specify multiple IPs in a connection string, but that's not good
enough for the case where you have a read-write master and a bunch of
read-only standbys and want to connect to whichever server is the
master at the current time. This commit allows that, by making it
possible to specify target_session_attrs=read-write as a connection
parameter.
There was extensive discussion of the best name for the connection
parameter and its values as well as the best way to distinguish master
and standbys. For now, adopt the same solution as JDBC: if the user
wants a read-write connection, issue 'show transaction_read_only' and
rejection the connection if the result is 'on'. In the future, we
could add additional values of this new target_session_attrs parameter
that issue different queries; or we might have some way of
distinguishing the server type without resorting to an SQL query; but
right now, we have this, and that's (hopefully) a good start.
Victor Wagner and Mithun Cy. Design review by Álvaro Herrera, Catalin
Iacob, Takayuki Tsunakawa, and Craig Ringer; code review by me. I
changed Mithun's patch to skip all remaining IPs for a host if we
reject a connection based on this new parameter, rewrote the
documentation, and did some other cosmetic cleanup.
Discussion: http://postgr.es/m/CAD__OuhqPRGpcsfwPHz_PDqAGkoqS1UvnUnOnAB-LBWBW=wu4A@mail.gmail.com
2016-11-29 18:18:31 +01:00
|
|
|
conn->target_session_attrs);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
Extend the abilities of libpq's target_session_attrs parameter.
In addition to the existing options of "any" and "read-write", we
now support "read-only", "primary", "standby", and "prefer-standby".
"read-write" retains its previous meaning of "transactions are
read-write by default", and "read-only" inverts that. The other
three modes test specifically for hot-standby status, which is not
quite the same thing. (Setting default_transaction_read_only on
a primary server renders it read-only to this logic, but not a
standby.)
Furthermore, if talking to a v14 or later server, no extra network
round trip is needed to detect the session's status; the GUC_REPORT
variables delivered by the server are enough. When talking to an
older server, a SHOW or SELECT query is issued to detect session
read-only-ness or server hot-standby state, as needed.
Haribabu Kommi, Greg Nancarrow, Vignesh C, Tom Lane; reviewed at
various times by Laurenz Albe, Takayuki Tsunakawa, Peter Smith.
Discussion: https://postgr.es/m/CAF3+xM+8-ztOkaV9gHiJ3wfgENTq97QcjXQt+rbFQ6F7oNzt9A@mail.gmail.com
2021-03-03 02:17:45 +01:00
|
|
|
else
|
|
|
|
conn->target_server_type = SERVER_TYPE_ANY;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Resolve special "auto" client_encoding from the locale
|
|
|
|
*/
|
|
|
|
if (conn->client_encoding_initial &&
|
|
|
|
strcmp(conn->client_encoding_initial, "auto") == 0)
|
|
|
|
{
|
|
|
|
free(conn->client_encoding_initial);
|
|
|
|
conn->client_encoding_initial = strdup(pg_encoding_to_char(pg_get_encoding_from_locale(NULL, true)));
|
|
|
|
if (!conn->client_encoding_initial)
|
|
|
|
goto oom_error;
|
|
|
|
}
|
libpq: Add target_session_attrs parameter.
Commit 274bb2b3857cc987cfa21d14775cae9b0dababa5 made it possible to
specify multiple IPs in a connection string, but that's not good
enough for the case where you have a read-write master and a bunch of
read-only standbys and want to connect to whichever server is the
master at the current time. This commit allows that, by making it
possible to specify target_session_attrs=read-write as a connection
parameter.
There was extensive discussion of the best name for the connection
parameter and its values as well as the best way to distinguish master
and standbys. For now, adopt the same solution as JDBC: if the user
wants a read-write connection, issue 'show transaction_read_only' and
rejection the connection if the result is 'on'. In the future, we
could add additional values of this new target_session_attrs parameter
that issue different queries; or we might have some way of
distinguishing the server type without resorting to an SQL query; but
right now, we have this, and that's (hopefully) a good start.
Victor Wagner and Mithun Cy. Design review by Álvaro Herrera, Catalin
Iacob, Takayuki Tsunakawa, and Craig Ringer; code review by me. I
changed Mithun's patch to skip all remaining IPs for a host if we
reject a connection based on this new parameter, rewrote the
documentation, and did some other cosmetic cleanup.
Discussion: http://postgr.es/m/CAD__OuhqPRGpcsfwPHz_PDqAGkoqS1UvnUnOnAB-LBWBW=wu4A@mail.gmail.com
2016-11-29 18:18:31 +01:00
|
|
|
|
2006-02-13 23:33:57 +01:00
|
|
|
/*
|
|
|
|
* Only if we get this far is it appropriate to try to connect. (We need a
|
|
|
|
* state flag, rather than just the boolean result of this function, in
|
|
|
|
* case someone tries to PQreset() the PGconn.)
|
|
|
|
*/
|
|
|
|
conn->options_valid = true;
|
|
|
|
|
2003-04-28 06:29:12 +02:00
|
|
|
return true;
|
2014-11-25 11:55:00 +01:00
|
|
|
|
|
|
|
oom_error:
|
|
|
|
conn->status = CONNECTION_BAD;
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBufferStr(&conn->errorMessage,
|
|
|
|
libpq_gettext("out of memory\n"));
|
2014-11-25 11:55:00 +01:00
|
|
|
return false;
|
1996-11-09 11:39:54 +01:00
|
|
|
}
|
|
|
|
|
2001-08-17 17:11:15 +02:00
|
|
|
/*
|
1996-11-09 11:39:54 +01:00
|
|
|
* PQconndefaults
|
|
|
|
*
|
2012-03-22 17:08:34 +01:00
|
|
|
* Construct a default connection options array, which identifies all the
|
|
|
|
* available options and shows any default values that are available from the
|
|
|
|
* environment etc. On error (eg out of memory), NULL is returned.
|
2000-03-11 04:08:37 +01:00
|
|
|
*
|
|
|
|
* Using this function, an application may determine all possible options
|
|
|
|
* and their current default values.
|
|
|
|
*
|
|
|
|
* NOTE: as of PostgreSQL 7.0, the returned array is dynamically allocated
|
|
|
|
* and should be freed when no longer needed via PQconninfoFree(). (In prior
|
|
|
|
* versions, the returned array was static, but that's not thread-safe.)
|
|
|
|
* Pre-7.0 applications that use this function will see a small memory leak
|
|
|
|
* until they are updated to call PQconninfoFree.
|
1996-11-09 11:39:54 +01:00
|
|
|
*/
|
|
|
|
PQconninfoOption *
|
1996-11-10 04:06:38 +01:00
|
|
|
PQconndefaults(void)
|
1996-11-09 11:39:54 +01:00
|
|
|
{
|
1999-08-31 03:37:37 +02:00
|
|
|
PQExpBufferData errorBuf;
|
2000-03-11 04:08:37 +01:00
|
|
|
PQconninfoOption *connOptions;
|
1996-11-09 11:39:54 +01:00
|
|
|
|
2012-03-22 17:08:34 +01:00
|
|
|
/* We don't actually report any errors here, but callees want a buffer */
|
1999-08-31 03:37:37 +02:00
|
|
|
initPQExpBuffer(&errorBuf);
|
2011-10-19 03:44:23 +02:00
|
|
|
if (PQExpBufferDataBroken(errorBuf))
|
2008-09-22 15:55:14 +02:00
|
|
|
return NULL; /* out of memory already :-( */
|
2012-03-22 17:08:34 +01:00
|
|
|
|
|
|
|
connOptions = conninfo_init(&errorBuf);
|
|
|
|
if (connOptions != NULL)
|
|
|
|
{
|
2013-12-03 17:11:56 +01:00
|
|
|
/* pass NULL errorBuf to ignore errors */
|
|
|
|
if (!conninfo_add_defaults(connOptions, NULL))
|
2012-03-22 17:08:34 +01:00
|
|
|
{
|
|
|
|
PQconninfoFree(connOptions);
|
|
|
|
connOptions = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
1999-08-31 03:37:37 +02:00
|
|
|
termPQExpBuffer(&errorBuf);
|
2000-03-11 04:08:37 +01:00
|
|
|
return connOptions;
|
1996-11-09 11:39:54 +01:00
|
|
|
}
|
|
|
|
|
1996-07-09 08:22:35 +02:00
|
|
|
/* ----------------
|
1997-12-04 01:28:15 +01:00
|
|
|
* PQsetdbLogin
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
1996-10-10 10:20:11 +02:00
|
|
|
* establishes a connection to a postgres backend through the postmaster
|
1996-07-09 08:22:35 +02:00
|
|
|
* at the specified host and port.
|
|
|
|
*
|
|
|
|
* returns a PGconn* which is needed for all subsequent libpq calls
|
1997-03-12 22:23:16 +01:00
|
|
|
*
|
2003-04-28 06:29:12 +02:00
|
|
|
* if the status field of the connection returned is CONNECTION_BAD,
|
|
|
|
* then only the errorMessage is likely to be useful.
|
1996-07-09 08:22:35 +02:00
|
|
|
* ----------------
|
|
|
|
*/
|
|
|
|
PGconn *
|
2000-01-14 06:33:15 +01:00
|
|
|
PQsetdbLogin(const char *pghost, const char *pgport, const char *pgoptions,
|
|
|
|
const char *pgtty, const char *dbName, const char *login,
|
|
|
|
const char *pwd)
|
1996-07-09 08:22:35 +02:00
|
|
|
{
|
1999-02-05 05:25:55 +01:00
|
|
|
PGconn *conn;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2003-04-28 06:29:12 +02:00
|
|
|
/*
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
* Allocate memory for the conn structure. Note that we also expect this
|
|
|
|
* to initialize conn->errorMessage to empty. All subsequent steps during
|
|
|
|
* connection initialization will only append to that buffer.
|
2003-04-28 06:29:12 +02:00
|
|
|
*/
|
1998-05-07 01:51:16 +02:00
|
|
|
conn = makeEmptyPGconn();
|
1996-08-19 15:25:40 +02:00
|
|
|
if (conn == NULL)
|
2004-01-07 19:56:30 +01:00
|
|
|
return NULL;
|
2007-11-15 22:14:46 +01:00
|
|
|
|
2006-12-19 02:53:36 +01:00
|
|
|
/*
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
* If the dbName parameter contains what looks like a connection string,
|
|
|
|
* parse it into conn struct using connectOptions1.
|
2006-12-19 02:53:36 +01:00
|
|
|
*/
|
2015-04-02 16:10:22 +02:00
|
|
|
if (dbName && recognized_connection_string(dbName))
|
2006-12-19 02:53:36 +01:00
|
|
|
{
|
|
|
|
if (!connectOptions1(conn, dbName))
|
|
|
|
return conn;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Old-style path: first, parse an empty conninfo string in order to
|
|
|
|
* set up the same defaults that PQconnectdb() would use.
|
|
|
|
*/
|
|
|
|
if (!connectOptions1(conn, ""))
|
|
|
|
return conn;
|
2007-11-15 22:14:46 +01:00
|
|
|
|
2006-12-19 02:53:36 +01:00
|
|
|
/* Insert dbName parameter value into struct */
|
|
|
|
if (dbName && dbName[0] != '\0')
|
|
|
|
{
|
2022-06-16 21:50:56 +02:00
|
|
|
free(conn->dbName);
|
2006-12-19 02:53:36 +01:00
|
|
|
conn->dbName = strdup(dbName);
|
2014-11-25 11:55:00 +01:00
|
|
|
if (!conn->dbName)
|
|
|
|
goto oom_error;
|
2006-12-19 02:53:36 +01:00
|
|
|
}
|
|
|
|
}
|
2007-11-15 22:14:46 +01:00
|
|
|
|
2006-12-19 02:53:36 +01:00
|
|
|
/*
|
|
|
|
* Insert remaining parameters into struct, overriding defaults (as well
|
|
|
|
* as any conflicting data from dbName taken as a conninfo).
|
2000-11-14 00:37:54 +01:00
|
|
|
*/
|
2003-04-28 06:29:12 +02:00
|
|
|
if (pghost && pghost[0] != '\0')
|
2000-11-14 00:37:54 +01:00
|
|
|
{
|
2022-06-16 21:50:56 +02:00
|
|
|
free(conn->pghost);
|
2003-04-28 06:29:12 +02:00
|
|
|
conn->pghost = strdup(pghost);
|
2014-11-25 11:55:00 +01:00
|
|
|
if (!conn->pghost)
|
|
|
|
goto oom_error;
|
2000-11-14 00:37:54 +01:00
|
|
|
}
|
UUNET is looking into offering PostgreSQL as a part of a managed web
hosting product, on both shared and dedicated machines. We currently
offer Oracle and MySQL, and it would be a nice middle-ground.
However, as shipped, PostgreSQL lacks the following features we need
that MySQL has:
1. The ability to listen only on a particular IP address. Each
hosting customer has their own IP address, on which all of their
servers (http, ftp, real media, etc.) run.
2. The ability to place the Unix-domain socket in a mode 700 directory.
This allows us to automatically create an empty database, with an
empty DBA password, for new or upgrading customers without having
to interactively set a DBA password and communicate it to (or from)
the customer. This in turn cuts down our install and upgrade times.
3. The ability to connect to the Unix-domain socket from within a
change-rooted environment. We run CGI programs chrooted to the
user's home directory, which is another reason why we need to be
able to specify where the Unix-domain socket is, instead of /tmp.
4. The ability to, if run as root, open a pid file in /var/run as
root, and then setuid to the desired user. (mysqld -u can almost
do this; I had to patch it, too).
The patch below fixes problem 1-3. I plan to address #4, also, but
haven't done so yet. These diffs are big enough that they should give
the PG development team something to think about in the meantime :-)
Also, I'm about to leave for 2 weeks' vacation, so I thought I'd get
out what I have, which works (for the problems it tackles), now.
With these changes, we can set up and run PostgreSQL with scripts the
same way we can with apache or proftpd or mysql.
In summary, this patch makes the following enhancements:
1. Adds an environment variable PGUNIXSOCKET, analogous to MYSQL_UNIX_PORT,
and command line options -k --unix-socket to the relevant programs.
2. Adds a -h option to postmaster to set the hostname or IP address to
listen on instead of the default INADDR_ANY.
3. Extends some library interfaces to support the above.
4. Fixes a few memory leaks in PQconnectdb().
The default behavior is unchanged from stock 7.0.2; if you don't use
any of these new features, they don't change the operation.
David J. MacKenzie
2000-11-13 16:18:15 +01:00
|
|
|
|
2003-04-28 06:29:12 +02:00
|
|
|
if (pgport && pgport[0] != '\0')
|
1998-05-07 01:51:16 +02:00
|
|
|
{
|
2022-06-16 21:50:56 +02:00
|
|
|
free(conn->pgport);
|
2003-04-28 06:29:12 +02:00
|
|
|
conn->pgport = strdup(pgport);
|
2014-11-25 11:55:00 +01:00
|
|
|
if (!conn->pgport)
|
|
|
|
goto oom_error;
|
1998-05-07 01:51:16 +02:00
|
|
|
}
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2003-04-28 06:29:12 +02:00
|
|
|
if (pgoptions && pgoptions[0] != '\0')
|
1998-05-07 01:51:16 +02:00
|
|
|
{
|
2022-06-16 21:50:56 +02:00
|
|
|
free(conn->pgoptions);
|
1998-05-07 01:51:16 +02:00
|
|
|
conn->pgoptions = strdup(pgoptions);
|
2014-11-25 11:55:00 +01:00
|
|
|
if (!conn->pgoptions)
|
|
|
|
goto oom_error;
|
2003-04-28 06:29:12 +02:00
|
|
|
}
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2003-04-28 06:29:12 +02:00
|
|
|
if (login && login[0] != '\0')
|
1998-05-07 01:51:16 +02:00
|
|
|
{
|
2022-06-16 21:50:56 +02:00
|
|
|
free(conn->pguser);
|
2003-04-28 06:29:12 +02:00
|
|
|
conn->pguser = strdup(login);
|
2014-11-25 11:55:00 +01:00
|
|
|
if (!conn->pguser)
|
|
|
|
goto oom_error;
|
1998-05-07 01:51:16 +02:00
|
|
|
}
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2003-04-28 06:29:12 +02:00
|
|
|
if (pwd && pwd[0] != '\0')
|
|
|
|
{
|
2022-06-16 21:50:56 +02:00
|
|
|
free(conn->pgpass);
|
2002-08-15 04:56:19 +02:00
|
|
|
conn->pgpass = strdup(pwd);
|
2014-11-25 11:55:00 +01:00
|
|
|
if (!conn->pgpass)
|
|
|
|
goto oom_error;
|
2003-04-28 06:29:12 +02:00
|
|
|
}
|
2002-10-25 01:35:55 +02:00
|
|
|
|
2003-04-28 06:29:12 +02:00
|
|
|
/*
|
|
|
|
* Compute derived options
|
|
|
|
*/
|
|
|
|
if (!connectOptions2(conn))
|
|
|
|
return conn;
|
2000-08-30 16:54:24 +02:00
|
|
|
|
2003-04-28 06:29:12 +02:00
|
|
|
/*
|
|
|
|
* Connect to the database
|
|
|
|
*/
|
|
|
|
if (connectDBStart(conn))
|
|
|
|
(void) connectDBComplete(conn);
|
2000-04-12 19:17:23 +02:00
|
|
|
|
1996-08-19 15:25:40 +02:00
|
|
|
return conn;
|
2014-11-25 11:55:00 +01:00
|
|
|
|
|
|
|
oom_error:
|
|
|
|
conn->status = CONNECTION_BAD;
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBufferStr(&conn->errorMessage,
|
|
|
|
libpq_gettext("out of memory\n"));
|
2014-11-25 11:55:00 +01:00
|
|
|
return conn;
|
1996-07-09 08:22:35 +02:00
|
|
|
}
|
|
|
|
|
1997-09-07 07:04:48 +02:00
|
|
|
|
1999-11-30 04:08:19 +01:00
|
|
|
/* ----------
|
|
|
|
* connectNoDelay -
|
|
|
|
* Sets the TCP_NODELAY socket option.
|
|
|
|
* Returns 1 if successful, 0 if not.
|
|
|
|
* ----------
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
connectNoDelay(PGconn *conn)
|
|
|
|
{
|
2003-06-12 09:36:51 +02:00
|
|
|
#ifdef TCP_NODELAY
|
1999-11-30 04:08:19 +01:00
|
|
|
int on = 1;
|
|
|
|
|
2000-05-21 23:19:53 +02:00
|
|
|
if (setsockopt(conn->sock, IPPROTO_TCP, TCP_NODELAY,
|
2000-06-14 20:18:01 +02:00
|
|
|
(char *) &on,
|
1999-11-30 04:08:19 +01:00
|
|
|
sizeof(on)) < 0)
|
|
|
|
{
|
2018-09-26 18:35:57 +02:00
|
|
|
char sebuf[PG_STRERROR_R_BUFLEN];
|
2003-06-14 19:49:54 +02:00
|
|
|
|
2008-10-27 10:42:31 +01:00
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
2001-07-15 15:45:04 +02:00
|
|
|
libpq_gettext("could not set socket to TCP no delay mode: %s\n"),
|
2003-06-14 19:49:54 +02:00
|
|
|
SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
|
1999-11-30 04:08:19 +01:00
|
|
|
return 0;
|
|
|
|
}
|
2003-06-12 09:36:51 +02:00
|
|
|
#endif
|
1999-11-30 04:08:19 +01:00
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2018-11-19 18:34:12 +01:00
|
|
|
/* ----------
|
|
|
|
* Write currently connected IP address into host_addr (of len host_addr_len).
|
|
|
|
* If unable to, set it to the empty string.
|
|
|
|
* ----------
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
getHostaddr(PGconn *conn, char *host_addr, int host_addr_len)
|
|
|
|
{
|
|
|
|
struct sockaddr_storage *addr = &conn->raddr.addr;
|
|
|
|
|
2019-06-15 00:02:26 +02:00
|
|
|
if (addr->ss_family == AF_INET)
|
2018-11-19 18:34:12 +01:00
|
|
|
{
|
2019-08-19 01:27:23 +02:00
|
|
|
if (pg_inet_net_ntop(AF_INET,
|
|
|
|
&((struct sockaddr_in *) addr)->sin_addr.s_addr,
|
|
|
|
32,
|
|
|
|
host_addr, host_addr_len) == NULL)
|
2018-11-19 18:34:12 +01:00
|
|
|
host_addr[0] = '\0';
|
|
|
|
}
|
|
|
|
else if (addr->ss_family == AF_INET6)
|
|
|
|
{
|
2019-08-19 01:27:23 +02:00
|
|
|
if (pg_inet_net_ntop(AF_INET6,
|
|
|
|
&((struct sockaddr_in6 *) addr)->sin6_addr.s6_addr,
|
|
|
|
128,
|
|
|
|
host_addr, host_addr_len) == NULL)
|
2018-11-19 18:34:12 +01:00
|
|
|
host_addr[0] = '\0';
|
|
|
|
}
|
|
|
|
else
|
|
|
|
host_addr[0] = '\0';
|
|
|
|
}
|
1999-11-30 04:08:19 +01:00
|
|
|
|
2021-01-21 22:10:18 +01:00
|
|
|
/*
|
|
|
|
* emitHostIdentityInfo -
|
|
|
|
* Speculatively append "connection to server so-and-so failed: " to
|
|
|
|
* conn->errorMessage once we've identified the current connection target
|
|
|
|
* address. This ensures that any subsequent error message will be properly
|
|
|
|
* attributed to the server we couldn't connect to. conn->raddr must be
|
|
|
|
* valid, and the result of getHostaddr() must be supplied.
|
2000-12-01 00:20:51 +01:00
|
|
|
*/
|
|
|
|
static void
|
2021-01-21 22:10:18 +01:00
|
|
|
emitHostIdentityInfo(PGconn *conn, const char *host_addr)
|
2000-12-01 00:20:51 +01:00
|
|
|
{
|
2022-02-15 10:03:52 +01:00
|
|
|
if (conn->raddr.addr.ss_family == AF_UNIX)
|
2003-07-24 01:30:41 +02:00
|
|
|
{
|
|
|
|
char service[NI_MAXHOST];
|
|
|
|
|
2005-10-17 18:24:20 +02:00
|
|
|
pg_getnameinfo_all(&conn->raddr.addr, conn->raddr.salen,
|
|
|
|
NULL, 0,
|
|
|
|
service, sizeof(service),
|
|
|
|
NI_NUMERICSERV);
|
2008-10-27 10:42:31 +01:00
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
2021-01-21 22:10:18 +01:00
|
|
|
libpq_gettext("connection to server on socket \"%s\" failed: "),
|
2003-07-24 01:30:41 +02:00
|
|
|
service);
|
|
|
|
}
|
2000-12-01 00:20:51 +01:00
|
|
|
else
|
2003-07-24 01:30:41 +02:00
|
|
|
{
|
2011-05-19 21:56:53 +02:00
|
|
|
const char *displayed_host;
|
2016-11-03 14:25:20 +01:00
|
|
|
const char *displayed_port;
|
2010-11-24 23:04:19 +01:00
|
|
|
|
2016-11-03 14:25:20 +01:00
|
|
|
/* To which host and port were we actually connecting? */
|
2017-07-10 11:28:57 +02:00
|
|
|
if (conn->connhost[conn->whichhost].type == CHT_HOST_ADDRESS)
|
|
|
|
displayed_host = conn->connhost[conn->whichhost].hostaddr;
|
|
|
|
else
|
|
|
|
displayed_host = conn->connhost[conn->whichhost].host;
|
2016-11-03 14:25:20 +01:00
|
|
|
displayed_port = conn->connhost[conn->whichhost].port;
|
|
|
|
if (displayed_port == NULL || displayed_port[0] == '\0')
|
|
|
|
displayed_port = DEF_PGPORT_STR;
|
2011-05-19 21:56:53 +02:00
|
|
|
|
2010-12-18 17:25:41 +01:00
|
|
|
/*
|
|
|
|
* If the user did not supply an IP address using 'hostaddr', and
|
|
|
|
* 'host' was missing or does not match our lookup, display the
|
|
|
|
* looked-up IP address.
|
|
|
|
*/
|
2017-07-10 11:28:57 +02:00
|
|
|
if (conn->connhost[conn->whichhost].type != CHT_HOST_ADDRESS &&
|
2021-01-11 20:03:39 +01:00
|
|
|
host_addr[0] &&
|
2017-07-10 11:28:57 +02:00
|
|
|
strcmp(displayed_host, host_addr) != 0)
|
2011-05-19 21:56:53 +02:00
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
2021-01-21 22:10:18 +01:00
|
|
|
libpq_gettext("connection to server at \"%s\" (%s), port %s failed: "),
|
2018-11-19 18:34:12 +01:00
|
|
|
displayed_host, host_addr,
|
2016-11-03 14:25:20 +01:00
|
|
|
displayed_port);
|
2011-05-19 21:56:53 +02:00
|
|
|
else
|
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
2021-01-21 22:10:18 +01:00
|
|
|
libpq_gettext("connection to server at \"%s\", port %s failed: "),
|
2011-05-19 21:56:53 +02:00
|
|
|
displayed_host,
|
2016-11-03 14:25:20 +01:00
|
|
|
displayed_port);
|
2003-07-24 01:30:41 +02:00
|
|
|
}
|
2000-12-01 00:20:51 +01:00
|
|
|
}
|
|
|
|
|
2021-01-11 20:03:39 +01:00
|
|
|
/* ----------
|
|
|
|
* connectFailureMessage -
|
|
|
|
* create a friendly error message on connection failure,
|
|
|
|
* using the given errno value. Use this for error cases that
|
|
|
|
* imply that there's no server there.
|
|
|
|
* ----------
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
connectFailureMessage(PGconn *conn, int errorno)
|
|
|
|
{
|
|
|
|
char sebuf[PG_STRERROR_R_BUFLEN];
|
|
|
|
|
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
|
|
|
"%s\n",
|
|
|
|
SOCK_STRERROR(errorno, sebuf, sizeof(sebuf)));
|
|
|
|
|
2022-02-15 10:03:52 +01:00
|
|
|
if (conn->raddr.addr.ss_family == AF_UNIX)
|
2021-01-11 20:03:39 +01:00
|
|
|
appendPQExpBufferStr(&conn->errorMessage,
|
|
|
|
libpq_gettext("\tIs the server running locally and accepting connections on that socket?\n"));
|
|
|
|
else
|
|
|
|
appendPQExpBufferStr(&conn->errorMessage,
|
|
|
|
libpq_gettext("\tIs the server running on that host and accepting TCP/IP connections?\n"));
|
|
|
|
}
|
|
|
|
|
Add TCP keepalive support to libpq.
This adds four additional connection parameters to libpq: keepalives,
keepalives_idle, keepalives_count, and keepalives_interval.
keepalives default to on, per discussion, but can be turned off by
specifying keepalives=0. The remaining parameters, where supported,
can be used to adjust how often keepalives are sent and how many
can be lost before the connection is broken.
The immediate motivation for this patch is to make sure that
walreceiver will eventually notice if the master reboots without
closing the connection cleanly, but it should be helpful in other
cases as well.
Tollef Fog Heen, Fujii Masao, and me.
2010-06-23 23:54:13 +02:00
|
|
|
/*
|
|
|
|
* Should we use keepalives? Returns 1 if yes, 0 if no, and -1 if
|
|
|
|
* conn->keepalives is set to a value which is not parseable as an
|
|
|
|
* integer.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
useKeepalives(PGconn *conn)
|
|
|
|
{
|
|
|
|
char *ep;
|
|
|
|
int val;
|
|
|
|
|
|
|
|
if (conn->keepalives == NULL)
|
|
|
|
return 1;
|
|
|
|
val = strtol(conn->keepalives, &ep, 10);
|
|
|
|
if (*ep)
|
|
|
|
return -1;
|
|
|
|
return val != 0 ? 1 : 0;
|
|
|
|
}
|
|
|
|
|
2018-09-11 23:46:01 +02:00
|
|
|
/*
|
|
|
|
* Parse and try to interpret "value" as an integer value, and if successful,
|
|
|
|
* store it in *result, complaining if there is any trailing garbage or an
|
2019-10-21 04:17:13 +02:00
|
|
|
* overflow. This allows any number of leading and trailing whitespaces.
|
2018-09-11 23:46:01 +02:00
|
|
|
*/
|
|
|
|
static bool
|
|
|
|
parse_int_param(const char *value, int *result, PGconn *conn,
|
|
|
|
const char *context)
|
|
|
|
{
|
|
|
|
char *end;
|
|
|
|
long numval;
|
|
|
|
|
2019-10-23 04:34:18 +02:00
|
|
|
Assert(value != NULL);
|
|
|
|
|
2018-09-11 23:46:01 +02:00
|
|
|
*result = 0;
|
|
|
|
|
2019-10-21 04:17:13 +02:00
|
|
|
/* strtol(3) skips leading whitespaces */
|
2018-09-11 23:46:01 +02:00
|
|
|
errno = 0;
|
|
|
|
numval = strtol(value, &end, 10);
|
|
|
|
|
2019-10-21 04:17:13 +02:00
|
|
|
/*
|
|
|
|
* If no progress was done during the parsing or an error happened, fail.
|
|
|
|
* This tests properly for overflows of the result.
|
|
|
|
*/
|
|
|
|
if (value == end || errno != 0 || numval != (int) numval)
|
|
|
|
goto error;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Skip any trailing whitespace; if anything but whitespace remains before
|
|
|
|
* the terminating character, fail
|
|
|
|
*/
|
2019-10-23 04:34:18 +02:00
|
|
|
while (*end != '\0' && isspace((unsigned char) *end))
|
2019-10-21 04:17:13 +02:00
|
|
|
end++;
|
|
|
|
|
2019-10-23 04:34:18 +02:00
|
|
|
if (*end != '\0')
|
2019-10-21 04:17:13 +02:00
|
|
|
goto error;
|
|
|
|
|
|
|
|
*result = numval;
|
|
|
|
return true;
|
|
|
|
|
|
|
|
error:
|
2018-09-11 23:46:01 +02:00
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
2019-09-06 16:12:28 +02:00
|
|
|
libpq_gettext("invalid integer value \"%s\" for connection option \"%s\"\n"),
|
2018-09-11 23:46:01 +02:00
|
|
|
value, context);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2010-07-08 12:20:14 +02:00
|
|
|
#ifndef WIN32
|
Add TCP keepalive support to libpq.
This adds four additional connection parameters to libpq: keepalives,
keepalives_idle, keepalives_count, and keepalives_interval.
keepalives default to on, per discussion, but can be turned off by
specifying keepalives=0. The remaining parameters, where supported,
can be used to adjust how often keepalives are sent and how many
can be lost before the connection is broken.
The immediate motivation for this patch is to make sure that
walreceiver will eventually notice if the master reboots without
closing the connection cleanly, but it should be helpful in other
cases as well.
Tollef Fog Heen, Fujii Masao, and me.
2010-06-23 23:54:13 +02:00
|
|
|
/*
|
|
|
|
* Set the keepalive idle timer.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
setKeepalivesIdle(PGconn *conn)
|
|
|
|
{
|
|
|
|
int idle;
|
|
|
|
|
|
|
|
if (conn->keepalives_idle == NULL)
|
|
|
|
return 1;
|
|
|
|
|
2018-09-11 23:46:01 +02:00
|
|
|
if (!parse_int_param(conn->keepalives_idle, &idle, conn,
|
|
|
|
"keepalives_idle"))
|
|
|
|
return 0;
|
Add TCP keepalive support to libpq.
This adds four additional connection parameters to libpq: keepalives,
keepalives_idle, keepalives_count, and keepalives_interval.
keepalives default to on, per discussion, but can be turned off by
specifying keepalives=0. The remaining parameters, where supported,
can be used to adjust how often keepalives are sent and how many
can be lost before the connection is broken.
The immediate motivation for this patch is to make sure that
walreceiver will eventually notice if the master reboots without
closing the connection cleanly, but it should be helpful in other
cases as well.
Tollef Fog Heen, Fujii Masao, and me.
2010-06-23 23:54:13 +02:00
|
|
|
if (idle < 0)
|
|
|
|
idle = 0;
|
|
|
|
|
2017-06-28 18:30:16 +02:00
|
|
|
#ifdef PG_TCP_KEEPALIVE_IDLE
|
|
|
|
if (setsockopt(conn->sock, IPPROTO_TCP, PG_TCP_KEEPALIVE_IDLE,
|
2010-07-06 23:14:25 +02:00
|
|
|
(char *) &idle, sizeof(idle)) < 0)
|
|
|
|
{
|
2018-09-26 18:35:57 +02:00
|
|
|
char sebuf[PG_STRERROR_R_BUFLEN];
|
2010-07-06 23:14:25 +02:00
|
|
|
|
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
2021-04-23 14:18:11 +02:00
|
|
|
libpq_gettext("%s(%s) failed: %s\n"),
|
|
|
|
"setsockopt",
|
2017-06-28 18:30:16 +02:00
|
|
|
PG_TCP_KEEPALIVE_IDLE_STR,
|
2010-07-06 23:14:25 +02:00
|
|
|
SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
|
|
|
|
return 0;
|
|
|
|
}
|
Add TCP keepalive support to libpq.
This adds four additional connection parameters to libpq: keepalives,
keepalives_idle, keepalives_count, and keepalives_interval.
keepalives default to on, per discussion, but can be turned off by
specifying keepalives=0. The remaining parameters, where supported,
can be used to adjust how often keepalives are sent and how many
can be lost before the connection is broken.
The immediate motivation for this patch is to make sure that
walreceiver will eventually notice if the master reboots without
closing the connection cleanly, but it should be helpful in other
cases as well.
Tollef Fog Heen, Fujii Masao, and me.
2010-06-23 23:54:13 +02:00
|
|
|
#endif
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Set the keepalive interval.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
setKeepalivesInterval(PGconn *conn)
|
|
|
|
{
|
|
|
|
int interval;
|
|
|
|
|
|
|
|
if (conn->keepalives_interval == NULL)
|
|
|
|
return 1;
|
|
|
|
|
2018-09-11 23:46:01 +02:00
|
|
|
if (!parse_int_param(conn->keepalives_interval, &interval, conn,
|
|
|
|
"keepalives_interval"))
|
|
|
|
return 0;
|
Add TCP keepalive support to libpq.
This adds four additional connection parameters to libpq: keepalives,
keepalives_idle, keepalives_count, and keepalives_interval.
keepalives default to on, per discussion, but can be turned off by
specifying keepalives=0. The remaining parameters, where supported,
can be used to adjust how often keepalives are sent and how many
can be lost before the connection is broken.
The immediate motivation for this patch is to make sure that
walreceiver will eventually notice if the master reboots without
closing the connection cleanly, but it should be helpful in other
cases as well.
Tollef Fog Heen, Fujii Masao, and me.
2010-06-23 23:54:13 +02:00
|
|
|
if (interval < 0)
|
|
|
|
interval = 0;
|
|
|
|
|
|
|
|
#ifdef TCP_KEEPINTVL
|
|
|
|
if (setsockopt(conn->sock, IPPROTO_TCP, TCP_KEEPINTVL,
|
|
|
|
(char *) &interval, sizeof(interval)) < 0)
|
|
|
|
{
|
2018-09-26 18:35:57 +02:00
|
|
|
char sebuf[PG_STRERROR_R_BUFLEN];
|
Add TCP keepalive support to libpq.
This adds four additional connection parameters to libpq: keepalives,
keepalives_idle, keepalives_count, and keepalives_interval.
keepalives default to on, per discussion, but can be turned off by
specifying keepalives=0. The remaining parameters, where supported,
can be used to adjust how often keepalives are sent and how many
can be lost before the connection is broken.
The immediate motivation for this patch is to make sure that
walreceiver will eventually notice if the master reboots without
closing the connection cleanly, but it should be helpful in other
cases as well.
Tollef Fog Heen, Fujii Masao, and me.
2010-06-23 23:54:13 +02:00
|
|
|
|
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
2021-04-23 14:18:11 +02:00
|
|
|
libpq_gettext("%s(%s) failed: %s\n"),
|
|
|
|
"setsockopt",
|
2017-06-28 18:30:16 +02:00
|
|
|
"TCP_KEEPINTVL",
|
Add TCP keepalive support to libpq.
This adds four additional connection parameters to libpq: keepalives,
keepalives_idle, keepalives_count, and keepalives_interval.
keepalives default to on, per discussion, but can be turned off by
specifying keepalives=0. The remaining parameters, where supported,
can be used to adjust how often keepalives are sent and how many
can be lost before the connection is broken.
The immediate motivation for this patch is to make sure that
walreceiver will eventually notice if the master reboots without
closing the connection cleanly, but it should be helpful in other
cases as well.
Tollef Fog Heen, Fujii Masao, and me.
2010-06-23 23:54:13 +02:00
|
|
|
SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Set the count of lost keepalive packets that will trigger a connection
|
|
|
|
* break.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
setKeepalivesCount(PGconn *conn)
|
|
|
|
{
|
|
|
|
int count;
|
|
|
|
|
|
|
|
if (conn->keepalives_count == NULL)
|
|
|
|
return 1;
|
|
|
|
|
2018-09-11 23:46:01 +02:00
|
|
|
if (!parse_int_param(conn->keepalives_count, &count, conn,
|
|
|
|
"keepalives_count"))
|
|
|
|
return 0;
|
Add TCP keepalive support to libpq.
This adds four additional connection parameters to libpq: keepalives,
keepalives_idle, keepalives_count, and keepalives_interval.
keepalives default to on, per discussion, but can be turned off by
specifying keepalives=0. The remaining parameters, where supported,
can be used to adjust how often keepalives are sent and how many
can be lost before the connection is broken.
The immediate motivation for this patch is to make sure that
walreceiver will eventually notice if the master reboots without
closing the connection cleanly, but it should be helpful in other
cases as well.
Tollef Fog Heen, Fujii Masao, and me.
2010-06-23 23:54:13 +02:00
|
|
|
if (count < 0)
|
|
|
|
count = 0;
|
|
|
|
|
|
|
|
#ifdef TCP_KEEPCNT
|
|
|
|
if (setsockopt(conn->sock, IPPROTO_TCP, TCP_KEEPCNT,
|
|
|
|
(char *) &count, sizeof(count)) < 0)
|
|
|
|
{
|
2018-09-26 18:35:57 +02:00
|
|
|
char sebuf[PG_STRERROR_R_BUFLEN];
|
Add TCP keepalive support to libpq.
This adds four additional connection parameters to libpq: keepalives,
keepalives_idle, keepalives_count, and keepalives_interval.
keepalives default to on, per discussion, but can be turned off by
specifying keepalives=0. The remaining parameters, where supported,
can be used to adjust how often keepalives are sent and how many
can be lost before the connection is broken.
The immediate motivation for this patch is to make sure that
walreceiver will eventually notice if the master reboots without
closing the connection cleanly, but it should be helpful in other
cases as well.
Tollef Fog Heen, Fujii Masao, and me.
2010-06-23 23:54:13 +02:00
|
|
|
|
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
2021-04-23 14:18:11 +02:00
|
|
|
libpq_gettext("%s(%s) failed: %s\n"),
|
|
|
|
"setsockopt",
|
2017-06-28 18:30:16 +02:00
|
|
|
"TCP_KEEPCNT",
|
Add TCP keepalive support to libpq.
This adds four additional connection parameters to libpq: keepalives,
keepalives_idle, keepalives_count, and keepalives_interval.
keepalives default to on, per discussion, but can be turned off by
specifying keepalives=0. The remaining parameters, where supported,
can be used to adjust how often keepalives are sent and how many
can be lost before the connection is broken.
The immediate motivation for this patch is to make sure that
walreceiver will eventually notice if the master reboots without
closing the connection cleanly, but it should be helpful in other
cases as well.
Tollef Fog Heen, Fujii Masao, and me.
2010-06-23 23:54:13 +02:00
|
|
|
SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
2017-06-28 00:47:57 +02:00
|
|
|
#else /* WIN32 */
|
2010-07-08 18:19:50 +02:00
|
|
|
#ifdef SIO_KEEPALIVE_VALS
|
2010-07-08 12:20:14 +02:00
|
|
|
/*
|
|
|
|
* Enable keepalives and set the keepalive values on Win32,
|
|
|
|
* where they are always set in one batch.
|
Make PQcancel use the PGconn's tcp_user_timeout and keepalives settings.
If connectivity to the server has been lost or become flaky, the
user might well try to send a query cancel. It's highly annoying
if PQcancel hangs up in such a case, but that's exactly what's likely
to happen. To ameliorate this problem, apply the PGconn's
tcp_user_timeout and keepalives settings to the TCP connection used
to send the cancel. This should be safe on Unix machines, since POSIX
specifies that setsockopt() is async-signal-safe. We are guessing
that WSAIoctl(SIO_KEEPALIVE_VALS) is similarly safe on Windows.
(Note that at least in psql and our other frontend programs, there's
no safety issue involved anyway, since we run PQcancel in its own
thread rather than in a signal handler.)
Most of the value here comes from the expectation that tcp_user_timeout
will be applied as a connection timeout. That appears to happen on
Linux, even though its tcp(7) man page claims differently. The
keepalive options probably won't help much, but as long as we can
apply them for not much code, we might as well.
Jelte Fennema, reviewed by Fujii Masao and myself
Discussion: https://postgr.es/m/AM5PR83MB017870DE81FC84D5E21E9D1EF7AA9@AM5PR83MB0178.EURPRD83.prod.outlook.com
2022-01-18 20:02:43 +01:00
|
|
|
*
|
|
|
|
* CAUTION: This needs to be signal safe, since it's used by PQcancel.
|
2010-07-08 12:20:14 +02:00
|
|
|
*/
|
|
|
|
static int
|
Make PQcancel use the PGconn's tcp_user_timeout and keepalives settings.
If connectivity to the server has been lost or become flaky, the
user might well try to send a query cancel. It's highly annoying
if PQcancel hangs up in such a case, but that's exactly what's likely
to happen. To ameliorate this problem, apply the PGconn's
tcp_user_timeout and keepalives settings to the TCP connection used
to send the cancel. This should be safe on Unix machines, since POSIX
specifies that setsockopt() is async-signal-safe. We are guessing
that WSAIoctl(SIO_KEEPALIVE_VALS) is similarly safe on Windows.
(Note that at least in psql and our other frontend programs, there's
no safety issue involved anyway, since we run PQcancel in its own
thread rather than in a signal handler.)
Most of the value here comes from the expectation that tcp_user_timeout
will be applied as a connection timeout. That appears to happen on
Linux, even though its tcp(7) man page claims differently. The
keepalive options probably won't help much, but as long as we can
apply them for not much code, we might as well.
Jelte Fennema, reviewed by Fujii Masao and myself
Discussion: https://postgr.es/m/AM5PR83MB017870DE81FC84D5E21E9D1EF7AA9@AM5PR83MB0178.EURPRD83.prod.outlook.com
2022-01-18 20:02:43 +01:00
|
|
|
setKeepalivesWin32(pgsocket sock, int idle, int interval)
|
2010-07-08 12:20:14 +02:00
|
|
|
{
|
|
|
|
struct tcp_keepalive ka;
|
|
|
|
DWORD retsize;
|
|
|
|
|
|
|
|
if (idle <= 0)
|
|
|
|
idle = 2 * 60 * 60; /* 2 hours = default */
|
|
|
|
if (interval <= 0)
|
|
|
|
interval = 1; /* 1 second = default */
|
|
|
|
|
|
|
|
ka.onoff = 1;
|
|
|
|
ka.keepalivetime = idle * 1000;
|
|
|
|
ka.keepaliveinterval = interval * 1000;
|
|
|
|
|
Make PQcancel use the PGconn's tcp_user_timeout and keepalives settings.
If connectivity to the server has been lost or become flaky, the
user might well try to send a query cancel. It's highly annoying
if PQcancel hangs up in such a case, but that's exactly what's likely
to happen. To ameliorate this problem, apply the PGconn's
tcp_user_timeout and keepalives settings to the TCP connection used
to send the cancel. This should be safe on Unix machines, since POSIX
specifies that setsockopt() is async-signal-safe. We are guessing
that WSAIoctl(SIO_KEEPALIVE_VALS) is similarly safe on Windows.
(Note that at least in psql and our other frontend programs, there's
no safety issue involved anyway, since we run PQcancel in its own
thread rather than in a signal handler.)
Most of the value here comes from the expectation that tcp_user_timeout
will be applied as a connection timeout. That appears to happen on
Linux, even though its tcp(7) man page claims differently. The
keepalive options probably won't help much, but as long as we can
apply them for not much code, we might as well.
Jelte Fennema, reviewed by Fujii Masao and myself
Discussion: https://postgr.es/m/AM5PR83MB017870DE81FC84D5E21E9D1EF7AA9@AM5PR83MB0178.EURPRD83.prod.outlook.com
2022-01-18 20:02:43 +01:00
|
|
|
if (WSAIoctl(sock,
|
2010-07-08 12:20:14 +02:00
|
|
|
SIO_KEEPALIVE_VALS,
|
|
|
|
(LPVOID) &ka,
|
|
|
|
sizeof(ka),
|
|
|
|
NULL,
|
|
|
|
0,
|
|
|
|
&retsize,
|
|
|
|
NULL,
|
|
|
|
NULL)
|
|
|
|
!= 0)
|
Make PQcancel use the PGconn's tcp_user_timeout and keepalives settings.
If connectivity to the server has been lost or become flaky, the
user might well try to send a query cancel. It's highly annoying
if PQcancel hangs up in such a case, but that's exactly what's likely
to happen. To ameliorate this problem, apply the PGconn's
tcp_user_timeout and keepalives settings to the TCP connection used
to send the cancel. This should be safe on Unix machines, since POSIX
specifies that setsockopt() is async-signal-safe. We are guessing
that WSAIoctl(SIO_KEEPALIVE_VALS) is similarly safe on Windows.
(Note that at least in psql and our other frontend programs, there's
no safety issue involved anyway, since we run PQcancel in its own
thread rather than in a signal handler.)
Most of the value here comes from the expectation that tcp_user_timeout
will be applied as a connection timeout. That appears to happen on
Linux, even though its tcp(7) man page claims differently. The
keepalive options probably won't help much, but as long as we can
apply them for not much code, we might as well.
Jelte Fennema, reviewed by Fujii Masao and myself
Discussion: https://postgr.es/m/AM5PR83MB017870DE81FC84D5E21E9D1EF7AA9@AM5PR83MB0178.EURPRD83.prod.outlook.com
2022-01-18 20:02:43 +01:00
|
|
|
return 0;
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
prepKeepalivesWin32(PGconn *conn)
|
|
|
|
{
|
|
|
|
int idle = -1;
|
|
|
|
int interval = -1;
|
|
|
|
|
|
|
|
if (conn->keepalives_idle &&
|
|
|
|
!parse_int_param(conn->keepalives_idle, &idle, conn,
|
|
|
|
"keepalives_idle"))
|
|
|
|
return 0;
|
|
|
|
if (conn->keepalives_interval &&
|
|
|
|
!parse_int_param(conn->keepalives_interval, &interval, conn,
|
|
|
|
"keepalives_interval"))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (!setKeepalivesWin32(conn->sock, idle, interval))
|
2010-07-08 12:20:14 +02:00
|
|
|
{
|
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
2021-05-03 07:27:31 +02:00
|
|
|
libpq_gettext("%s(%s) failed: error code %d\n"),
|
|
|
|
"WSAIoctl", "SIO_KEEPALIVE_VALS",
|
2010-07-08 12:20:14 +02:00
|
|
|
WSAGetLastError());
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
return 1;
|
|
|
|
}
|
2010-07-08 18:19:50 +02:00
|
|
|
#endif /* SIO_KEEPALIVE_VALS */
|
2010-07-08 12:20:14 +02:00
|
|
|
#endif /* WIN32 */
|
2000-12-01 00:20:51 +01:00
|
|
|
|
Add support TCP user timeout in libpq and the backend server
Similarly to the set of parameters for keepalive, a connection parameter
for libpq is added as well as a backend GUC, called tcp_user_timeout.
Increasing the TCP user timeout is useful to allow a connection to
survive extended periods without end-to-end connection, and decreasing
it allows application to fail faster. By default, the parameter is 0,
which makes the connection use the system default, and follows a logic
close to the keepalive parameters in its handling. When connecting
through a Unix-socket domain, the parameters have no effect.
Author: Ryohei Nagaura
Reviewed-by: Fabien Coelho, Robert Haas, Kyotaro Horiguchi, Kirk
Jamison, Mikalai Keida, Takayuki Tsunakawa, Andrei Yahorau
Discussion: https://postgr.es/m/EDA4195584F5064680D8130B1CA91C45367328@G01JPEXMBYT04
2019-04-06 08:23:37 +02:00
|
|
|
/*
|
|
|
|
* Set the TCP user timeout.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
setTCPUserTimeout(PGconn *conn)
|
|
|
|
{
|
|
|
|
int timeout;
|
|
|
|
|
|
|
|
if (conn->pgtcp_user_timeout == NULL)
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
if (!parse_int_param(conn->pgtcp_user_timeout, &timeout, conn,
|
|
|
|
"tcp_user_timeout"))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (timeout < 0)
|
|
|
|
timeout = 0;
|
|
|
|
|
|
|
|
#ifdef TCP_USER_TIMEOUT
|
|
|
|
if (setsockopt(conn->sock, IPPROTO_TCP, TCP_USER_TIMEOUT,
|
|
|
|
(char *) &timeout, sizeof(timeout)) < 0)
|
|
|
|
{
|
|
|
|
char sebuf[256];
|
|
|
|
|
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
2021-04-23 14:18:11 +02:00
|
|
|
libpq_gettext("%s(%s) failed: %s\n"),
|
|
|
|
"setsockopt",
|
Add support TCP user timeout in libpq and the backend server
Similarly to the set of parameters for keepalive, a connection parameter
for libpq is added as well as a backend GUC, called tcp_user_timeout.
Increasing the TCP user timeout is useful to allow a connection to
survive extended periods without end-to-end connection, and decreasing
it allows application to fail faster. By default, the parameter is 0,
which makes the connection use the system default, and follows a logic
close to the keepalive parameters in its handling. When connecting
through a Unix-socket domain, the parameters have no effect.
Author: Ryohei Nagaura
Reviewed-by: Fabien Coelho, Robert Haas, Kyotaro Horiguchi, Kirk
Jamison, Mikalai Keida, Takayuki Tsunakawa, Andrei Yahorau
Discussion: https://postgr.es/m/EDA4195584F5064680D8130B1CA91C45367328@G01JPEXMBYT04
2019-04-06 08:23:37 +02:00
|
|
|
"TCP_USER_TIMEOUT",
|
|
|
|
SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
1999-11-30 04:08:19 +01:00
|
|
|
/* ----------
|
|
|
|
 *		connectDBStart -
 *		Begin the process of making a connection to the backend.
 *
 * Returns 1 if successful, 0 if not.
 * ----------
 */
static int
connectDBStart(PGconn *conn)
{
	if (!conn)
		return 0;

	/* Connection options must have parsed cleanly in PQconnectStart */
	if (!conn->options_valid)
		goto connect_errReturn;

	/*
	 * Check for bad linking to backend-internal versions of src/common
	 * functions (see comments in link-canary.c for the reason we need this).
	 * Nobody but developers should see this message, so we don't bother
	 * translating it.
	 */
	if (!pg_link_canary_is_frontend())
	{
		/*
		 * Append (not overwrite): errorMessage accumulates across the whole
		 * connection attempt so earlier per-host failures aren't lost.
		 */
		appendPQExpBufferStr(&conn->errorMessage,
							 "libpq is incorrectly linked to backend functions\n");
		goto connect_errReturn;
	}

	/* Ensure our buffers are empty */
	conn->inStart = conn->inCursor = conn->inEnd = 0;
	conn->outCount = 0;

	/*
	 * Set up to try to connect to the first host.  (Setting whichhost = -1 is
	 * a bit of a cheat, but PQconnectPoll will advance it to 0 before
	 * anything else looks at it.)
	 */
	conn->whichhost = -1;
	conn->try_next_addr = false;
	conn->try_next_host = true;
	conn->status = CONNECTION_NEEDED;

	/* Also reset the target_server_type state if needed */
	if (conn->target_server_type == SERVER_TYPE_PREFER_STANDBY_PASS2)
		conn->target_server_type = SERVER_TYPE_PREFER_STANDBY;

	/*
	 * The code for processing CONNECTION_NEEDED state is in PQconnectPoll(),
	 * so that it can easily be re-executed if needed again during the
	 * asynchronous startup process.  However, we must run it once here,
	 * because callers expect a success return from this routine to mean that
	 * we are in PGRES_POLLING_WRITING connection state.
	 */
	if (PQconnectPoll(conn) == PGRES_POLLING_WRITING)
		return 1;

connect_errReturn:

	/*
	 * If we managed to open a socket, close it immediately rather than
	 * waiting till PQfinish.  (The application cannot have gotten the socket
	 * from PQsocket yet, so this doesn't risk breaking anything.)
	 */
	pqDropConnection(conn, true);
	conn->status = CONNECTION_BAD;
	return 0;
}
|
1998-01-26 02:42:53 +01:00
|
|
|
|
1998-09-01 06:40:42 +02:00
|
|
|
|
2001-08-17 17:11:15 +02:00
|
|
|
/*
 *		connectDBComplete
 *
 * Block and complete a connection.
 *
 * Drives PQconnectPoll() to completion, waiting on the socket between
 * polls, so that PQconnectdb-style (synchronous) entry points can share
 * the asynchronous state machine.
 *
 * Returns 1 on success, 0 on failure.
 */
static int
connectDBComplete(PGconn *conn)
{
	PostgresPollingStatusType flag = PGRES_POLLING_WRITING;
	time_t		finish_time = ((time_t) -1);	/* -1 means "no deadline" */
	int			timeout = 0;
	int			last_whichhost = -2;	/* certainly different from whichhost */
	struct addrinfo *last_addr_cur = NULL;

	if (conn == NULL || conn->status == CONNECTION_BAD)
		return 0;

	/*
	 * Set up a time limit, if connect_timeout isn't zero.
	 */
	if (conn->connect_timeout != NULL)
	{
		if (!parse_int_param(conn->connect_timeout, &timeout, conn,
							 "connect_timeout"))
		{
			/* mark the connection as bad to report the parsing failure */
			conn->status = CONNECTION_BAD;
			return 0;
		}

		if (timeout > 0)
		{
			/*
			 * Rounding could cause connection to fail unexpectedly quickly;
			 * to prevent possibly waiting hardly-at-all, insist on at least
			 * two seconds.
			 */
			if (timeout < 2)
				timeout = 2;
		}
		else					/* negative means 0 */
			timeout = 0;
	}

	for (;;)
	{
		int			ret = 0;

		/*
		 * (Re)start the connect_timeout timer if it's active and we are
		 * considering a different host than we were last time through.  If
		 * we've already succeeded, though, needn't recalculate.
		 */
		if (flag != PGRES_POLLING_OK &&
			timeout > 0 &&
			(conn->whichhost != last_whichhost ||
			 conn->addr_cur != last_addr_cur))
		{
			/* the timeout is per host name / IP address, not overall */
			finish_time = time(NULL) + timeout;
			last_whichhost = conn->whichhost;
			last_addr_cur = conn->addr_cur;
		}

		/*
		 * Wait, if necessary.  Note that the initial state (just after
		 * PQconnectStart) is to wait for the socket to select for writing.
		 */
		switch (flag)
		{
			case PGRES_POLLING_OK:
				return 1;		/* success! */

			case PGRES_POLLING_READING:
				/* pqWaitTimed: -1 = error, 1 = timeout, 0 = data ready */
				ret = pqWaitTimed(1, 0, conn, finish_time);
				if (ret == -1)
				{
					/* hard failure, eg select() problem, aborts everything */
					conn->status = CONNECTION_BAD;
					return 0;
				}
				break;

			case PGRES_POLLING_WRITING:
				ret = pqWaitTimed(0, 1, conn, finish_time);
				if (ret == -1)
				{
					/* hard failure, eg select() problem, aborts everything */
					conn->status = CONNECTION_BAD;
					return 0;
				}
				break;

			default:
				/* Just in case we failed to set it in PQconnectPoll */
				conn->status = CONNECTION_BAD;
				return 0;
		}

		if (ret == 1)			/* connect_timeout elapsed */
		{
			/*
			 * Give up on current server/address, try the next one.
			 */
			conn->try_next_addr = true;
			conn->status = CONNECTION_NEEDED;
		}

		/*
		 * Now try to advance the state machine.
		 */
		flag = PQconnectPoll(conn);
	}
}
|
|
|
|
|
|
|
|
/* ----------------
|
|
|
|
* PQconnectPoll
|
|
|
|
*
|
|
|
|
* Poll an asynchronous connection.
|
|
|
|
*
|
|
|
|
* Returns a PostgresPollingStatusType.
|
2002-08-27 18:21:51 +02:00
|
|
|
* Before calling this function, use select(2) to determine when data
|
|
|
|
 * has arrived.
|
1999-11-30 04:08:19 +01:00
|
|
|
*
|
|
|
|
* You must call PQfinish whether or not this fails.
|
|
|
|
*
|
|
|
|
* This function and PQconnectStart are intended to allow connections to be
|
|
|
|
* made without blocking the execution of your program on remote I/O. However,
|
|
|
|
* there are a number of caveats:
|
|
|
|
*
|
|
|
|
* o If you call PQtrace, ensure that the stream object into which you trace
|
2000-01-14 06:33:15 +01:00
|
|
|
* will not block.
|
1999-11-30 04:08:19 +01:00
|
|
|
* o If you do not supply an IP address for the remote host (i.e. you
|
2003-06-08 19:43:00 +02:00
|
|
|
* supply a host name instead) then PQconnectStart will block on
|
2022-08-13 23:57:48 +02:00
|
|
|
* getaddrinfo. You will be fine if using Unix sockets (i.e. by
|
2000-01-14 06:33:15 +01:00
|
|
|
* supplying neither a host name nor a host address).
|
1999-11-30 04:08:19 +01:00
|
|
|
* o If your backend wants to use Kerberos authentication then you must
|
|
|
|
* supply both a host name and a host address, otherwise this function
|
|
|
|
* may block on gethostname.
|
|
|
|
*
|
2000-01-14 06:33:15 +01:00
|
|
|
* ----------------
|
|
|
|
*/
|
1999-11-30 04:08:19 +01:00
|
|
|
PostgresPollingStatusType
|
|
|
|
PQconnectPoll(PGconn *conn)
|
|
|
|
{
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
bool reset_connection_state_machine = false;
|
|
|
|
bool need_new_connection = false;
|
1999-11-30 04:08:19 +01:00
|
|
|
PGresult *res;
|
2018-09-26 18:35:57 +02:00
|
|
|
char sebuf[PG_STRERROR_R_BUFLEN];
|
2009-07-24 19:58:31 +02:00
|
|
|
int optval;
|
2001-03-22 05:01:46 +01:00
|
|
|
|
1999-11-30 04:08:19 +01:00
|
|
|
if (conn == NULL)
|
|
|
|
return PGRES_POLLING_FAILED;
|
|
|
|
|
|
|
|
/* Get the new data */
|
|
|
|
switch (conn->status)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* We really shouldn't have been polled in these two cases, but we
|
|
|
|
* can handle it.
|
|
|
|
*/
|
|
|
|
case CONNECTION_BAD:
|
|
|
|
return PGRES_POLLING_FAILED;
|
|
|
|
case CONNECTION_OK:
|
|
|
|
return PGRES_POLLING_OK;
|
|
|
|
|
|
|
|
/* These are reading states */
|
|
|
|
case CONNECTION_AWAITING_RESPONSE:
|
|
|
|
case CONNECTION_AUTH_OK:
|
Extend the abilities of libpq's target_session_attrs parameter.
In addition to the existing options of "any" and "read-write", we
now support "read-only", "primary", "standby", and "prefer-standby".
"read-write" retains its previous meaning of "transactions are
read-write by default", and "read-only" inverts that. The other
three modes test specifically for hot-standby status, which is not
quite the same thing. (Setting default_transaction_read_only on
a primary server renders it read-only to this logic, but not a
standby.)
Furthermore, if talking to a v14 or later server, no extra network
round trip is needed to detect the session's status; the GUC_REPORT
variables delivered by the server are enough. When talking to an
older server, a SHOW or SELECT query is issued to detect session
read-only-ness or server hot-standby state, as needed.
Haribabu Kommi, Greg Nancarrow, Vignesh C, Tom Lane; reviewed at
various times by Laurenz Albe, Takayuki Tsunakawa, Peter Smith.
Discussion: https://postgr.es/m/CAF3+xM+8-ztOkaV9gHiJ3wfgENTq97QcjXQt+rbFQ6F7oNzt9A@mail.gmail.com
2021-03-03 02:17:45 +01:00
|
|
|
case CONNECTION_CHECK_WRITABLE:
|
|
|
|
case CONNECTION_CONSUME:
|
|
|
|
case CONNECTION_CHECK_STANDBY:
|
1998-01-26 02:42:53 +01:00
|
|
|
{
|
1999-11-30 04:08:19 +01:00
|
|
|
/* Load waiting data */
|
|
|
|
int n = pqReadData(conn);
|
|
|
|
|
|
|
|
if (n < 0)
|
|
|
|
goto error_return;
|
|
|
|
if (n == 0)
|
|
|
|
return PGRES_POLLING_READING;
|
2000-04-12 19:17:23 +02:00
|
|
|
|
1999-11-30 04:08:19 +01:00
|
|
|
break;
|
1998-01-26 02:42:53 +01:00
|
|
|
}
|
|
|
|
|
1999-11-30 04:08:19 +01:00
|
|
|
/* These are writing states, so we just proceed. */
|
|
|
|
case CONNECTION_STARTED:
|
|
|
|
case CONNECTION_MADE:
|
|
|
|
break;
|
|
|
|
|
2003-06-08 19:43:00 +02:00
|
|
|
/* Special cases: proceed without waiting. */
|
|
|
|
case CONNECTION_SSL_STARTUP:
|
|
|
|
case CONNECTION_NEEDED:
|
GSSAPI encryption support
On both the frontend and backend, prepare for GSSAPI encryption
support by moving common code for error handling into a separate file.
Fix a TODO for handling multiple status messages in the process.
Eliminate the OIDs, which have not been needed for some time.
Add frontend and backend encryption support functions. Keep the
context initiation for authentication-only separate on both the
frontend and backend in order to avoid concerns about changing the
requested flags to include encryption support.
In postmaster, pull GSSAPI authorization checking into a shared
function. Also share the initiator name between the encryption and
non-encryption codepaths.
For HBA, add "hostgssenc" and "hostnogssenc" entries that behave
similarly to their SSL counterparts. "hostgssenc" requires either
"gss", "trust", or "reject" for its authentication.
Similarly, add a "gssencmode" parameter to libpq. Supported values are
"disable", "require", and "prefer". Notably, negotiation will only be
attempted if credentials can be acquired. Move credential acquisition
into its own function to support this behavior.
Add a simple pg_stat_gssapi view similar to pg_stat_ssl, for monitoring
if GSSAPI authentication was used, what principal was used, and if
encryption is being used on the connection.
Finally, add documentation for everything new, and update existing
documentation on connection security.
Thanks to Michael Paquier for the Windows fixes.
Author: Robbie Harwood, with changes to the read/write functions by me.
Reviewed in various forms and at different times by: Michael Paquier,
Andres Freund, David Steele.
Discussion: https://www.postgresql.org/message-id/flat/jlg1tgq1ktm.fsf@thriss.redhat.com
2019-04-03 21:02:33 +02:00
|
|
|
case CONNECTION_GSS_STARTUP:
|
Extend the abilities of libpq's target_session_attrs parameter.
In addition to the existing options of "any" and "read-write", we
now support "read-only", "primary", "standby", and "prefer-standby".
"read-write" retains its previous meaning of "transactions are
read-write by default", and "read-only" inverts that. The other
three modes test specifically for hot-standby status, which is not
quite the same thing. (Setting default_transaction_read_only on
a primary server renders it read-only to this logic, but not a
standby.)
Furthermore, if talking to a v14 or later server, no extra network
round trip is needed to detect the session's status; the GUC_REPORT
variables delivered by the server are enough. When talking to an
older server, a SHOW or SELECT query is issued to detect session
read-only-ness or server hot-standby state, as needed.
Haribabu Kommi, Greg Nancarrow, Vignesh C, Tom Lane; reviewed at
various times by Laurenz Albe, Takayuki Tsunakawa, Peter Smith.
Discussion: https://postgr.es/m/CAF3+xM+8-ztOkaV9gHiJ3wfgENTq97QcjXQt+rbFQ6F7oNzt9A@mail.gmail.com
2021-03-03 02:17:45 +01:00
|
|
|
case CONNECTION_CHECK_TARGET:
|
2003-06-08 19:43:00 +02:00
|
|
|
break;
|
|
|
|
|
1999-11-30 04:08:19 +01:00
|
|
|
default:
|
2013-11-18 17:29:01 +01:00
|
|
|
appendPQExpBufferStr(&conn->errorMessage,
|
2001-07-15 15:45:04 +02:00
|
|
|
libpq_gettext("invalid connection state, probably indicative of memory corruption\n"));
|
1999-11-30 04:08:19 +01:00
|
|
|
goto error_return;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
keep_going: /* We will come back to here until there is
|
2003-06-08 19:43:00 +02:00
|
|
|
* nothing left to do. */
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
|
|
|
|
/* Time to advance to next address, or next host if no more addresses? */
|
|
|
|
if (conn->try_next_addr)
|
|
|
|
{
|
|
|
|
if (conn->addr_cur && conn->addr_cur->ai_next)
|
|
|
|
{
|
|
|
|
conn->addr_cur = conn->addr_cur->ai_next;
|
|
|
|
reset_connection_state_machine = true;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
conn->try_next_host = true;
|
|
|
|
conn->try_next_addr = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Time to advance to next connhost[] entry? */
|
|
|
|
if (conn->try_next_host)
|
|
|
|
{
|
In libpq, don't look up all the hostnames at once.
Historically, we looked up the target hostname in connectDBStart, so that
PQconnectPoll did not need to do DNS name resolution. The patches that
added multiple-target-host support to libpq preserved this division of
labor; but it's really nonsensical now, because it means that if any one
of the target hosts fails to resolve in DNS, the connection fails. That
negates the no-single-point-of-failure goal of the feature. Additionally,
DNS lookups aren't exactly cheap, but the code did them all even if the
first connection attempt succeeds.
Hence, rearrange so that PQconnectPoll does the lookups, and only looks
up a hostname when it's time to try that host. This does mean that
PQconnectPoll could block on a DNS lookup --- but if you wanted to avoid
that, you should be using hostaddr, as the documentation has always
specified. It seems fairly unlikely that any applications would really
care whether the lookup occurs inside PQconnectStart or PQconnectPoll.
In addition to calling out that fact explicitly, do some other minor
wordsmithing in the docs around the multiple-target-host feature.
Since this seems like a bug in the multiple-target-host feature,
backpatch to v10 where that was introduced. In the back branches,
avoid moving any existing fields of struct pg_conn, just in case
any third-party code is looking into that struct.
Tom Lane, reviewed by Fabien Coelho
Discussion: https://postgr.es/m/4913.1533827102@sss.pgh.pa.us
2018-08-23 22:39:19 +02:00
|
|
|
pg_conn_host *ch;
|
|
|
|
struct addrinfo hint;
|
|
|
|
int thisport;
|
|
|
|
int ret;
|
|
|
|
char portstr[MAXPGPATH];
|
|
|
|
|
Extend the abilities of libpq's target_session_attrs parameter.
In addition to the existing options of "any" and "read-write", we
now support "read-only", "primary", "standby", and "prefer-standby".
"read-write" retains its previous meaning of "transactions are
read-write by default", and "read-only" inverts that. The other
three modes test specifically for hot-standby status, which is not
quite the same thing. (Setting default_transaction_read_only on
a primary server renders it read-only to this logic, but not a
standby.)
Furthermore, if talking to a v14 or later server, no extra network
round trip is needed to detect the session's status; the GUC_REPORT
variables delivered by the server are enough. When talking to an
older server, a SHOW or SELECT query is issued to detect session
read-only-ness or server hot-standby state, as needed.
Haribabu Kommi, Greg Nancarrow, Vignesh C, Tom Lane; reviewed at
various times by Laurenz Albe, Takayuki Tsunakawa, Peter Smith.
Discussion: https://postgr.es/m/CAF3+xM+8-ztOkaV9gHiJ3wfgENTq97QcjXQt+rbFQ6F7oNzt9A@mail.gmail.com
2021-03-03 02:17:45 +01:00
|
|
|
if (conn->whichhost + 1 < conn->nconnhost)
|
|
|
|
conn->whichhost++;
|
|
|
|
else
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
{
|
|
|
|
/*
|
Extend the abilities of libpq's target_session_attrs parameter.
In addition to the existing options of "any" and "read-write", we
now support "read-only", "primary", "standby", and "prefer-standby".
"read-write" retains its previous meaning of "transactions are
read-write by default", and "read-only" inverts that. The other
three modes test specifically for hot-standby status, which is not
quite the same thing. (Setting default_transaction_read_only on
a primary server renders it read-only to this logic, but not a
standby.)
Furthermore, if talking to a v14 or later server, no extra network
round trip is needed to detect the session's status; the GUC_REPORT
variables delivered by the server are enough. When talking to an
older server, a SHOW or SELECT query is issued to detect session
read-only-ness or server hot-standby state, as needed.
Haribabu Kommi, Greg Nancarrow, Vignesh C, Tom Lane; reviewed at
various times by Laurenz Albe, Takayuki Tsunakawa, Peter Smith.
Discussion: https://postgr.es/m/CAF3+xM+8-ztOkaV9gHiJ3wfgENTq97QcjXQt+rbFQ6F7oNzt9A@mail.gmail.com
2021-03-03 02:17:45 +01:00
|
|
|
* Oops, no more hosts.
|
|
|
|
*
|
|
|
|
* If we are trying to connect in "prefer-standby" mode, then drop
|
|
|
|
* the standby requirement and start over.
|
|
|
|
*
|
|
|
|
* Otherwise, an appropriate error message is already set up, so
|
|
|
|
* we just need to set the right status.
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
*/
|
Extend the abilities of libpq's target_session_attrs parameter.
In addition to the existing options of "any" and "read-write", we
now support "read-only", "primary", "standby", and "prefer-standby".
"read-write" retains its previous meaning of "transactions are
read-write by default", and "read-only" inverts that. The other
three modes test specifically for hot-standby status, which is not
quite the same thing. (Setting default_transaction_read_only on
a primary server renders it read-only to this logic, but not a
standby.)
Furthermore, if talking to a v14 or later server, no extra network
round trip is needed to detect the session's status; the GUC_REPORT
variables delivered by the server are enough. When talking to an
older server, a SHOW or SELECT query is issued to detect session
read-only-ness or server hot-standby state, as needed.
Haribabu Kommi, Greg Nancarrow, Vignesh C, Tom Lane; reviewed at
various times by Laurenz Albe, Takayuki Tsunakawa, Peter Smith.
Discussion: https://postgr.es/m/CAF3+xM+8-ztOkaV9gHiJ3wfgENTq97QcjXQt+rbFQ6F7oNzt9A@mail.gmail.com
2021-03-03 02:17:45 +01:00
|
|
|
if (conn->target_server_type == SERVER_TYPE_PREFER_STANDBY &&
|
|
|
|
conn->nconnhost > 0)
|
|
|
|
{
|
|
|
|
conn->target_server_type = SERVER_TYPE_PREFER_STANDBY_PASS2;
|
|
|
|
conn->whichhost = 0;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
goto error_return;
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
}
|
In libpq, don't look up all the hostnames at once.
Historically, we looked up the target hostname in connectDBStart, so that
PQconnectPoll did not need to do DNS name resolution. The patches that
added multiple-target-host support to libpq preserved this division of
labor; but it's really nonsensical now, because it means that if any one
of the target hosts fails to resolve in DNS, the connection fails. That
negates the no-single-point-of-failure goal of the feature. Additionally,
DNS lookups aren't exactly cheap, but the code did them all even if the
first connection attempt succeeds.
Hence, rearrange so that PQconnectPoll does the lookups, and only looks
up a hostname when it's time to try that host. This does mean that
PQconnectPoll could block on a DNS lookup --- but if you wanted to avoid
that, you should be using hostaddr, as the documentation has always
specified. It seems fairly unlikely that any applications would really
care whether the lookup occurs inside PQconnectStart or PQconnectPoll.
In addition to calling out that fact explicitly, do some other minor
wordsmithing in the docs around the multiple-target-host feature.
Since this seems like a bug in the multiple-target-host feature,
backpatch to v10 where that was introduced. In the back branches,
avoid moving any existing fields of struct pg_conn, just in case
any third-party code is looking into that struct.
Tom Lane, reviewed by Fabien Coelho
Discussion: https://postgr.es/m/4913.1533827102@sss.pgh.pa.us
2018-08-23 22:39:19 +02:00
|
|
|
|
|
|
|
/* Drop any address info for previous host */
|
|
|
|
release_conn_addrinfo(conn);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Look up info for the new host. On failure, log the problem in
|
|
|
|
* conn->errorMessage, then loop around to try the next host. (Note
|
|
|
|
* we don't clear try_next_host until we've succeeded.)
|
|
|
|
*/
|
|
|
|
ch = &conn->connhost[conn->whichhost];
|
|
|
|
|
|
|
|
/* Initialize hint structure */
|
|
|
|
MemSet(&hint, 0, sizeof(hint));
|
|
|
|
hint.ai_socktype = SOCK_STREAM;
|
|
|
|
conn->addrlist_family = hint.ai_family = AF_UNSPEC;
|
|
|
|
|
|
|
|
/* Figure out the port number we're going to use. */
|
|
|
|
if (ch->port == NULL || ch->port[0] == '\0')
|
|
|
|
thisport = DEF_PGPORT;
|
|
|
|
else
|
|
|
|
{
|
2018-09-11 23:46:01 +02:00
|
|
|
if (!parse_int_param(ch->port, &thisport, conn, "port"))
|
|
|
|
goto error_return;
|
|
|
|
|
In libpq, don't look up all the hostnames at once.
Historically, we looked up the target hostname in connectDBStart, so that
PQconnectPoll did not need to do DNS name resolution. The patches that
added multiple-target-host support to libpq preserved this division of
labor; but it's really nonsensical now, because it means that if any one
of the target hosts fails to resolve in DNS, the connection fails. That
negates the no-single-point-of-failure goal of the feature. Additionally,
DNS lookups aren't exactly cheap, but the code did them all even if the
first connection attempt succeeds.
Hence, rearrange so that PQconnectPoll does the lookups, and only looks
up a hostname when it's time to try that host. This does mean that
PQconnectPoll could block on a DNS lookup --- but if you wanted to avoid
that, you should be using hostaddr, as the documentation has always
specified. It seems fairly unlikely that any applications would really
care whether the lookup occurs inside PQconnectStart or PQconnectPoll.
In addition to calling out that fact explicitly, do some other minor
wordsmithing in the docs around the multiple-target-host feature.
Since this seems like a bug in the multiple-target-host feature,
backpatch to v10 where that was introduced. In the back branches,
avoid moving any existing fields of struct pg_conn, just in case
any third-party code is looking into that struct.
Tom Lane, reviewed by Fabien Coelho
Discussion: https://postgr.es/m/4913.1533827102@sss.pgh.pa.us
2018-08-23 22:39:19 +02:00
|
|
|
if (thisport < 1 || thisport > 65535)
|
|
|
|
{
|
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
|
|
|
libpq_gettext("invalid port number: \"%s\"\n"),
|
|
|
|
ch->port);
|
|
|
|
goto keep_going;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
snprintf(portstr, sizeof(portstr), "%d", thisport);
|
|
|
|
|
|
|
|
/* Use pg_getaddrinfo_all() to resolve the address */
|
|
|
|
switch (ch->type)
|
|
|
|
{
|
|
|
|
case CHT_HOST_NAME:
|
|
|
|
ret = pg_getaddrinfo_all(ch->host, portstr, &hint,
|
|
|
|
&conn->addrlist);
|
|
|
|
if (ret || !conn->addrlist)
|
|
|
|
{
|
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
|
|
|
libpq_gettext("could not translate host name \"%s\" to address: %s\n"),
|
|
|
|
ch->host, gai_strerror(ret));
|
|
|
|
goto keep_going;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
case CHT_HOST_ADDRESS:
|
|
|
|
hint.ai_flags = AI_NUMERICHOST;
|
|
|
|
ret = pg_getaddrinfo_all(ch->hostaddr, portstr, &hint,
|
|
|
|
&conn->addrlist);
|
|
|
|
if (ret || !conn->addrlist)
|
|
|
|
{
|
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
|
|
|
libpq_gettext("could not parse network address \"%s\": %s\n"),
|
|
|
|
ch->hostaddr, gai_strerror(ret));
|
|
|
|
goto keep_going;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
case CHT_UNIX_SOCKET:
|
|
|
|
conn->addrlist_family = hint.ai_family = AF_UNIX;
|
|
|
|
UNIXSOCK_PATH(portstr, thisport, ch->host);
|
|
|
|
if (strlen(portstr) >= UNIXSOCK_PATH_BUFLEN)
|
|
|
|
{
|
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
|
|
|
libpq_gettext("Unix-domain socket path \"%s\" is too long (maximum %d bytes)\n"),
|
|
|
|
portstr,
|
|
|
|
(int) (UNIXSOCK_PATH_BUFLEN - 1));
|
|
|
|
goto keep_going;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* NULL hostname tells pg_getaddrinfo_all to parse the service
|
|
|
|
* name as a Unix-domain socket path.
|
|
|
|
*/
|
|
|
|
ret = pg_getaddrinfo_all(NULL, portstr, &hint,
|
|
|
|
&conn->addrlist);
|
|
|
|
if (ret || !conn->addrlist)
|
|
|
|
{
|
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
|
|
|
libpq_gettext("could not translate Unix-domain socket path \"%s\" to address: %s\n"),
|
|
|
|
portstr, gai_strerror(ret));
|
|
|
|
goto keep_going;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* OK, scan this addrlist for a working server address */
|
|
|
|
conn->addr_cur = conn->addrlist;
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
reset_connection_state_machine = true;
|
|
|
|
conn->try_next_host = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Reset connection state machine? */
|
|
|
|
if (reset_connection_state_machine)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* (Re) initialize our connection control variables for a set of
|
|
|
|
* connection attempts to a single server address. These variables
|
|
|
|
* must persist across individual connection attempts, but we must
|
|
|
|
* reset them when we start to consider a new server.
|
|
|
|
*/
|
|
|
|
conn->pversion = PG_PROTOCOL(3, 0);
|
|
|
|
conn->send_appname = true;
|
|
|
|
#ifdef USE_SSL
|
|
|
|
/* initialize these values based on SSL mode */
|
|
|
|
conn->allow_ssl_try = (conn->sslmode[0] != 'd'); /* "disable" */
|
|
|
|
conn->wait_ssl_try = (conn->sslmode[0] == 'a'); /* "allow" */
|
|
|
|
#endif
|
2020-07-13 17:57:55 +02:00
|
|
|
#ifdef ENABLE_GSS
|
|
|
|
conn->try_gss = (conn->gssencmode[0] != 'd'); /* "disable" */
|
|
|
|
#endif
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
|
|
|
|
reset_connection_state_machine = false;
|
|
|
|
need_new_connection = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Force a new connection (perhaps to the same server as before)? */
|
|
|
|
if (need_new_connection)
|
|
|
|
{
|
|
|
|
/* Drop any existing connection */
|
|
|
|
pqDropConnection(conn, true);
|
|
|
|
|
|
|
|
/* Reset all state obtained from old server */
|
|
|
|
pqDropServerData(conn);
|
|
|
|
|
|
|
|
/* Drop any PGresult we might have, too */
|
|
|
|
conn->asyncStatus = PGASYNC_IDLE;
|
|
|
|
conn->xactStatus = PQTRANS_IDLE;
|
2021-03-15 22:13:42 +01:00
|
|
|
conn->pipelineStatus = PQ_PIPELINE_OFF;
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
pqClearAsyncResult(conn);
|
|
|
|
|
|
|
|
/* Reset conn->status to put the state machine in the right state */
|
|
|
|
conn->status = CONNECTION_NEEDED;
|
|
|
|
|
|
|
|
need_new_connection = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Now try to advance the state machine for this connection */
|
1999-11-30 04:08:19 +01:00
|
|
|
switch (conn->status)
|
|
|
|
{
|
2003-06-08 19:43:00 +02:00
|
|
|
case CONNECTION_NEEDED:
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Try to initiate a connection to one of the addresses
|
2005-10-17 18:24:20 +02:00
|
|
|
* returned by pg_getaddrinfo_all(). conn->addr_cur is the
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
* next one to try.
|
|
|
|
*
|
|
|
|
* The extra level of braces here is historical. It's not
|
|
|
|
* worth reindenting this whole switch case to remove 'em.
|
2003-06-08 19:43:00 +02:00
|
|
|
*/
|
|
|
|
{
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
struct addrinfo *addr_cur = conn->addr_cur;
|
2018-11-19 18:34:12 +01:00
|
|
|
char host_addr[NI_MAXHOST];
|
2016-11-03 14:25:20 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Advance to next possible host, if we've tried all of
|
|
|
|
* the addresses for the current host.
|
|
|
|
*/
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
if (addr_cur == NULL)
|
2016-11-03 14:25:20 +01:00
|
|
|
{
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
conn->try_next_host = true;
|
|
|
|
goto keep_going;
|
2016-11-03 14:25:20 +01:00
|
|
|
}
|
2003-06-08 19:43:00 +02:00
|
|
|
|
2021-01-11 20:03:39 +01:00
|
|
|
/* Remember current address for possible use later */
|
2003-06-20 06:09:12 +02:00
|
|
|
memcpy(&conn->raddr.addr, addr_cur->ai_addr,
|
2003-06-08 19:43:00 +02:00
|
|
|
addr_cur->ai_addrlen);
|
2003-06-20 06:09:12 +02:00
|
|
|
conn->raddr.salen = addr_cur->ai_addrlen;
|
2003-06-08 19:43:00 +02:00
|
|
|
|
2021-01-11 20:03:39 +01:00
|
|
|
/*
|
|
|
|
* Set connip, too. Note we purposely ignore strdup
|
|
|
|
* failure; not a big problem if it fails.
|
|
|
|
*/
|
2018-11-19 18:34:12 +01:00
|
|
|
if (conn->connip != NULL)
|
|
|
|
{
|
|
|
|
free(conn->connip);
|
|
|
|
conn->connip = NULL;
|
|
|
|
}
|
|
|
|
getHostaddr(conn, host_addr, NI_MAXHOST);
|
2021-01-11 20:03:39 +01:00
|
|
|
if (host_addr[0])
|
2018-11-19 18:34:12 +01:00
|
|
|
conn->connip = strdup(host_addr);
|
2019-05-22 18:55:34 +02:00
|
|
|
|
2021-01-11 20:03:39 +01:00
|
|
|
/* Try to create the socket */
|
2014-04-17 01:46:51 +02:00
|
|
|
conn->sock = socket(addr_cur->ai_family, SOCK_STREAM, 0);
|
|
|
|
if (conn->sock == PGINVALID_SOCKET)
|
2003-06-08 19:43:00 +02:00
|
|
|
{
|
2021-01-11 20:03:39 +01:00
|
|
|
int errorno = SOCK_ERRNO;
|
|
|
|
|
2003-06-08 19:43:00 +02:00
|
|
|
/*
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
* Silently ignore socket() failure if we have more
|
|
|
|
* addresses to try; this reduces useless chatter in
|
|
|
|
* cases where the address list includes both IPv4 and
|
|
|
|
* IPv6 but kernel only accepts one family.
|
2003-06-08 19:43:00 +02:00
|
|
|
*/
|
2016-11-03 14:25:20 +01:00
|
|
|
if (addr_cur->ai_next != NULL ||
|
|
|
|
conn->whichhost + 1 < conn->nconnhost)
|
2003-06-08 19:43:00 +02:00
|
|
|
{
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
conn->try_next_addr = true;
|
|
|
|
goto keep_going;
|
2003-06-08 19:43:00 +02:00
|
|
|
}
|
2021-01-21 22:10:18 +01:00
|
|
|
emitHostIdentityInfo(conn, host_addr);
|
2008-10-27 10:42:31 +01:00
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
2003-06-08 19:43:00 +02:00
|
|
|
libpq_gettext("could not create socket: %s\n"),
|
2021-01-11 20:03:39 +01:00
|
|
|
SOCK_STRERROR(errorno, sebuf, sizeof(sebuf)));
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
goto error_return;
|
2003-06-08 19:43:00 +02:00
|
|
|
}
|
|
|
|
|
2021-01-11 20:03:39 +01:00
|
|
|
/*
|
|
|
|
* Once we've identified a target address, all errors
|
|
|
|
* except the preceding socket()-failure case should be
|
2021-01-21 22:10:18 +01:00
|
|
|
* prefixed with host-identity information. (If the
|
|
|
|
* connection succeeds, the contents of conn->errorMessage
|
|
|
|
* won't matter, so this is harmless.)
|
2021-01-11 20:03:39 +01:00
|
|
|
*/
|
2021-01-21 22:10:18 +01:00
|
|
|
emitHostIdentityInfo(conn, host_addr);
|
2021-01-11 20:03:39 +01:00
|
|
|
|
2003-06-08 19:43:00 +02:00
|
|
|
/*
|
|
|
|
* Select socket options: no delay of outgoing data for
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
* TCP sockets, nonblock mode, close-on-exec. Try the
|
|
|
|
* next address if any of this fails.
|
2003-06-08 19:43:00 +02:00
|
|
|
*/
|
2022-02-15 10:03:52 +01:00
|
|
|
if (addr_cur->ai_family != AF_UNIX)
|
2003-06-08 19:43:00 +02:00
|
|
|
{
|
|
|
|
if (!connectNoDelay(conn))
|
2003-06-12 09:36:51 +02:00
|
|
|
{
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
/* error message already created */
|
|
|
|
conn->try_next_addr = true;
|
|
|
|
goto keep_going;
|
2003-06-12 09:36:51 +02:00
|
|
|
}
|
2003-06-08 19:43:00 +02:00
|
|
|
}
|
2005-03-25 01:34:31 +01:00
|
|
|
if (!pg_set_noblock(conn->sock))
|
2003-06-12 09:36:51 +02:00
|
|
|
{
|
2008-10-27 10:42:31 +01:00
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
2013-04-19 05:35:19 +02:00
|
|
|
libpq_gettext("could not set socket to nonblocking mode: %s\n"),
|
2004-10-21 22:23:19 +02:00
|
|
|
SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
conn->try_next_addr = true;
|
|
|
|
goto keep_going;
|
2004-10-21 22:23:19 +02:00
|
|
|
}
|
|
|
|
|
2017-04-22 08:06:16 +02:00
|
|
|
#ifdef F_SETFD
|
2004-10-21 22:23:19 +02:00
|
|
|
if (fcntl(conn->sock, F_SETFD, FD_CLOEXEC) == -1)
|
|
|
|
{
|
2008-10-27 10:42:31 +01:00
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
2004-10-21 22:23:19 +02:00
|
|
|
libpq_gettext("could not set socket to close-on-exec mode: %s\n"),
|
|
|
|
SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
conn->try_next_addr = true;
|
|
|
|
goto keep_going;
|
2003-06-12 09:36:51 +02:00
|
|
|
}
|
2017-04-22 08:06:16 +02:00
|
|
|
#endif /* F_SETFD */
|
2003-08-04 02:43:34 +02:00
|
|
|
|
2022-02-15 10:03:52 +01:00
|
|
|
if (addr_cur->ai_family != AF_UNIX)
|
Add TCP keepalive support to libpq.
This adds four additional connection parameters to libpq: keepalives,
keepalives_idle, keepalives_count, and keepalives_interval.
keepalives default to on, per discussion, but can be turned off by
specifying keepalives=0. The remaining parameters, where supported,
can be used to adjust how often keepalives are sent and how many
can be lost before the connection is broken.
The immediate motivation for this patch is to make sure that
walreceiver will eventually notice if the master reboots without
closing the connection cleanly, but it should be helpful in other
cases as well.
Tollef Fog Heen, Fujii Masao, and me.
2010-06-23 23:54:13 +02:00
|
|
|
{
|
2011-04-19 13:54:48 +02:00
|
|
|
#ifndef WIN32
|
Add TCP keepalive support to libpq.
This adds four additional connection parameters to libpq: keepalives,
keepalives_idle, keepalives_count, and keepalives_interval.
keepalives default to on, per discussion, but can be turned off by
specifying keepalives=0. The remaining parameters, where supported,
can be used to adjust how often keepalives are sent and how many
can be lost before the connection is broken.
The immediate motivation for this patch is to make sure that
walreceiver will eventually notice if the master reboots without
closing the connection cleanly, but it should be helpful in other
cases as well.
Tollef Fog Heen, Fujii Masao, and me.
2010-06-23 23:54:13 +02:00
|
|
|
int on = 1;
|
2011-04-19 13:54:48 +02:00
|
|
|
#endif
|
Add TCP keepalive support to libpq.
This adds four additional connection parameters to libpq: keepalives,
keepalives_idle, keepalives_count, and keepalives_interval.
keepalives default to on, per discussion, but can be turned off by
specifying keepalives=0. The remaining parameters, where supported,
can be used to adjust how often keepalives are sent and how many
can be lost before the connection is broken.
The immediate motivation for this patch is to make sure that
walreceiver will eventually notice if the master reboots without
closing the connection cleanly, but it should be helpful in other
cases as well.
Tollef Fog Heen, Fujii Masao, and me.
2010-06-23 23:54:13 +02:00
|
|
|
int usekeepalives = useKeepalives(conn);
|
|
|
|
int err = 0;
|
|
|
|
|
|
|
|
if (usekeepalives < 0)
|
|
|
|
{
|
2013-11-18 17:29:01 +01:00
|
|
|
appendPQExpBufferStr(&conn->errorMessage,
|
|
|
|
libpq_gettext("keepalives parameter must be an integer\n"));
|
Add TCP keepalive support to libpq.
This adds four additional connection parameters to libpq: keepalives,
keepalives_idle, keepalives_count, and keepalives_interval.
keepalives default to on, per discussion, but can be turned off by
specifying keepalives=0. The remaining parameters, where supported,
can be used to adjust how often keepalives are sent and how many
can be lost before the connection is broken.
The immediate motivation for this patch is to make sure that
walreceiver will eventually notice if the master reboots without
closing the connection cleanly, but it should be helpful in other
cases as well.
Tollef Fog Heen, Fujii Masao, and me.
2010-06-23 23:54:13 +02:00
|
|
|
err = 1;
|
|
|
|
}
|
|
|
|
else if (usekeepalives == 0)
|
|
|
|
{
|
|
|
|
/* Do nothing */
|
|
|
|
}
|
2010-07-08 12:20:14 +02:00
|
|
|
#ifndef WIN32
|
Add TCP keepalive support to libpq.
This adds four additional connection parameters to libpq: keepalives,
keepalives_idle, keepalives_count, and keepalives_interval.
keepalives default to on, per discussion, but can be turned off by
specifying keepalives=0. The remaining parameters, where supported,
can be used to adjust how often keepalives are sent and how many
can be lost before the connection is broken.
The immediate motivation for this patch is to make sure that
walreceiver will eventually notice if the master reboots without
closing the connection cleanly, but it should be helpful in other
cases as well.
Tollef Fog Heen, Fujii Masao, and me.
2010-06-23 23:54:13 +02:00
|
|
|
else if (setsockopt(conn->sock,
|
|
|
|
SOL_SOCKET, SO_KEEPALIVE,
|
|
|
|
(char *) &on, sizeof(on)) < 0)
|
|
|
|
{
|
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
2021-04-23 14:18:11 +02:00
|
|
|
libpq_gettext("%s(%s) failed: %s\n"),
|
|
|
|
"setsockopt",
|
2017-06-28 18:30:16 +02:00
|
|
|
"SO_KEEPALIVE",
|
Add TCP keepalive support to libpq.
This adds four additional connection parameters to libpq: keepalives,
keepalives_idle, keepalives_count, and keepalives_interval.
keepalives default to on, per discussion, but can be turned off by
specifying keepalives=0. The remaining parameters, where supported,
can be used to adjust how often keepalives are sent and how many
can be lost before the connection is broken.
The immediate motivation for this patch is to make sure that
walreceiver will eventually notice if the master reboots without
closing the connection cleanly, but it should be helpful in other
cases as well.
Tollef Fog Heen, Fujii Masao, and me.
2010-06-23 23:54:13 +02:00
|
|
|
SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
|
|
|
|
err = 1;
|
|
|
|
}
|
|
|
|
else if (!setKeepalivesIdle(conn)
|
|
|
|
|| !setKeepalivesInterval(conn)
|
|
|
|
|| !setKeepalivesCount(conn))
|
|
|
|
err = 1;
|
2010-07-08 12:20:14 +02:00
|
|
|
#else /* WIN32 */
|
2010-07-08 18:19:50 +02:00
|
|
|
#ifdef SIO_KEEPALIVE_VALS
|
Make PQcancel use the PGconn's tcp_user_timeout and keepalives settings.
If connectivity to the server has been lost or become flaky, the
user might well try to send a query cancel. It's highly annoying
if PQcancel hangs up in such a case, but that's exactly what's likely
to happen. To ameliorate this problem, apply the PGconn's
tcp_user_timeout and keepalives settings to the TCP connection used
to send the cancel. This should be safe on Unix machines, since POSIX
specifies that setsockopt() is async-signal-safe. We are guessing
that WSAIoctl(SIO_KEEPALIVE_VALS) is similarly safe on Windows.
(Note that at least in psql and our other frontend programs, there's
no safety issue involved anyway, since we run PQcancel in its own
thread rather than in a signal handler.)
Most of the value here comes from the expectation that tcp_user_timeout
will be applied as a connection timeout. That appears to happen on
Linux, even though its tcp(7) man page claims differently. The
keepalive options probably won't help much, but as long as we can
apply them for not much code, we might as well.
Jelte Fennema, reviewed by Fujii Masao and myself
Discussion: https://postgr.es/m/AM5PR83MB017870DE81FC84D5E21E9D1EF7AA9@AM5PR83MB0178.EURPRD83.prod.outlook.com
2022-01-18 20:02:43 +01:00
|
|
|
else if (!prepKeepalivesWin32(conn))
|
2010-07-08 12:20:14 +02:00
|
|
|
err = 1;
|
2010-07-08 18:19:50 +02:00
|
|
|
#endif /* SIO_KEEPALIVE_VALS */
|
2010-07-08 12:20:14 +02:00
|
|
|
#endif /* WIN32 */
|
Add support TCP user timeout in libpq and the backend server
Similarly to the set of parameters for keepalive, a connection parameter
for libpq is added as well as a backend GUC, called tcp_user_timeout.
Increasing the TCP user timeout is useful to allow a connection to
survive extended periods without end-to-end connection, and decreasing
it allows application to fail faster. By default, the parameter is 0,
which makes the connection use the system default, and follows a logic
close to the keepalive parameters in its handling. When connecting
through a Unix-socket domain, the parameters have no effect.
Author: Ryohei Nagaura
Reviewed-by: Fabien Coelho, Robert Haas, Kyotaro Horiguchi, Kirk
Jamison, Mikalai Keida, Takayuki Tsunakawa, Andrei Yahorau
Discussion: https://postgr.es/m/EDA4195584F5064680D8130B1CA91C45367328@G01JPEXMBYT04
2019-04-06 08:23:37 +02:00
|
|
|
else if (!setTCPUserTimeout(conn))
|
|
|
|
err = 1;
|
Add TCP keepalive support to libpq.
This adds four additional connection parameters to libpq: keepalives,
keepalives_idle, keepalives_count, and keepalives_interval.
keepalives default to on, per discussion, but can be turned off by
specifying keepalives=0. The remaining parameters, where supported,
can be used to adjust how often keepalives are sent and how many
can be lost before the connection is broken.
The immediate motivation for this patch is to make sure that
walreceiver will eventually notice if the master reboots without
closing the connection cleanly, but it should be helpful in other
cases as well.
Tollef Fog Heen, Fujii Masao, and me.
2010-06-23 23:54:13 +02:00
|
|
|
|
|
|
|
if (err)
|
|
|
|
{
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
conn->try_next_addr = true;
|
|
|
|
goto keep_going;
|
Add TCP keepalive support to libpq.
This adds four additional connection parameters to libpq: keepalives,
keepalives_idle, keepalives_count, and keepalives_interval.
keepalives default to on, per discussion, but can be turned off by
specifying keepalives=0. The remaining parameters, where supported,
can be used to adjust how often keepalives are sent and how many
can be lost before the connection is broken.
The immediate motivation for this patch is to make sure that
walreceiver will eventually notice if the master reboots without
closing the connection cleanly, but it should be helpful in other
cases as well.
Tollef Fog Heen, Fujii Masao, and me.
2010-06-23 23:54:13 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-07-24 19:58:31 +02:00
|
|
|
/*----------
|
|
|
|
* We have three methods of blocking SIGPIPE during
|
|
|
|
* send() calls to this socket:
|
|
|
|
*
|
|
|
|
* - setsockopt(sock, SO_NOSIGPIPE)
|
|
|
|
* - send(sock, ..., MSG_NOSIGNAL)
|
|
|
|
* - setting the signal mask to SIG_IGN during send()
|
|
|
|
*
|
|
|
|
* The third method requires three syscalls per send,
|
|
|
|
* so we prefer either of the first two, but they are
|
|
|
|
* less portable. The state is tracked in the following
|
|
|
|
* members of PGconn:
|
|
|
|
*
|
|
|
|
* conn->sigpipe_so - we have set up SO_NOSIGPIPE
|
|
|
|
* conn->sigpipe_flag - we're specifying MSG_NOSIGNAL
|
|
|
|
*
|
|
|
|
* If we can use SO_NOSIGPIPE, then set sigpipe_so here
|
|
|
|
* and we're done. Otherwise, set sigpipe_flag so that
|
|
|
|
* we will try MSG_NOSIGNAL on sends. If we get an error
|
|
|
|
* with MSG_NOSIGNAL, we'll clear that flag and revert to
|
|
|
|
* signal masking.
|
|
|
|
*----------
|
|
|
|
*/
|
|
|
|
conn->sigpipe_so = false;
|
|
|
|
#ifdef MSG_NOSIGNAL
|
|
|
|
conn->sigpipe_flag = true;
|
|
|
|
#else
|
|
|
|
conn->sigpipe_flag = false;
|
|
|
|
#endif /* MSG_NOSIGNAL */
|
|
|
|
|
|
|
|
#ifdef SO_NOSIGPIPE
|
|
|
|
optval = 1;
|
|
|
|
if (setsockopt(conn->sock, SOL_SOCKET, SO_NOSIGPIPE,
|
|
|
|
(char *) &optval, sizeof(optval)) == 0)
|
|
|
|
{
|
|
|
|
conn->sigpipe_so = true;
|
|
|
|
conn->sigpipe_flag = false;
|
|
|
|
}
|
|
|
|
#endif /* SO_NOSIGPIPE */
|
|
|
|
|
2003-06-08 19:43:00 +02:00
|
|
|
/*
|
|
|
|
* Start/make connection. This should not block, since we
|
|
|
|
* are in nonblock mode. If it does, well, too bad.
|
|
|
|
*/
|
|
|
|
if (connect(conn->sock, addr_cur->ai_addr,
|
|
|
|
addr_cur->ai_addrlen) < 0)
|
|
|
|
{
|
|
|
|
if (SOCK_ERRNO == EINPROGRESS ||
|
Expect EWOULDBLOCK from a non-blocking connect() call only on Windows.
On Unix-ish platforms, EWOULDBLOCK may be the same as EAGAIN, which is
*not* a success return, at least not on Linux. We need to treat it as a
failure to avoid giving a misleading error message. Per the Single Unix
Spec, only EINPROGRESS and EINTR returns indicate that the connection
attempt is in progress.
On Windows, on the other hand, EWOULDBLOCK (WSAEWOULDBLOCK) is the expected
case. We must accept EINPROGRESS as well because Cygwin will return that,
and it doesn't seem worth distinguishing Cygwin from native Windows here.
It's not very clear whether EINTR can occur on Windows, but let's leave
that part of the logic alone in the absence of concrete trouble reports.
Also, remove the test for errno == 0, effectively reverting commit
da9501bddb42222dc33c031b1db6ce2133bcee7b, which AFAICS was just a thinko;
or at best it might have been a workaround for a platform-specific bug,
which we can hope is gone now thirteen years later. In any case, since
libpq makes no effort to reset errno to zero before calling connect(),
it seems unlikely that that test has ever reliably done anything useful.
Andres Freund and Tom Lane
2013-06-27 18:36:44 +02:00
|
|
|
#ifdef WIN32
|
2003-06-08 19:43:00 +02:00
|
|
|
SOCK_ERRNO == EWOULDBLOCK ||
|
Expect EWOULDBLOCK from a non-blocking connect() call only on Windows.
On Unix-ish platforms, EWOULDBLOCK may be the same as EAGAIN, which is
*not* a success return, at least not on Linux. We need to treat it as a
failure to avoid giving a misleading error message. Per the Single Unix
Spec, only EINPROGRESS and EINTR returns indicate that the connection
attempt is in progress.
On Windows, on the other hand, EWOULDBLOCK (WSAEWOULDBLOCK) is the expected
case. We must accept EINPROGRESS as well because Cygwin will return that,
and it doesn't seem worth distinguishing Cygwin from native Windows here.
It's not very clear whether EINTR can occur on Windows, but let's leave
that part of the logic alone in the absence of concrete trouble reports.
Also, remove the test for errno == 0, effectively reverting commit
da9501bddb42222dc33c031b1db6ce2133bcee7b, which AFAICS was just a thinko;
or at best it might have been a workaround for a platform-specific bug,
which we can hope is gone now thirteen years later. In any case, since
libpq makes no effort to reset errno to zero before calling connect(),
it seems unlikely that that test has ever reliably done anything useful.
Andres Freund and Tom Lane
2013-06-27 18:36:44 +02:00
|
|
|
#endif
|
|
|
|
SOCK_ERRNO == EINTR)
|
2003-06-08 19:43:00 +02:00
|
|
|
{
|
|
|
|
/*
|
|
|
|
* This is fine - we're in non-blocking mode, and
|
|
|
|
* the connection is in progress. Tell caller to
|
|
|
|
* wait for write-ready on socket.
|
|
|
|
*/
|
|
|
|
conn->status = CONNECTION_STARTED;
|
|
|
|
return PGRES_POLLING_WRITING;
|
|
|
|
}
|
|
|
|
/* otherwise, trouble */
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Hm, we're connected already --- seems the "nonblock
|
|
|
|
* connection" wasn't. Advance the state machine and
|
|
|
|
* go do the next stuff.
|
|
|
|
*/
|
|
|
|
conn->status = CONNECTION_STARTED;
|
|
|
|
goto keep_going;
|
|
|
|
}
|
2003-08-04 02:43:34 +02:00
|
|
|
|
2003-06-08 19:43:00 +02:00
|
|
|
/*
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
* This connection failed. Add the error report to
|
|
|
|
* conn->errorMessage, then try the next address if any.
|
2003-06-08 19:43:00 +02:00
|
|
|
*/
|
|
|
|
connectFailureMessage(conn, SOCK_ERRNO);
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
conn->try_next_addr = true;
|
|
|
|
goto keep_going;
|
|
|
|
}
|
2003-06-08 19:43:00 +02:00
|
|
|
}
|
|
|
|
|
1999-11-30 04:08:19 +01:00
|
|
|
case CONNECTION_STARTED:
|
|
|
|
{
|
2021-11-09 15:20:47 +01:00
|
|
|
socklen_t optlen = sizeof(optval);
|
2000-04-12 19:17:23 +02:00
|
|
|
|
1999-11-30 04:08:19 +01:00
|
|
|
/*
|
|
|
|
* Write ready, since we've made it here, so the connection
|
2003-06-08 19:43:00 +02:00
|
|
|
* has been made ... or has failed.
|
1999-11-30 04:08:19 +01:00
|
|
|
*/
|
1998-01-26 02:42:53 +01:00
|
|
|
|
2000-01-14 06:33:15 +01:00
|
|
|
/*
|
|
|
|
* Now check (using getsockopt) that there is not an error
|
|
|
|
* state waiting for us on the socket.
|
|
|
|
*/
|
2000-04-12 19:17:23 +02:00
|
|
|
|
1999-11-30 04:08:19 +01:00
|
|
|
if (getsockopt(conn->sock, SOL_SOCKET, SO_ERROR,
|
2000-06-14 20:18:01 +02:00
|
|
|
(char *) &optval, &optlen) == -1)
|
2000-04-12 19:17:23 +02:00
|
|
|
{
|
2008-10-27 10:42:31 +01:00
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
2001-07-15 15:45:04 +02:00
|
|
|
libpq_gettext("could not get socket error status: %s\n"),
|
2003-06-14 19:49:54 +02:00
|
|
|
SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
|
1999-11-30 04:08:19 +01:00
|
|
|
goto error_return;
|
2000-04-12 19:17:23 +02:00
|
|
|
}
|
1999-11-30 04:08:19 +01:00
|
|
|
else if (optval != 0)
|
2000-04-12 19:17:23 +02:00
|
|
|
{
|
|
|
|
/*
|
1999-11-30 04:08:19 +01:00
|
|
|
* When using a nonblocking connect, we will typically see
|
2000-01-14 06:33:15 +01:00
|
|
|
* connect failures at this point, so provide a friendly
|
1999-11-30 04:08:19 +01:00
|
|
|
* error message.
|
2000-04-12 19:17:23 +02:00
|
|
|
*/
|
2001-07-15 15:45:04 +02:00
|
|
|
connectFailureMessage(conn, optval);
|
2003-08-04 02:43:34 +02:00
|
|
|
|
2003-06-08 19:43:00 +02:00
|
|
|
/*
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
* Try the next address if any, just as in the case where
|
|
|
|
* connect() returned failure immediately.
|
2003-06-08 19:43:00 +02:00
|
|
|
*/
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
conn->try_next_addr = true;
|
|
|
|
goto keep_going;
|
2000-04-12 19:17:23 +02:00
|
|
|
}
|
|
|
|
|
1999-11-30 04:08:19 +01:00
|
|
|
/* Fill in the client address */
|
2003-06-12 09:36:51 +02:00
|
|
|
conn->laddr.salen = sizeof(conn->laddr.addr);
|
|
|
|
if (getsockname(conn->sock,
|
|
|
|
(struct sockaddr *) &conn->laddr.addr,
|
|
|
|
&conn->laddr.salen) < 0)
|
2000-04-12 19:17:23 +02:00
|
|
|
{
|
2008-10-27 10:42:31 +01:00
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
2003-06-12 09:36:51 +02:00
|
|
|
libpq_gettext("could not get client address from socket: %s\n"),
|
2003-06-14 19:49:54 +02:00
|
|
|
SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
|
1999-11-30 04:08:19 +01:00
|
|
|
goto error_return;
|
2000-04-12 19:17:23 +02:00
|
|
|
}
|
|
|
|
|
2003-06-08 19:43:00 +02:00
|
|
|
/*
|
|
|
|
* Make sure we can write before advancing to next step.
|
|
|
|
*/
|
1999-11-30 04:08:19 +01:00
|
|
|
conn->status = CONNECTION_MADE;
|
|
|
|
return PGRES_POLLING_WRITING;
|
|
|
|
}
|
|
|
|
|
|
|
|
case CONNECTION_MADE:
|
|
|
|
{
|
2003-04-18 00:26:02 +02:00
|
|
|
char *startpacket;
|
|
|
|
int packetlen;
|
1999-11-30 04:08:19 +01:00
|
|
|
|
Replace use of credential control messages with getsockopt(LOCAL_PEERCRED).
It turns out the reason we hadn't found out about the portability issues
with our credential-control-message code is that almost no modern platforms
use that code at all; the ones that used to need it now offer getpeereid(),
which we choose first. The last holdout was NetBSD, and they added
getpeereid() as of 5.0. So far as I can tell, the only live platform on
which that code was being exercised was Debian/kFreeBSD, ie, FreeBSD kernel
with Linux userland --- since glibc doesn't provide getpeereid(), we fell
back to the control message code. However, the FreeBSD kernel provides a
LOCAL_PEERCRED socket parameter that's functionally equivalent to Linux's
SO_PEERCRED. That is both much simpler to use than control messages, and
superior because it doesn't require receiving a message from the other end
at just the right time.
Therefore, add code to use LOCAL_PEERCRED when necessary, and rip out all
the credential-control-message code in the backend. (libpq still has such
code so that it can still talk to pre-9.1 servers ... but eventually we can
get rid of it there too.) Clean up related autoconf probes, too.
This means that libpq's requirepeer parameter now works on exactly the same
platforms where the backend supports peer authentication, so adjust the
documentation accordingly.
2011-05-31 22:10:46 +02:00
|
|
|
/*
|
|
|
|
* Implement requirepeer check, if requested and it's a
|
|
|
|
* Unix-domain socket.
|
|
|
|
*/
|
|
|
|
if (conn->requirepeer && conn->requirepeer[0] &&
|
2022-02-15 10:03:52 +01:00
|
|
|
conn->raddr.addr.ss_family == AF_UNIX)
|
2010-07-18 13:37:26 +02:00
|
|
|
{
|
2019-10-30 12:58:32 +01:00
|
|
|
#ifndef WIN32
|
2022-01-11 19:46:12 +01:00
|
|
|
char *remote_username;
|
2019-10-30 12:58:32 +01:00
|
|
|
#endif
|
2010-07-18 13:37:26 +02:00
|
|
|
uid_t uid;
|
|
|
|
gid_t gid;
|
|
|
|
|
|
|
|
errno = 0;
|
2010-07-18 18:42:20 +02:00
|
|
|
if (getpeereid(conn->sock, &uid, &gid) != 0)
|
2010-07-18 13:37:26 +02:00
|
|
|
{
|
2011-06-02 19:05:01 +02:00
|
|
|
/*
|
|
|
|
* Provide special error message if getpeereid is a
|
|
|
|
* stub
|
|
|
|
*/
|
|
|
|
if (errno == ENOSYS)
|
2013-11-18 17:29:01 +01:00
|
|
|
appendPQExpBufferStr(&conn->errorMessage,
|
|
|
|
libpq_gettext("requirepeer parameter is not supported on this platform\n"));
|
2011-06-02 19:05:01 +02:00
|
|
|
else
|
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
|
|
|
libpq_gettext("could not get peer credentials: %s\n"),
|
2018-09-26 18:35:57 +02:00
|
|
|
strerror_r(errno, sebuf, sizeof(sebuf)));
|
2010-07-18 13:37:26 +02:00
|
|
|
goto error_return;
|
|
|
|
}
|
|
|
|
|
2019-10-30 12:58:32 +01:00
|
|
|
#ifndef WIN32
|
2022-01-11 19:46:12 +01:00
|
|
|
remote_username = pg_fe_getusername(uid,
|
|
|
|
&conn->errorMessage);
|
|
|
|
if (remote_username == NULL)
|
|
|
|
goto error_return; /* message already logged */
|
2010-07-18 13:37:26 +02:00
|
|
|
|
2022-01-11 19:46:12 +01:00
|
|
|
if (strcmp(remote_username, conn->requirepeer) != 0)
|
2010-07-18 13:37:26 +02:00
|
|
|
{
|
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
2010-07-18 19:08:11 +02:00
|
|
|
libpq_gettext("requirepeer specifies \"%s\", but actual peer user name is \"%s\"\n"),
|
2022-01-11 19:46:12 +01:00
|
|
|
conn->requirepeer, remote_username);
|
|
|
|
free(remote_username);
|
2010-07-18 13:37:26 +02:00
|
|
|
goto error_return;
|
|
|
|
}
|
2022-01-11 19:46:12 +01:00
|
|
|
free(remote_username);
|
2019-10-30 12:58:32 +01:00
|
|
|
#else /* WIN32 */
|
|
|
|
/* should have failed with ENOSYS above */
|
|
|
|
Assert(false);
|
|
|
|
#endif /* WIN32 */
|
2010-07-18 13:37:26 +02:00
|
|
|
}
|
|
|
|
|
2022-02-15 10:03:52 +01:00
|
|
|
if (conn->raddr.addr.ss_family == AF_UNIX)
|
GSSAPI encryption support
On both the frontend and backend, prepare for GSSAPI encryption
support by moving common code for error handling into a separate file.
Fix a TODO for handling multiple status messages in the process.
Eliminate the OIDs, which have not been needed for some time.
Add frontend and backend encryption support functions. Keep the
context initiation for authentication-only separate on both the
frontend and backend in order to avoid concerns about changing the
requested flags to include encryption support.
In postmaster, pull GSSAPI authorization checking into a shared
function. Also share the initiator name between the encryption and
non-encryption codepaths.
For HBA, add "hostgssenc" and "hostnogssenc" entries that behave
similarly to their SSL counterparts. "hostgssenc" requires either
"gss", "trust", or "reject" for its authentication.
Similarly, add a "gssencmode" parameter to libpq. Supported values are
"disable", "require", and "prefer". Notably, negotiation will only be
attempted if credentials can be acquired. Move credential acquisition
into its own function to support this behavior.
Add a simple pg_stat_gssapi view similar to pg_stat_ssl, for monitoring
if GSSAPI authentication was used, what principal was used, and if
encryption is being used on the connection.
Finally, add documentation for everything new, and update existing
documentation on connection security.
Thanks to Michael Paquier for the Windows fixes.
Author: Robbie Harwood, with changes to the read/write functions by me.
Reviewed in various forms and at different times by: Michael Paquier,
Andres Freund, David Steele.
Discussion: https://www.postgresql.org/message-id/flat/jlg1tgq1ktm.fsf@thriss.redhat.com
2019-04-03 21:02:33 +02:00
|
|
|
{
|
|
|
|
/* Don't request SSL or GSSAPI over Unix sockets */
|
2003-06-08 19:43:00 +02:00
|
|
|
#ifdef USE_SSL
|
GSSAPI encryption support
On both the frontend and backend, prepare for GSSAPI encryption
support by moving common code for error handling into a separate file.
Fix a TODO for handling multiple status messages in the process.
Eliminate the OIDs, which have not been needed for some time.
Add frontend and backend encryption support functions. Keep the
context initiation for authentication-only separate on both the
frontend and backend in order to avoid concerns about changing the
requested flags to include encryption support.
In postmaster, pull GSSAPI authorization checking into a shared
function. Also share the initiator name between the encryption and
non-encryption codepaths.
For HBA, add "hostgssenc" and "hostnogssenc" entries that behave
similarly to their SSL counterparts. "hostgssenc" requires either
"gss", "trust", or "reject" for its authentication.
Similarly, add a "gssencmode" parameter to libpq. Supported values are
"disable", "require", and "prefer". Notably, negotiation will only be
attempted if credentials can be acquired. Move credential acquisition
into its own function to support this behavior.
Add a simple pg_stat_gssapi view similar to pg_stat_ssl, for monitoring
if GSSAPI authentication was used, what principal was used, and if
encryption is being used on the connection.
Finally, add documentation for everything new, and update existing
documentation on connection security.
Thanks to Michael Paquier for the Windows fixes.
Author: Robbie Harwood, with changes to the read/write functions by me.
Reviewed in various forms and at different times by: Michael Paquier,
Andres Freund, David Steele.
Discussion: https://www.postgresql.org/message-id/flat/jlg1tgq1ktm.fsf@thriss.redhat.com
2019-04-03 21:02:33 +02:00
|
|
|
conn->allow_ssl_try = false;
|
|
|
|
#endif
|
|
|
|
#ifdef ENABLE_GSS
|
|
|
|
conn->try_gss = false;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef ENABLE_GSS
|
2003-08-04 02:43:34 +02:00
|
|
|
|
2003-06-08 19:43:00 +02:00
|
|
|
/*
|
2020-01-08 16:57:09 +01:00
|
|
|
* If GSSAPI encryption is enabled, then call
|
|
|
|
* pg_GSS_have_cred_cache() which will return true if we can
|
|
|
|
* acquire credentials (and give us a handle to use in
|
|
|
|
* conn->gcred), and then send a packet to the server asking
|
|
|
|
* for GSSAPI Encryption (and skip past SSL negotiation and
|
|
|
|
* regular startup below).
|
2003-06-08 19:43:00 +02:00
|
|
|
*/
|
GSSAPI encryption support
On both the frontend and backend, prepare for GSSAPI encryption
support by moving common code for error handling into a separate file.
Fix a TODO for handling multiple status messages in the process.
Eliminate the OIDs, which have not been needed for some time.
Add frontend and backend encryption support functions. Keep the
context initiation for authentication-only separate on both the
frontend and backend in order to avoid concerns about changing the
requested flags to include encryption support.
In postmaster, pull GSSAPI authorization checking into a shared
function. Also share the initiator name between the encryption and
non-encryption codepaths.
For HBA, add "hostgssenc" and "hostnogssenc" entries that behave
similarly to their SSL counterparts. "hostgssenc" requires either
"gss", "trust", or "reject" for its authentication.
Similarly, add a "gssencmode" parameter to libpq. Supported values are
"disable", "require", and "prefer". Notably, negotiation will only be
attempted if credentials can be acquired. Move credential acquisition
into its own function to support this behavior.
Add a simple pg_stat_gssapi view similar to pg_stat_ssl, for monitoring
if GSSAPI authentication was used, what principal was used, and if
encryption is being used on the connection.
Finally, add documentation for everything new, and update existing
documentation on connection security.
Thanks to Michael Paquier for the Windows fixes.
Author: Robbie Harwood, with changes to the read/write functions by me.
Reviewed in various forms and at different times by: Michael Paquier,
Andres Freund, David Steele.
Discussion: https://www.postgresql.org/message-id/flat/jlg1tgq1ktm.fsf@thriss.redhat.com
2019-04-03 21:02:33 +02:00
|
|
|
if (conn->try_gss && !conn->gctx)
|
2019-09-06 09:15:35 +02:00
|
|
|
conn->try_gss = pg_GSS_have_cred_cache(&conn->gcred);
|
GSSAPI encryption support
On both the frontend and backend, prepare for GSSAPI encryption
support by moving common code for error handling into a separate file.
Fix a TODO for handling multiple status messages in the process.
Eliminate the OIDs, which have not been needed for some time.
Add frontend and backend encryption support functions. Keep the
context initiation for authentication-only separate on both the
frontend and backend in order to avoid concerns about changing the
requested flags to include encryption support.
In postmaster, pull GSSAPI authorization checking into a shared
function. Also share the initiator name between the encryption and
non-encryption codepaths.
For HBA, add "hostgssenc" and "hostnogssenc" entries that behave
similarly to their SSL counterparts. "hostgssenc" requires either
"gss", "trust", or "reject" for its authentication.
Similarly, add a "gssencmode" parameter to libpq. Supported values are
"disable", "require", and "prefer". Notably, negotiation will only be
attempted if credentials can be acquired. Move credential acquisition
into its own function to support this behavior.
Add a simple pg_stat_gssapi view similar to pg_stat_ssl, for monitoring
if GSSAPI authentication was used, what principal was used, and if
encryption is being used on the connection.
Finally, add documentation for everything new, and update existing
documentation on connection security.
Thanks to Michael Paquier for the Windows fixes.
Author: Robbie Harwood, with changes to the read/write functions by me.
Reviewed in various forms and at different times by: Michael Paquier,
Andres Freund, David Steele.
Discussion: https://www.postgresql.org/message-id/flat/jlg1tgq1ktm.fsf@thriss.redhat.com
2019-04-03 21:02:33 +02:00
|
|
|
if (conn->try_gss && !conn->gctx)
|
2003-06-08 19:43:00 +02:00
|
|
|
{
|
GSSAPI encryption support
On both the frontend and backend, prepare for GSSAPI encryption
support by moving common code for error handling into a separate file.
Fix a TODO for handling multiple status messages in the process.
Eliminate the OIDs, which have not been needed for some time.
Add frontend and backend encryption support functions. Keep the
context initiation for authentication-only separate on both the
frontend and backend in order to avoid concerns about changing the
requested flags to include encryption support.
In postmaster, pull GSSAPI authorization checking into a shared
function. Also share the initiator name between the encryption and
non-encryption codepaths.
For HBA, add "hostgssenc" and "hostnogssenc" entries that behave
similarly to their SSL counterparts. "hostgssenc" requires either
"gss", "trust", or "reject" for its authentication.
Similarly, add a "gssencmode" parameter to libpq. Supported values are
"disable", "require", and "prefer". Notably, negotiation will only be
attempted if credentials can be acquired. Move credential acquisition
into its own function to support this behavior.
Add a simple pg_stat_gssapi view similar to pg_stat_ssl, for monitoring
if GSSAPI authentication was used, what principal was used, and if
encryption is being used on the connection.
Finally, add documentation for everything new, and update existing
documentation on connection security.
Thanks to Michael Paquier for the Windows fixes.
Author: Robbie Harwood, with changes to the read/write functions by me.
Reviewed in various forms and at different times by: Michael Paquier,
Andres Freund, David Steele.
Discussion: https://www.postgresql.org/message-id/flat/jlg1tgq1ktm.fsf@thriss.redhat.com
2019-04-03 21:02:33 +02:00
|
|
|
ProtocolVersion pv = pg_hton32(NEGOTIATE_GSS_CODE);
|
|
|
|
|
|
|
|
if (pqPacketSend(conn, 0, &pv, sizeof(pv)) != STATUS_OK)
|
|
|
|
{
|
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
|
|
|
libpq_gettext("could not send GSSAPI negotiation packet: %s\n"),
|
|
|
|
SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
|
|
|
|
goto error_return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Ok, wait for response */
|
|
|
|
conn->status = CONNECTION_GSS_STARTUP;
|
|
|
|
return PGRES_POLLING_READING;
|
2003-06-08 19:43:00 +02:00
|
|
|
}
|
GSSAPI encryption support
On both the frontend and backend, prepare for GSSAPI encryption
support by moving common code for error handling into a separate file.
Fix a TODO for handling multiple status messages in the process.
Eliminate the OIDs, which have not been needed for some time.
Add frontend and backend encryption support functions. Keep the
context initiation for authentication-only separate on both the
frontend and backend in order to avoid concerns about changing the
requested flags to include encryption support.
In postmaster, pull GSSAPI authorization checking into a shared
function. Also share the initiator name between the encryption and
non-encryption codepaths.
For HBA, add "hostgssenc" and "hostnogssenc" entries that behave
similarly to their SSL counterparts. "hostgssenc" requires either
"gss", "trust", or "reject" for its authentication.
Similarly, add a "gssencmode" parameter to libpq. Supported values are
"disable", "require", and "prefer". Notably, negotiation will only be
attempted if credentials can be acquired. Move credential acquisition
into its own function to support this behavior.
Add a simple pg_stat_gssapi view similar to pg_stat_ssl, for monitoring
if GSSAPI authentication was used, what principal was used, and if
encryption is being used on the connection.
Finally, add documentation for everything new, and update existing
documentation on connection security.
Thanks to Michael Paquier for the Windows fixes.
Author: Robbie Harwood, with changes to the read/write functions by me.
Reviewed in various forms and at different times by: Michael Paquier,
Andres Freund, David Steele.
Discussion: https://www.postgresql.org/message-id/flat/jlg1tgq1ktm.fsf@thriss.redhat.com
2019-04-03 21:02:33 +02:00
|
|
|
else if (!conn->gctx && conn->gssencmode[0] == 'r')
|
|
|
|
{
|
2019-07-04 03:01:13 +02:00
|
|
|
appendPQExpBufferStr(&conn->errorMessage,
|
2019-09-06 16:12:28 +02:00
|
|
|
libpq_gettext("GSSAPI encryption required but was impossible (possibly no credential cache, no server support, or using a local socket)\n"));
|
GSSAPI encryption support
On both the frontend and backend, prepare for GSSAPI encryption
support by moving common code for error handling into a separate file.
Fix a TODO for handling multiple status messages in the process.
Eliminate the OIDs, which have not been needed for some time.
Add frontend and backend encryption support functions. Keep the
context initiation for authentication-only separate on both the
frontend and backend in order to avoid concerns about changing the
requested flags to include encryption support.
In postmaster, pull GSSAPI authorization checking into a shared
function. Also share the initiator name between the encryption and
non-encryption codepaths.
For HBA, add "hostgssenc" and "hostnogssenc" entries that behave
similarly to their SSL counterparts. "hostgssenc" requires either
"gss", "trust", or "reject" for its authentication.
Similarly, add a "gssencmode" parameter to libpq. Supported values are
"disable", "require", and "prefer". Notably, negotiation will only be
attempted if credentials can be acquired. Move credential acquisition
into its own function to support this behavior.
Add a simple pg_stat_gssapi view similar to pg_stat_ssl, for monitoring
if GSSAPI authentication was used, what principal was used, and if
encryption is being used on the connection.
Finally, add documentation for everything new, and update existing
documentation on connection security.
Thanks to Michael Paquier for the Windows fixes.
Author: Robbie Harwood, with changes to the read/write functions by me.
Reviewed in various forms and at different times by: Michael Paquier,
Andres Freund, David Steele.
Discussion: https://www.postgresql.org/message-id/flat/jlg1tgq1ktm.fsf@thriss.redhat.com
2019-04-03 21:02:33 +02:00
|
|
|
goto error_return;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifdef USE_SSL
|
|
|
|
|
Set libcrypto callbacks for all connection threads in libpq
Based on an analysis of the OpenSSL code with Jacob, moving to EVP for
the cryptohash computations makes necessary the setup of the libcrypto
callbacks that were getting set only for SSL connections, but not for
connections without SSL. Not setting the callbacks makes the use of
threads potentially unsafe for connections calling cryptohashes during
authentication, like MD5 or SCRAM, if a failure happens during a
cryptohash computation. The logic setting the libssl and libcrypto
states is then split into two parts, both using the same locking, with
libcrypto being set up for SSL and non-SSL connections, while SSL
connections set any libssl state afterwards as needed.
Prior to this commit, only SSL connections would have set libcrypto
callbacks that are necessary to ensure a proper thread locking when
using multiple concurrent threads in libpq (ENABLE_THREAD_SAFETY). Note
that this is only required for OpenSSL 1.0.2 and 1.0.1 (oldest version
supported on HEAD), as 1.1.0 has its own internal locking and it has
dropped support for CRYPTO_set_locking_callback().
Tests with up to 300 threads with OpenSSL 1.0.1 and 1.0.2, mixing SSL
and non-SSL connection threads did not show any performance impact after
some micro-benchmarking. pgbench can be used here with -C and a
mostly-empty script (with one \set meta-command for example) to stress
authentication requests, and we have mixed that with some custom
programs for testing.
Reported-by: Jacob Champion
Author: Michael Paquier
Reviewed-by: Jacob Champion
Discussion: https://postgr.es/m/fd3ba610085f1ff54623478cf2f7adf5af193cbb.camel@vmware.com
2021-03-11 09:14:25 +01:00
|
|
|
/*
|
|
|
|
* Enable the libcrypto callbacks before checking if SSL needs
|
|
|
|
* to be done. This is done before sending the startup packet
|
|
|
|
* as depending on the type of authentication done, like MD5
|
|
|
|
* or SCRAM that use cryptohashes, the callbacks would be
|
|
|
|
* required even without a SSL connection
|
|
|
|
*/
|
|
|
|
if (pqsecure_initialize(conn, false, true) < 0)
|
|
|
|
goto error_return;
|
|
|
|
|
GSSAPI encryption support
On both the frontend and backend, prepare for GSSAPI encryption
support by moving common code for error handling into a separate file.
Fix a TODO for handling multiple status messages in the process.
Eliminate the OIDs, which have not been needed for some time.
Add frontend and backend encryption support functions. Keep the
context initiation for authentication-only separate on both the
frontend and backend in order to avoid concerns about changing the
requested flags to include encryption support.
In postmaster, pull GSSAPI authorization checking into a shared
function. Also share the initiator name between the encryption and
non-encryption codepaths.
For HBA, add "hostgssenc" and "hostnogssenc" entries that behave
similarly to their SSL counterparts. "hostgssenc" requires either
"gss", "trust", or "reject" for its authentication.
Similarly, add a "gssencmode" parameter to libpq. Supported values are
"disable", "require", and "prefer". Notably, negotiation will only be
attempted if credentials can be acquired. Move credential acquisition
into its own function to support this behavior.
Add a simple pg_stat_gssapi view similar to pg_stat_ssl, for monitoring
if GSSAPI authentication was used, what principal was used, and if
encryption is being used on the connection.
Finally, add documentation for everything new, and update existing
documentation on connection security.
Thanks to Michael Paquier for the Windows fixes.
Author: Robbie Harwood, with changes to the read/write functions by me.
Reviewed in various forms and at different times by: Michael Paquier,
Andres Freund, David Steele.
Discussion: https://www.postgresql.org/message-id/flat/jlg1tgq1ktm.fsf@thriss.redhat.com
2019-04-03 21:02:33 +02:00
|
|
|
/*
|
Fix bugs in libpq's GSSAPI encryption support.
The critical issue fixed here is that if a GSSAPI-encrypted connection
is successfully made, pqsecure_open_gss() cleared conn->allow_ssl_try,
as an admittedly-hacky way of preventing us from then trying to tunnel
SSL encryption over the already-encrypted connection. The problem
with that is that if we abandon the GSSAPI connection because of a
failure during authentication, we would not attempt SSL encryption
in the next try with the same server. This can lead to unexpected
connection failure, or silently getting a non-encrypted connection
where an encrypted one is expected.
Fortunately, we'd only manage to make a GSSAPI-encrypted connection
if both client and server hold valid tickets in the same Kerberos
infrastructure, which is a relatively uncommon environment.
Nonetheless this is a very nasty bug with potential security
consequences. To fix, don't reset the flag, instead adding a
check for conn->gssenc being already true when deciding whether
to try to initiate SSL.
While here, fix some lesser issues in libpq's GSSAPI code:
* Use the need_new_connection stanza when dropping an attempted
GSSAPI connection, instead of partially duplicating that code.
The consequences of this are pretty minor: AFAICS it could only
lead to auth_req_received or password_needed remaining set when
they shouldn't, which is not too harmful.
* Fix pg_GSS_error() to not repeat the "mprefix" it's given multiple
times, and to notice any failure return from gss_display_status().
* Avoid gratuitous dependency on NI_MAXHOST in
pg_GSS_load_servicename().
Per report from Mikael Gustavsson. Back-patch to v12 where
this code was introduced.
Discussion: https://postgr.es/m/e5b0b6ed05764324a2f3fe7acfc766d5@smhi.se
2020-12-28 21:43:44 +01:00
|
|
|
* If SSL is enabled and we haven't already got encryption of
|
|
|
|
* some sort running, request SSL instead of sending the
|
|
|
|
* startup message.
|
GSSAPI encryption support
On both the frontend and backend, prepare for GSSAPI encryption
support by moving common code for error handling into a separate file.
Fix a TODO for handling multiple status messages in the process.
Eliminate the OIDs, which have not been needed for some time.
Add frontend and backend encryption support functions. Keep the
context initiation for authentication-only separate on both the
frontend and backend in order to avoid concerns about changing the
requested flags to include encryption support.
In postmaster, pull GSSAPI authorization checking into a shared
function. Also share the initiator name between the encryption and
non-encryption codepaths.
For HBA, add "hostgssenc" and "hostnogssenc" entries that behave
similarly to their SSL counterparts. "hostgssenc" requires either
"gss", "trust", or "reject" for its authentication.
Similarly, add a "gssencmode" parameter to libpq. Supported values are
"disable", "require", and "prefer". Notably, negotiation will only be
attempted if credentials can be acquired. Move credential acquisition
into its own function to support this behavior.
Add a simple pg_stat_gssapi view similar to pg_stat_ssl, for monitoring
if GSSAPI authentication was used, what principal was used, and if
encryption is being used on the connection.
Finally, add documentation for everything new, and update existing
documentation on connection security.
Thanks to Michael Paquier for the Windows fixes.
Author: Robbie Harwood, with changes to the read/write functions by me.
Reviewed in various forms and at different times by: Michael Paquier,
Andres Freund, David Steele.
Discussion: https://www.postgresql.org/message-id/flat/jlg1tgq1ktm.fsf@thriss.redhat.com
2019-04-03 21:02:33 +02:00
|
|
|
*/
|
2003-08-01 23:27:27 +02:00
|
|
|
if (conn->allow_ssl_try && !conn->wait_ssl_try &&
|
Fix bugs in libpq's GSSAPI encryption support.
The critical issue fixed here is that if a GSSAPI-encrypted connection
is successfully made, pqsecure_open_gss() cleared conn->allow_ssl_try,
as an admittedly-hacky way of preventing us from then trying to tunnel
SSL encryption over the already-encrypted connection. The problem
with that is that if we abandon the GSSAPI connection because of a
failure during authentication, we would not attempt SSL encryption
in the next try with the same server. This can lead to unexpected
connection failure, or silently getting a non-encrypted connection
where an encrypted one is expected.
Fortunately, we'd only manage to make a GSSAPI-encrypted connection
if both client and server hold valid tickets in the same Kerberos
infrastructure, which is a relatively uncommon environment.
Nonetheless this is a very nasty bug with potential security
consequences. To fix, don't reset the flag, instead adding a
check for conn->gssenc being already true when deciding whether
to try to initiate SSL.
While here, fix some lesser issues in libpq's GSSAPI code:
* Use the need_new_connection stanza when dropping an attempted
GSSAPI connection, instead of partially duplicating that code.
The consequences of this are pretty minor: AFAICS it could only
lead to auth_req_received or password_needed remaining set when
they shouldn't, which is not too harmful.
* Fix pg_GSS_error() to not repeat the "mprefix" it's given multiple
times, and to notice any failure return from gss_display_status().
* Avoid gratuitous dependency on NI_MAXHOST in
pg_GSS_load_servicename().
Per report from Mikael Gustavsson. Back-patch to v12 where
this code was introduced.
Discussion: https://postgr.es/m/e5b0b6ed05764324a2f3fe7acfc766d5@smhi.se
2020-12-28 21:43:44 +01:00
|
|
|
!conn->ssl_in_use
|
|
|
|
#ifdef ENABLE_GSS
|
|
|
|
&& !conn->gssenc
|
|
|
|
#endif
|
|
|
|
)
|
2003-06-08 19:43:00 +02:00
|
|
|
{
|
|
|
|
ProtocolVersion pv;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Send the SSL request packet.
|
|
|
|
*
|
|
|
|
* Theoretically, this could block, but it really
|
|
|
|
* shouldn't since we only got here if the socket is
|
|
|
|
* write-ready.
|
|
|
|
*/
|
2017-10-02 00:36:14 +02:00
|
|
|
pv = pg_hton32(NEGOTIATE_SSL_CODE);
|
2003-06-08 19:43:00 +02:00
|
|
|
if (pqPacketSend(conn, 0, &pv, sizeof(pv)) != STATUS_OK)
|
|
|
|
{
|
2008-10-27 10:42:31 +01:00
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
2003-06-08 19:43:00 +02:00
|
|
|
libpq_gettext("could not send SSL negotiation packet: %s\n"),
|
2003-06-14 19:49:54 +02:00
|
|
|
SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
|
2003-06-08 19:43:00 +02:00
|
|
|
goto error_return;
|
|
|
|
}
|
|
|
|
/* Ok, wait for response */
|
|
|
|
conn->status = CONNECTION_SSL_STARTUP;
|
|
|
|
return PGRES_POLLING_READING;
|
|
|
|
}
|
|
|
|
#endif /* USE_SSL */
|
|
|
|
|
2000-04-12 19:17:23 +02:00
|
|
|
/*
|
2003-04-18 00:26:02 +02:00
|
|
|
* Build the startup packet.
|
2000-04-12 19:17:23 +02:00
|
|
|
*/
|
2021-03-04 09:45:55 +01:00
|
|
|
startpacket = pqBuildStartupPacket3(conn, &packetlen,
|
|
|
|
EnvironmentOptions);
|
2003-04-18 00:26:02 +02:00
|
|
|
if (!startpacket)
|
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBufferStr(&conn->errorMessage,
|
|
|
|
libpq_gettext("out of memory\n"));
|
2003-04-18 00:26:02 +02:00
|
|
|
goto error_return;
|
|
|
|
}
|
1998-01-26 02:42:53 +01:00
|
|
|
|
1999-11-30 04:08:19 +01:00
|
|
|
/*
|
2000-01-14 06:33:15 +01:00
|
|
|
* Send the startup packet.
|
2000-04-12 19:17:23 +02:00
|
|
|
*
|
2000-01-14 06:33:15 +01:00
|
|
|
* Theoretically, this could block, but it really shouldn't
|
|
|
|
* since we only got here if the socket is write-ready.
|
1999-11-30 04:08:19 +01:00
|
|
|
*/
|
2003-04-18 00:26:02 +02:00
|
|
|
if (pqPacketSend(conn, 0, startpacket, packetlen) != STATUS_OK)
|
2000-04-12 19:17:23 +02:00
|
|
|
{
|
2008-10-27 10:42:31 +01:00
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
2001-07-15 15:45:04 +02:00
|
|
|
libpq_gettext("could not send startup packet: %s\n"),
|
2003-06-14 19:49:54 +02:00
|
|
|
SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
|
2003-04-18 00:26:02 +02:00
|
|
|
free(startpacket);
|
1999-11-30 04:08:19 +01:00
|
|
|
goto error_return;
|
2000-04-12 19:17:23 +02:00
|
|
|
}
|
1999-11-30 04:08:19 +01:00
|
|
|
|
2003-04-18 00:26:02 +02:00
|
|
|
free(startpacket);
|
|
|
|
|
1999-11-30 04:08:19 +01:00
|
|
|
conn->status = CONNECTION_AWAITING_RESPONSE;
|
|
|
|
return PGRES_POLLING_READING;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2003-06-08 19:43:00 +02:00
|
|
|
* Handle SSL negotiation: wait for postmaster messages and
|
|
|
|
* respond as necessary.
|
|
|
|
*/
|
|
|
|
case CONNECTION_SSL_STARTUP:
|
|
|
|
{
|
|
|
|
#ifdef USE_SSL
|
|
|
|
PostgresPollingStatusType pollres;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* On first time through, get the postmaster's response to our
|
2005-01-06 21:06:58 +01:00
|
|
|
* SSL negotiation packet.
|
2003-06-08 19:43:00 +02:00
|
|
|
*/
|
Break out OpenSSL-specific code to separate files.
This refactoring is in preparation for adding support for other SSL
implementations, with no user-visible effects. There are now two #defines,
USE_OPENSSL which is defined when building with OpenSSL, and USE_SSL which
is defined when building with any SSL implementation. Currently, OpenSSL is
the only implementation so the two #defines go together, but USE_SSL is
supposed to be used for implementation-independent code.
The libpq SSL code is changed to use a custom BIO, which does all the raw
I/O, like we've been doing in the backend for a long time. That makes it
possible to use MSG_NOSIGNAL to block SIGPIPE when using SSL, which avoids
a couple of syscalls for each send(). Probably doesn't make much performance
difference in practice - the SSL encryption is expensive enough to mask the
effect - but it was a natural result of this refactoring.
Based on a patch by Martijn van Oosterhout from 2006. Briefly reviewed by
Alvaro Herrera, Andreas Karlsson, Jeff Janes.
2014-08-11 10:54:19 +02:00
|
|
|
if (!conn->ssl_in_use)
|
2003-06-08 19:43:00 +02:00
|
|
|
{
|
2005-01-06 21:06:58 +01:00
|
|
|
/*
|
|
|
|
* We use pqReadData here since it has the logic to
|
|
|
|
* distinguish no-data-yet from connection closure. Since
|
|
|
|
* conn->ssl isn't set, a plain recv() will occur.
|
|
|
|
*/
|
2003-06-08 19:43:00 +02:00
|
|
|
char SSLok;
|
2005-01-06 21:06:58 +01:00
|
|
|
int rdresult;
|
2003-06-08 19:43:00 +02:00
|
|
|
|
2005-01-06 21:06:58 +01:00
|
|
|
rdresult = pqReadData(conn);
|
|
|
|
if (rdresult < 0)
|
2003-06-08 19:43:00 +02:00
|
|
|
{
|
2005-01-06 21:06:58 +01:00
|
|
|
/* errorMessage is already filled in */
|
2003-06-08 19:43:00 +02:00
|
|
|
goto error_return;
|
|
|
|
}
|
2005-01-06 21:06:58 +01:00
|
|
|
if (rdresult == 0)
|
|
|
|
{
|
2003-06-08 19:43:00 +02:00
|
|
|
/* caller failed to wait for data */
|
|
|
|
return PGRES_POLLING_READING;
|
2005-01-06 21:06:58 +01:00
|
|
|
}
|
|
|
|
if (pqGetc(&SSLok, conn) < 0)
|
|
|
|
{
|
|
|
|
/* should not happen really */
|
|
|
|
return PGRES_POLLING_READING;
|
|
|
|
}
|
2003-06-08 19:43:00 +02:00
|
|
|
if (SSLok == 'S')
|
|
|
|
{
|
Don't assume that "E" response to NEGOTIATE_SSL_CODE means pre-7.0 server.
These days, such a response is far more likely to signify a server-side
problem, such as fork failure. Reporting "server does not support SSL"
(in sslmode=require) could be quite misleading. But the results could
be even worse in sslmode=prefer: if the problem was transient and the
next connection attempt succeeds, we'll have silently fallen back to
protocol version 2.0, possibly disabling features the user needs.
Hence, it seems best to just eliminate the assumption that backing off
to non-SSL/2.0 protocol is the way to recover from an "E" response, and
instead treat the server error the same as we would in non-SSL cases.
I tested this change against a pre-7.0 server, and found that there
was a second logic bug in the "prefer" path: the test to decide whether
to make a fallback connection attempt assumed that we must have opened
conn->ssl, which in fact does not happen given an "E" response. After
fixing that, the code does indeed connect successfully to pre-7.0,
as long as you didn't set sslmode=require. (If you did, you get
"Unsupported frontend protocol", which isn't completely off base
given the server certainly doesn't support SSL.)
Since there seems no reason to believe that pre-7.0 servers exist anymore
in the wild, back-patch to all supported branches.
2011-08-27 22:36:57 +02:00
|
|
|
/* mark byte consumed */
|
|
|
|
conn->inStart = conn->inCursor;
|
Set libcrypto callbacks for all connection threads in libpq
Based on an analysis of the OpenSSL code with Jacob, moving to EVP for
the cryptohash computations makes necessary the setup of the libcrypto
callbacks that were getting set only for SSL connections, but not for
connections without SSL. Not setting the callbacks makes the use of
threads potentially unsafe for connections calling cryptohashes during
authentication, like MD5 or SCRAM, if a failure happens during a
cryptohash computation. The logic setting the libssl and libcrypto
states is then split into two parts, both using the same locking, with
libcrypto being set up for SSL and non-SSL connections, while SSL
connections set any libssl state afterwards as needed.
Prior to this commit, only SSL connections would have set libcrypto
callbacks that are necessary to ensure a proper thread locking when
using multiple concurrent threads in libpq (ENABLE_THREAD_SAFETY). Note
that this is only required for OpenSSL 1.0.2 and 1.0.1 (oldest version
supported on HEAD), as 1.1.0 has its own internal locking and it has
dropped support for CRYPTO_set_locking_callback().
Tests with up to 300 threads with OpenSSL 1.0.1 and 1.0.2, mixing SSL
and non-SSL connection threads did not show any performance impact after
some micro-benchmarking. pgbench can be used here with -C and a
mostly-empty script (with one \set meta-command for example) to stress
authentication requests, and we have mixed that with some custom
programs for testing.
Reported-by: Jacob Champion
Author: Michael Paquier
Reviewed-by: Jacob Champion
Discussion: https://postgr.es/m/fd3ba610085f1ff54623478cf2f7adf5af193cbb.camel@vmware.com
2021-03-11 09:14:25 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Set up global SSL state if required. The crypto
|
|
|
|
* state has already been set if libpq took care of
|
|
|
|
* doing that, so there is no need to make that happen
|
|
|
|
* again.
|
|
|
|
*/
|
|
|
|
if (pqsecure_initialize(conn, true, false) != 0)
|
2003-06-08 19:43:00 +02:00
|
|
|
goto error_return;
|
|
|
|
}
|
|
|
|
else if (SSLok == 'N')
|
|
|
|
{
|
Don't assume that "E" response to NEGOTIATE_SSL_CODE means pre-7.0 server.
These days, such a response is far more likely to signify a server-side
problem, such as fork failure. Reporting "server does not support SSL"
(in sslmode=require) could be quite misleading. But the results could
be even worse in sslmode=prefer: if the problem was transient and the
next connection attempt succeeds, we'll have silently fallen back to
protocol version 2.0, possibly disabling features the user needs.
Hence, it seems best to just eliminate the assumption that backing off
to non-SSL/2.0 protocol is the way to recover from an "E" response, and
instead treat the server error the same as we would in non-SSL cases.
I tested this change against a pre-7.0 server, and found that there
was a second logic bug in the "prefer" path: the test to decide whether
to make a fallback connection attempt assumed that we must have opened
conn->ssl, which in fact does not happen given an "E" response. After
fixing that, the code does indeed connect successfully to pre-7.0,
as long as you didn't set sslmode=require. (If you did, you get
"Unsupported frontend protocol", which isn't completely off base
given the server certainly doesn't support SSL.)
Since there seems no reason to believe that pre-7.0 servers exist anymore
in the wild, back-patch to all supported branches.
2011-08-27 22:36:57 +02:00
|
|
|
/* mark byte consumed */
|
|
|
|
conn->inStart = conn->inCursor;
|
|
|
|
/* OK to do without SSL? */
|
2009-04-24 11:43:10 +02:00
|
|
|
if (conn->sslmode[0] == 'r' || /* "require" */
|
|
|
|
conn->sslmode[0] == 'v') /* "verify-ca" or
|
|
|
|
* "verify-full" */
|
2003-08-01 23:27:27 +02:00
|
|
|
{
|
|
|
|
/* Require SSL, but server does not want it */
|
2013-11-18 17:29:01 +01:00
|
|
|
appendPQExpBufferStr(&conn->errorMessage,
|
|
|
|
libpq_gettext("server does not support SSL, but SSL was required\n"));
|
2003-08-01 23:27:27 +02:00
|
|
|
goto error_return;
|
2003-06-08 19:43:00 +02:00
|
|
|
}
|
|
|
|
/* Otherwise, proceed with normal startup */
|
|
|
|
conn->allow_ssl_try = false;
|
Fix bugs in libpq's GSSAPI encryption support.
The critical issue fixed here is that if a GSSAPI-encrypted connection
is successfully made, pqsecure_open_gss() cleared conn->allow_ssl_try,
as an admittedly-hacky way of preventing us from then trying to tunnel
SSL encryption over the already-encrypted connection. The problem
with that is that if we abandon the GSSAPI connection because of a
failure during authentication, we would not attempt SSL encryption
in the next try with the same server. This can lead to unexpected
connection failure, or silently getting a non-encrypted connection
where an encrypted one is expected.
Fortunately, we'd only manage to make a GSSAPI-encrypted connection
if both client and server hold valid tickets in the same Kerberos
infrastructure, which is a relatively uncommon environment.
Nonetheless this is a very nasty bug with potential security
consequences. To fix, don't reset the flag, instead adding a
check for conn->gssenc being already true when deciding whether
to try to initiate SSL.
While here, fix some lesser issues in libpq's GSSAPI code:
* Use the need_new_connection stanza when dropping an attempted
GSSAPI connection, instead of partially duplicating that code.
The consequences of this are pretty minor: AFAICS it could only
lead to auth_req_received or password_needed remaining set when
they shouldn't, which is not too harmful.
* Fix pg_GSS_error() to not repeat the "mprefix" it's given multiple
times, and to notice any failure return from gss_display_status().
* Avoid gratuitous dependency on NI_MAXHOST in
pg_GSS_load_servicename().
Per report from Mikael Gustavsson. Back-patch to v12 where
this code was introduced.
Discussion: https://postgr.es/m/e5b0b6ed05764324a2f3fe7acfc766d5@smhi.se
2020-12-28 21:43:44 +01:00
|
|
|
/* We can proceed using this connection */
|
2003-06-08 19:43:00 +02:00
|
|
|
conn->status = CONNECTION_MADE;
|
|
|
|
return PGRES_POLLING_WRITING;
|
|
|
|
}
|
|
|
|
else if (SSLok == 'E')
|
|
|
|
{
|
Don't assume that "E" response to NEGOTIATE_SSL_CODE means pre-7.0 server.
These days, such a response is far more likely to signify a server-side
problem, such as fork failure. Reporting "server does not support SSL"
(in sslmode=require) could be quite misleading. But the results could
be even worse in sslmode=prefer: if the problem was transient and the
next connection attempt succeeds, we'll have silently fallen back to
protocol version 2.0, possibly disabling features the user needs.
Hence, it seems best to just eliminate the assumption that backing off
to non-SSL/2.0 protocol is the way to recover from an "E" response, and
instead treat the server error the same as we would in non-SSL cases.
I tested this change against a pre-7.0 server, and found that there
was a second logic bug in the "prefer" path: the test to decide whether
to make a fallback connection attempt assumed that we must have opened
conn->ssl, which in fact does not happen given an "E" response. After
fixing that, the code does indeed connect successfully to pre-7.0,
as long as you didn't set sslmode=require. (If you did, you get
"Unsupported frontend protocol", which isn't completely off base
given the server certainly doesn't support SSL.)
Since there seems no reason to believe that pre-7.0 servers exist anymore
in the wild, back-patch to all supported branches.
2011-08-27 22:36:57 +02:00
|
|
|
/*
|
|
|
|
* Server failure of some sort, such as failure to
|
|
|
|
* fork a backend process. We need to process and
|
|
|
|
* report the error message, which might be formatted
|
|
|
|
* according to either protocol 2 or protocol 3.
|
|
|
|
* Rather than duplicate the code for that, we flip
|
|
|
|
* into AWAITING_RESPONSE state and let the code there
|
|
|
|
* deal with it. Note we have *not* consumed the "E"
|
|
|
|
* byte here.
|
|
|
|
*/
|
|
|
|
conn->status = CONNECTION_AWAITING_RESPONSE;
|
2003-06-08 19:43:00 +02:00
|
|
|
goto keep_going;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2008-10-27 10:42:31 +01:00
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
2003-06-08 19:43:00 +02:00
|
|
|
libpq_gettext("received invalid response to SSL negotiation: %c\n"),
|
|
|
|
SSLok);
|
|
|
|
goto error_return;
|
|
|
|
}
|
|
|
|
}
|
2003-08-04 02:43:34 +02:00
|
|
|
|
2003-06-08 19:43:00 +02:00
|
|
|
/*
|
|
|
|
* Begin or continue the SSL negotiation process.
|
|
|
|
*/
|
|
|
|
pollres = pqsecure_open_client(conn);
|
|
|
|
if (pollres == PGRES_POLLING_OK)
|
|
|
|
{
|
2021-11-08 17:14:56 +01:00
|
|
|
/*
|
|
|
|
* At this point we should have no data already buffered.
|
|
|
|
* If we do, it was received before we performed the SSL
|
|
|
|
* handshake, so it wasn't encrypted and indeed may have
|
|
|
|
* been injected by a man-in-the-middle.
|
|
|
|
*/
|
|
|
|
if (conn->inCursor != conn->inEnd)
|
|
|
|
{
|
|
|
|
appendPQExpBufferStr(&conn->errorMessage,
|
|
|
|
libpq_gettext("received unencrypted data after SSL response\n"));
|
|
|
|
goto error_return;
|
|
|
|
}
|
|
|
|
|
2003-06-08 19:43:00 +02:00
|
|
|
/* SSL handshake done, ready to send startup packet */
|
|
|
|
conn->status = CONNECTION_MADE;
|
|
|
|
return PGRES_POLLING_WRITING;
|
|
|
|
}
|
2006-11-21 17:28:00 +01:00
|
|
|
if (pollres == PGRES_POLLING_FAILED)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Failed ... if sslmode is "prefer" then do a non-SSL
|
|
|
|
* retry
|
|
|
|
*/
|
|
|
|
if (conn->sslmode[0] == 'p' /* "prefer" */
|
|
|
|
&& conn->allow_ssl_try /* redundant? */
|
|
|
|
&& !conn->wait_ssl_try) /* redundant? */
|
|
|
|
{
|
|
|
|
/* only retry once */
|
|
|
|
conn->allow_ssl_try = false;
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
need_new_connection = true;
|
2006-11-21 17:28:00 +01:00
|
|
|
goto keep_going;
|
|
|
|
}
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
/* Else it's a hard failure */
|
|
|
|
goto error_return;
|
2006-11-21 17:28:00 +01:00
|
|
|
}
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
/* Else, return POLLING_READING or POLLING_WRITING status */
|
2003-06-08 19:43:00 +02:00
|
|
|
return pollres;
|
|
|
|
#else /* !USE_SSL */
|
|
|
|
/* can't get here */
|
|
|
|
goto error_return;
|
|
|
|
#endif /* USE_SSL */
|
|
|
|
}
|
|
|
|
|
GSSAPI encryption support
On both the frontend and backend, prepare for GSSAPI encryption
support by moving common code for error handling into a separate file.
Fix a TODO for handling multiple status messages in the process.
Eliminate the OIDs, which have not been needed for some time.
Add frontend and backend encryption support functions. Keep the
context initiation for authentication-only separate on both the
frontend and backend in order to avoid concerns about changing the
requested flags to include encryption support.
In postmaster, pull GSSAPI authorization checking into a shared
function. Also share the initiator name between the encryption and
non-encryption codepaths.
For HBA, add "hostgssenc" and "hostnogssenc" entries that behave
similarly to their SSL counterparts. "hostgssenc" requires either
"gss", "trust", or "reject" for its authentication.
Similarly, add a "gssencmode" parameter to libpq. Supported values are
"disable", "require", and "prefer". Notably, negotiation will only be
attempted if credentials can be acquired. Move credential acquisition
into its own function to support this behavior.
Add a simple pg_stat_gssapi view similar to pg_stat_ssl, for monitoring
if GSSAPI authentication was used, what principal was used, and if
encryption is being used on the connection.
Finally, add documentation for everything new, and update existing
documentation on connection security.
Thanks to Michael Paquier for the Windows fixes.
Author: Robbie Harwood, with changes to the read/write functions by me.
Reviewed in various forms and at different times by: Michael Paquier,
Andres Freund, David Steele.
Discussion: https://www.postgresql.org/message-id/flat/jlg1tgq1ktm.fsf@thriss.redhat.com
2019-04-03 21:02:33 +02:00
|
|
|
case CONNECTION_GSS_STARTUP:
|
|
|
|
{
|
|
|
|
#ifdef ENABLE_GSS
|
|
|
|
PostgresPollingStatusType pollres;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If we haven't yet, get the postmaster's response to our
|
|
|
|
* negotiation packet
|
|
|
|
*/
|
|
|
|
if (conn->try_gss && !conn->gctx)
|
|
|
|
{
|
|
|
|
char gss_ok;
|
|
|
|
int rdresult = pqReadData(conn);
|
|
|
|
|
|
|
|
if (rdresult < 0)
|
|
|
|
/* pqReadData fills in error message */
|
|
|
|
goto error_return;
|
|
|
|
else if (rdresult == 0)
|
|
|
|
/* caller failed to wait for data */
|
|
|
|
return PGRES_POLLING_READING;
|
|
|
|
if (pqGetc(&gss_ok, conn) < 0)
|
|
|
|
/* shouldn't happen... */
|
|
|
|
return PGRES_POLLING_READING;
|
|
|
|
|
|
|
|
if (gss_ok == 'E')
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Server failure of some sort. Assume it's a
|
|
|
|
* protocol version support failure, and let's see if
|
|
|
|
* we can't recover (if it's not, we'll get a better
|
|
|
|
* error message on retry). Server gets fussy if we
|
|
|
|
* don't hang up the socket, though.
|
|
|
|
*/
|
|
|
|
conn->try_gss = false;
|
Fix bugs in libpq's GSSAPI encryption support.
The critical issue fixed here is that if a GSSAPI-encrypted connection
is successfully made, pqsecure_open_gss() cleared conn->allow_ssl_try,
as an admittedly-hacky way of preventing us from then trying to tunnel
SSL encryption over the already-encrypted connection. The problem
with that is that if we abandon the GSSAPI connection because of a
failure during authentication, we would not attempt SSL encryption
in the next try with the same server. This can lead to unexpected
connection failure, or silently getting a non-encrypted connection
where an encrypted one is expected.
Fortunately, we'd only manage to make a GSSAPI-encrypted connection
if both client and server hold valid tickets in the same Kerberos
infrastructure, which is a relatively uncommon environment.
Nonetheless this is a very nasty bug with potential security
consequences. To fix, don't reset the flag, instead adding a
check for conn->gssenc being already true when deciding whether
to try to initiate SSL.
While here, fix some lesser issues in libpq's GSSAPI code:
* Use the need_new_connection stanza when dropping an attempted
GSSAPI connection, instead of partially duplicating that code.
The consequences of this are pretty minor: AFAICS it could only
lead to auth_req_received or password_needed remaining set when
they shouldn't, which is not too harmful.
* Fix pg_GSS_error() to not repeat the "mprefix" it's given multiple
times, and to notice any failure return from gss_display_status().
* Avoid gratuitous dependency on NI_MAXHOST in
pg_GSS_load_servicename().
Per report from Mikael Gustavsson. Back-patch to v12 where
this code was introduced.
Discussion: https://postgr.es/m/e5b0b6ed05764324a2f3fe7acfc766d5@smhi.se
2020-12-28 21:43:44 +01:00
|
|
|
need_new_connection = true;
|
GSSAPI encryption support
On both the frontend and backend, prepare for GSSAPI encryption
support by moving common code for error handling into a separate file.
Fix a TODO for handling multiple status messages in the process.
Eliminate the OIDs, which have not been needed for some time.
Add frontend and backend encryption support functions. Keep the
context initiation for authentication-only separate on both the
frontend and backend in order to avoid concerns about changing the
requested flags to include encryption support.
In postmaster, pull GSSAPI authorization checking into a shared
function. Also share the initiator name between the encryption and
non-encryption codepaths.
For HBA, add "hostgssenc" and "hostnogssenc" entries that behave
similarly to their SSL counterparts. "hostgssenc" requires either
"gss", "trust", or "reject" for its authentication.
Similarly, add a "gssencmode" parameter to libpq. Supported values are
"disable", "require", and "prefer". Notably, negotiation will only be
attempted if credentials can be acquired. Move credential acquisition
into its own function to support this behavior.
Add a simple pg_stat_gssapi view similar to pg_stat_ssl, for monitoring
if GSSAPI authentication was used, what principal was used, and if
encryption is being used on the connection.
Finally, add documentation for everything new, and update existing
documentation on connection security.
Thanks to Michael Paquier for the Windows fixes.
Author: Robbie Harwood, with changes to the read/write functions by me.
Reviewed in various forms and at different times by: Michael Paquier,
Andres Freund, David Steele.
Discussion: https://www.postgresql.org/message-id/flat/jlg1tgq1ktm.fsf@thriss.redhat.com
2019-04-03 21:02:33 +02:00
|
|
|
goto keep_going;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* mark byte consumed */
|
|
|
|
conn->inStart = conn->inCursor;
|
|
|
|
|
|
|
|
if (gss_ok == 'N')
|
|
|
|
{
|
|
|
|
/* Server doesn't want GSSAPI; fall back if we can */
|
|
|
|
if (conn->gssencmode[0] == 'r')
|
|
|
|
{
|
|
|
|
appendPQExpBufferStr(&conn->errorMessage,
|
|
|
|
libpq_gettext("server doesn't support GSSAPI encryption, but it was required\n"));
|
|
|
|
goto error_return;
|
|
|
|
}
|
|
|
|
|
|
|
|
conn->try_gss = false;
|
Fix bugs in libpq's GSSAPI encryption support.
The critical issue fixed here is that if a GSSAPI-encrypted connection
is successfully made, pqsecure_open_gss() cleared conn->allow_ssl_try,
as an admittedly-hacky way of preventing us from then trying to tunnel
SSL encryption over the already-encrypted connection. The problem
with that is that if we abandon the GSSAPI connection because of a
failure during authentication, we would not attempt SSL encryption
in the next try with the same server. This can lead to unexpected
connection failure, or silently getting a non-encrypted connection
where an encrypted one is expected.
Fortunately, we'd only manage to make a GSSAPI-encrypted connection
if both client and server hold valid tickets in the same Kerberos
infrastructure, which is a relatively uncommon environment.
Nonetheless this is a very nasty bug with potential security
consequences. To fix, don't reset the flag, instead adding a
check for conn->gssenc being already true when deciding whether
to try to initiate SSL.
While here, fix some lesser issues in libpq's GSSAPI code:
* Use the need_new_connection stanza when dropping an attempted
GSSAPI connection, instead of partially duplicating that code.
The consequences of this are pretty minor: AFAICS it could only
lead to auth_req_received or password_needed remaining set when
they shouldn't, which is not too harmful.
* Fix pg_GSS_error() to not repeat the "mprefix" it's given multiple
times, and to notice any failure return from gss_display_status().
* Avoid gratuitous dependency on NI_MAXHOST in
pg_GSS_load_servicename().
Per report from Mikael Gustavsson. Back-patch to v12 where
this code was introduced.
Discussion: https://postgr.es/m/e5b0b6ed05764324a2f3fe7acfc766d5@smhi.se
2020-12-28 21:43:44 +01:00
|
|
|
/* We can proceed using this connection */
|
GSSAPI encryption support
On both the frontend and backend, prepare for GSSAPI encryption
support by moving common code for error handling into a separate file.
Fix a TODO for handling multiple status messages in the process.
Eliminate the OIDs, which have not been needed for some time.
Add frontend and backend encryption support functions. Keep the
context initiation for authentication-only separate on both the
frontend and backend in order to avoid concerns about changing the
requested flags to include encryption support.
In postmaster, pull GSSAPI authorization checking into a shared
function. Also share the initiator name between the encryption and
non-encryption codepaths.
For HBA, add "hostgssenc" and "hostnogssenc" entries that behave
similarly to their SSL counterparts. "hostgssenc" requires either
"gss", "trust", or "reject" for its authentication.
Similarly, add a "gssencmode" parameter to libpq. Supported values are
"disable", "require", and "prefer". Notably, negotiation will only be
attempted if credentials can be acquired. Move credential acquisition
into its own function to support this behavior.
Add a simple pg_stat_gssapi view similar to pg_stat_ssl, for monitoring
if GSSAPI authentication was used, what principal was used, and if
encryption is being used on the connection.
Finally, add documentation for everything new, and update existing
documentation on connection security.
Thanks to Michael Paquier for the Windows fixes.
Author: Robbie Harwood, with changes to the read/write functions by me.
Reviewed in various forms and at different times by: Michael Paquier,
Andres Freund, David Steele.
Discussion: https://www.postgresql.org/message-id/flat/jlg1tgq1ktm.fsf@thriss.redhat.com
2019-04-03 21:02:33 +02:00
|
|
|
conn->status = CONNECTION_MADE;
|
|
|
|
return PGRES_POLLING_WRITING;
|
|
|
|
}
|
|
|
|
else if (gss_ok != 'G')
|
|
|
|
{
|
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
|
|
|
libpq_gettext("received invalid response to GSSAPI negotiation: %c\n"),
|
|
|
|
gss_ok);
|
|
|
|
goto error_return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Begin or continue GSSAPI negotiation */
|
|
|
|
pollres = pqsecure_open_gss(conn);
|
|
|
|
if (pollres == PGRES_POLLING_OK)
|
|
|
|
{
|
2021-11-08 17:14:56 +01:00
|
|
|
/*
|
|
|
|
* At this point we should have no data already buffered.
|
|
|
|
* If we do, it was received before we performed the GSS
|
|
|
|
* handshake, so it wasn't encrypted and indeed may have
|
|
|
|
* been injected by a man-in-the-middle.
|
|
|
|
*/
|
|
|
|
if (conn->inCursor != conn->inEnd)
|
|
|
|
{
|
|
|
|
appendPQExpBufferStr(&conn->errorMessage,
|
|
|
|
libpq_gettext("received unencrypted data after GSSAPI encryption response\n"));
|
|
|
|
goto error_return;
|
|
|
|
}
|
|
|
|
|
GSSAPI encryption support
On both the frontend and backend, prepare for GSSAPI encryption
support by moving common code for error handling into a separate file.
Fix a TODO for handling multiple status messages in the process.
Eliminate the OIDs, which have not been needed for some time.
Add frontend and backend encryption support functions. Keep the
context initiation for authentication-only separate on both the
frontend and backend in order to avoid concerns about changing the
requested flags to include encryption support.
In postmaster, pull GSSAPI authorization checking into a shared
function. Also share the initiator name between the encryption and
non-encryption codepaths.
For HBA, add "hostgssenc" and "hostnogssenc" entries that behave
similarly to their SSL counterparts. "hostgssenc" requires either
"gss", "trust", or "reject" for its authentication.
Similarly, add a "gssencmode" parameter to libpq. Supported values are
"disable", "require", and "prefer". Notably, negotiation will only be
attempted if credentials can be acquired. Move credential acquisition
into its own function to support this behavior.
Add a simple pg_stat_gssapi view similar to pg_stat_ssl, for monitoring
if GSSAPI authentication was used, what principal was used, and if
encryption is being used on the connection.
Finally, add documentation for everything new, and update existing
documentation on connection security.
Thanks to Michael Paquier for the Windows fixes.
Author: Robbie Harwood, with changes to the read/write functions by me.
Reviewed in various forms and at different times by: Michael Paquier,
Andres Freund, David Steele.
Discussion: https://www.postgresql.org/message-id/flat/jlg1tgq1ktm.fsf@thriss.redhat.com
2019-04-03 21:02:33 +02:00
|
|
|
/* All set for startup packet */
|
|
|
|
conn->status = CONNECTION_MADE;
|
|
|
|
return PGRES_POLLING_WRITING;
|
|
|
|
}
|
|
|
|
else if (pollres == PGRES_POLLING_FAILED &&
|
|
|
|
conn->gssencmode[0] == 'p')
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* We failed, but we can retry on "prefer". Have to drop
|
|
|
|
* the current connection to do so, though.
|
|
|
|
*/
|
|
|
|
conn->try_gss = false;
|
Fix bugs in libpq's GSSAPI encryption support.
The critical issue fixed here is that if a GSSAPI-encrypted connection
is successfully made, pqsecure_open_gss() cleared conn->allow_ssl_try,
as an admittedly-hacky way of preventing us from then trying to tunnel
SSL encryption over the already-encrypted connection. The problem
with that is that if we abandon the GSSAPI connection because of a
failure during authentication, we would not attempt SSL encryption
in the next try with the same server. This can lead to unexpected
connection failure, or silently getting a non-encrypted connection
where an encrypted one is expected.
Fortunately, we'd only manage to make a GSSAPI-encrypted connection
if both client and server hold valid tickets in the same Kerberos
infrastructure, which is a relatively uncommon environment.
Nonetheless this is a very nasty bug with potential security
consequences. To fix, don't reset the flag, instead adding a
check for conn->gssenc being already true when deciding whether
to try to initiate SSL.
While here, fix some lesser issues in libpq's GSSAPI code:
* Use the need_new_connection stanza when dropping an attempted
GSSAPI connection, instead of partially duplicating that code.
The consequences of this are pretty minor: AFAICS it could only
lead to auth_req_received or password_needed remaining set when
they shouldn't, which is not too harmful.
* Fix pg_GSS_error() to not repeat the "mprefix" it's given multiple
times, and to notice any failure return from gss_display_status().
* Avoid gratuitous dependency on NI_MAXHOST in
pg_GSS_load_servicename().
Per report from Mikael Gustavsson. Back-patch to v12 where
this code was introduced.
Discussion: https://postgr.es/m/e5b0b6ed05764324a2f3fe7acfc766d5@smhi.se
2020-12-28 21:43:44 +01:00
|
|
|
need_new_connection = true;
|
GSSAPI encryption support
On both the frontend and backend, prepare for GSSAPI encryption
support by moving common code for error handling into a separate file.
Fix a TODO for handling multiple status messages in the process.
Eliminate the OIDs, which have not been needed for some time.
Add frontend and backend encryption support functions. Keep the
context initiation for authentication-only separate on both the
frontend and backend in order to avoid concerns about changing the
requested flags to include encryption support.
In postmaster, pull GSSAPI authorization checking into a shared
function. Also share the initiator name between the encryption and
non-encryption codepaths.
For HBA, add "hostgssenc" and "hostnogssenc" entries that behave
similarly to their SSL counterparts. "hostgssenc" requires either
"gss", "trust", or "reject" for its authentication.
Similarly, add a "gssencmode" parameter to libpq. Supported values are
"disable", "require", and "prefer". Notably, negotiation will only be
attempted if credentials can be acquired. Move credential acquisition
into its own function to support this behavior.
Add a simple pg_stat_gssapi view similar to pg_stat_ssl, for monitoring
if GSSAPI authentication was used, what principal was used, and if
encryption is being used on the connection.
Finally, add documentation for everything new, and update existing
documentation on connection security.
Thanks to Michael Paquier for the Windows fixes.
Author: Robbie Harwood, with changes to the read/write functions by me.
Reviewed in various forms and at different times by: Michael Paquier,
Andres Freund, David Steele.
Discussion: https://www.postgresql.org/message-id/flat/jlg1tgq1ktm.fsf@thriss.redhat.com
2019-04-03 21:02:33 +02:00
|
|
|
goto keep_going;
|
|
|
|
}
|
|
|
|
return pollres;
|
|
|
|
#else /* !ENABLE_GSS */
|
|
|
|
/* unreachable */
|
|
|
|
goto error_return;
|
|
|
|
#endif /* ENABLE_GSS */
|
|
|
|
}
|
|
|
|
|
2003-06-08 19:43:00 +02:00
|
|
|
/*
|
|
|
|
* Handle authentication exchange: wait for postmaster messages
|
1999-11-30 04:08:19 +01:00
|
|
|
* and respond as necessary.
|
|
|
|
*/
|
|
|
|
case CONNECTION_AWAITING_RESPONSE:
|
|
|
|
{
|
|
|
|
char beresp;
|
2003-04-22 02:08:07 +02:00
|
|
|
int msgLength;
|
|
|
|
int avail;
|
2000-01-14 06:33:15 +01:00
|
|
|
AuthRequest areq;
|
2017-04-13 18:34:14 +02:00
|
|
|
int res;
|
2000-04-12 19:17:23 +02:00
|
|
|
|
|
|
|
/*
|
2000-01-14 06:33:15 +01:00
|
|
|
* Scan the message from current point (note that if we find
|
|
|
|
* the message is incomplete, we will return without advancing
|
|
|
|
* inStart, and resume here next time).
|
2000-04-12 19:17:23 +02:00
|
|
|
*/
|
2000-01-14 06:33:15 +01:00
|
|
|
conn->inCursor = conn->inStart;
|
2000-04-12 19:17:23 +02:00
|
|
|
|
2003-04-22 02:08:07 +02:00
|
|
|
/* Read type byte */
|
2000-01-14 06:33:15 +01:00
|
|
|
if (pqGetc(&beresp, conn))
|
|
|
|
{
|
2002-08-27 17:02:50 +02:00
|
|
|
/* We'll come back when there is more data */
|
2000-01-14 06:33:15 +01:00
|
|
|
return PGRES_POLLING_READING;
|
|
|
|
}
|
1999-11-30 04:08:19 +01:00
|
|
|
|
2003-04-22 02:08:07 +02:00
|
|
|
/*
|
|
|
|
* Validate message type: we expect only an authentication
|
|
|
|
* request or an error here. Anything else probably means
|
|
|
|
* it's not Postgres on the other end at all.
|
|
|
|
*/
|
|
|
|
if (!(beresp == 'R' || beresp == 'E'))
|
|
|
|
{
|
2008-10-27 10:42:31 +01:00
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
2003-04-22 02:08:07 +02:00
|
|
|
libpq_gettext("expected authentication request from server, but received %c\n"),
|
|
|
|
beresp);
|
|
|
|
goto error_return;
|
|
|
|
}
|
|
|
|
|
2021-03-04 09:45:55 +01:00
|
|
|
/* Read message length word */
|
|
|
|
if (pqGetInt(&msgLength, 4, conn))
|
2003-06-08 19:43:00 +02:00
|
|
|
{
|
2021-03-04 09:45:55 +01:00
|
|
|
/* We'll come back when there is more data */
|
|
|
|
return PGRES_POLLING_READING;
|
2003-04-22 02:08:07 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Try to validate message length before using it.
|
2007-07-10 15:14:22 +02:00
|
|
|
* Authentication requests can't be very large, although GSS
|
|
|
|
* auth requests may not be that small. Errors can be a
|
2003-04-22 02:08:07 +02:00
|
|
|
* little larger, but not huge. If we see a large apparent
|
|
|
|
* length in an error, it means we're really talking to a
|
2021-03-04 09:45:55 +01:00
|
|
|
* pre-3.0-protocol server; cope. (Before version 14, the
|
|
|
|
* server also used the old protocol for errors that happened
|
|
|
|
* before processing the startup packet.)
|
2003-04-22 02:08:07 +02:00
|
|
|
*/
|
2007-07-10 15:14:22 +02:00
|
|
|
if (beresp == 'R' && (msgLength < 8 || msgLength > 2000))
|
2003-04-22 02:08:07 +02:00
|
|
|
{
|
2008-10-27 10:42:31 +01:00
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
2003-04-22 02:08:07 +02:00
|
|
|
libpq_gettext("expected authentication request from server, but received %c\n"),
|
|
|
|
beresp);
|
|
|
|
goto error_return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (beresp == 'E' && (msgLength < 8 || msgLength > 30000))
|
2000-04-12 19:17:23 +02:00
|
|
|
{
|
2003-04-22 02:08:07 +02:00
|
|
|
/* Handle error from a pre-3.0 server */
|
|
|
|
conn->inCursor = conn->inStart + 1; /* reread data */
|
2008-10-27 10:42:31 +01:00
|
|
|
if (pqGets_append(&conn->errorMessage, conn))
|
2000-04-12 19:17:23 +02:00
|
|
|
{
|
2002-08-27 18:21:51 +02:00
|
|
|
/* We'll come back when there is more data */
|
1999-11-30 04:08:19 +01:00
|
|
|
return PGRES_POLLING_READING;
|
2000-04-12 19:17:23 +02:00
|
|
|
}
|
1999-11-30 04:08:19 +01:00
|
|
|
/* OK, we read the message; mark data consumed */
|
|
|
|
conn->inStart = conn->inCursor;
|
2000-04-12 19:17:23 +02:00
|
|
|
|
|
|
|
/*
|
2021-03-04 09:56:33 +01:00
|
|
|
* Before 7.2, the postmaster didn't always end its
|
|
|
|
* messages with a newline, so add one if needed to
|
|
|
|
* conform to libpq conventions.
|
2000-04-12 19:17:23 +02:00
|
|
|
*/
|
2021-03-04 09:56:33 +01:00
|
|
|
if (conn->errorMessage.len == 0 ||
|
|
|
|
conn->errorMessage.data[conn->errorMessage.len - 1] != '\n')
|
|
|
|
{
|
|
|
|
appendPQExpBufferChar(&conn->errorMessage, '\n');
|
|
|
|
}
|
2003-06-08 19:43:00 +02:00
|
|
|
|
1999-11-30 04:08:19 +01:00
|
|
|
goto error_return;
|
|
|
|
}
|
|
|
|
|
2003-04-22 02:08:07 +02:00
|
|
|
/*
|
|
|
|
* Can't process if message body isn't all here yet.
|
|
|
|
*/
|
|
|
|
msgLength -= 4;
|
|
|
|
avail = conn->inEnd - conn->inCursor;
|
|
|
|
if (avail < msgLength)
|
2000-04-12 19:17:23 +02:00
|
|
|
{
|
2003-04-22 02:08:07 +02:00
|
|
|
/*
|
|
|
|
* Before returning, try to enlarge the input buffer if
|
|
|
|
* needed to hold the whole message; see notes in
|
2003-06-08 19:43:00 +02:00
|
|
|
* pqParseInput3.
|
2003-04-22 02:08:07 +02:00
|
|
|
*/
|
2008-05-30 00:02:44 +02:00
|
|
|
if (pqCheckInBufferSpace(conn->inCursor + (size_t) msgLength,
|
|
|
|
conn))
|
2003-04-22 02:08:07 +02:00
|
|
|
goto error_return;
|
|
|
|
/* We'll come back when there is more data */
|
|
|
|
return PGRES_POLLING_READING;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Handle errors. */
|
|
|
|
if (beresp == 'E')
|
|
|
|
{
|
2021-03-04 09:45:55 +01:00
|
|
|
if (pqGetErrorNotice3(conn, true))
|
2003-04-22 02:08:07 +02:00
|
|
|
{
|
2021-03-04 09:45:55 +01:00
|
|
|
/* We'll come back when there is more data */
|
|
|
|
return PGRES_POLLING_READING;
|
2003-04-22 02:08:07 +02:00
|
|
|
}
|
|
|
|
/* OK, we read the message; mark data consumed */
|
|
|
|
conn->inStart = conn->inCursor;
|
At long last I put together a patch to support 4 client SSL negotiation
modes (and replace the requiressl boolean). The four options were first
spelled out by Magnus Hagander <mha@sollentuna.net> on 2000-08-23 in email
to pgsql-hackers, archived here:
http://archives.postgresql.org/pgsql-hackers/2000-08/msg00639.php
My original less-flexible patch and the ensuing thread are archived at:
http://dbforums.com/t623845.html
Attached is a new patch, including documentation.
To sum up, there's a new client parameter "sslmode" and environment
variable "PGSSLMODE", with these options:
sslmode description
------- -----------
disable Unencrypted non-SSL only
allow Negotiate, prefer non-SSL
prefer Negotiate, prefer SSL (default)
require Require SSL
The only change to the server is a new pg_hba.conf line type,
"hostnossl", for specifying connections that are not allowed to use SSL
(for example, to prevent servers on a local network from accidentally
using SSL and wasting cycles). Thus the 3 pg_hba.conf line types are:
pg_hba.conf line types
----------------------
host applies to either SSL or regular connections
hostssl applies only to SSL connections
hostnossl applies only to regular connections
These client and server options, the postgresql.conf ssl = false option,
and finally the possibility of compiling with no SSL support at all,
make quite a range of combinations to test. I threw together a test
script to try many of them out. It's in a separate tarball with its
config files, a patch to psql so it'll announce SSL connections even in
absence of a tty, and the test output. The test is especially informative
when run on the same tty the postmaster was started on, so the FATAL:
errors during negotiation are interleaved with the psql client output.
I saw Tom write that new submissions for 7.4 have to be in before midnight
local time, and since I'm on the east coast in the US, this just makes it
in before the bell. :)
Jon Jensen
2003-07-26 15:50:02 +02:00
|
|
|
|
2021-01-11 20:12:31 +01:00
|
|
|
/*
|
|
|
|
* If error is "cannot connect now", try the next host if
|
|
|
|
* any (but we don't want to consider additional addresses
|
|
|
|
* for this host, nor is there much point in changing SSL
|
|
|
|
* or GSS mode). This is helpful when dealing with
|
|
|
|
* standby servers that might not be in hot-standby state.
|
|
|
|
*/
|
|
|
|
if (strcmp(conn->last_sqlstate,
|
|
|
|
ERRCODE_CANNOT_CONNECT_NOW) == 0)
|
|
|
|
{
|
|
|
|
conn->try_next_host = true;
|
|
|
|
goto keep_going;
|
|
|
|
}
|
|
|
|
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
/* Check to see if we should mention pgpassfile */
|
|
|
|
pgpassfileWarning(conn);
|
|
|
|
|
GSSAPI encryption support
On both the frontend and backend, prepare for GSSAPI encryption
support by moving common code for error handling into a separate file.
Fix a TODO for handling multiple status messages in the process.
Eliminate the OIDs, which have not been needed for some time.
Add frontend and backend encryption support functions. Keep the
context initiation for authentication-only separate on both the
frontend and backend in order to avoid concerns about changing the
requested flags to include encryption support.
In postmaster, pull GSSAPI authorization checking into a shared
function. Also share the initiator name between the encryption and
non-encryption codepaths.
For HBA, add "hostgssenc" and "hostnogssenc" entries that behave
similarly to their SSL counterparts. "hostgssenc" requires either
"gss", "trust", or "reject" for its authentication.
Similarly, add a "gssencmode" parameter to libpq. Supported values are
"disable", "require", and "prefer". Notably, negotiation will only be
attempted if credentials can be acquired. Move credential acquisition
into its own function to support this behavior.
Add a simple pg_stat_gssapi view similar to pg_stat_ssl, for monitoring
if GSSAPI authentication was used, what principal was used, and if
encryption is being used on the connection.
Finally, add documentation for everything new, and update existing
documentation on connection security.
Thanks to Michael Paquier for the Windows fixes.
Author: Robbie Harwood, with changes to the read/write functions by me.
Reviewed in various forms and at different times by: Michael Paquier,
Andres Freund, David Steele.
Discussion: https://www.postgresql.org/message-id/flat/jlg1tgq1ktm.fsf@thriss.redhat.com
2019-04-03 21:02:33 +02:00
|
|
|
#ifdef ENABLE_GSS
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If gssencmode is "prefer" and we're using GSSAPI, retry
|
|
|
|
* without it.
|
|
|
|
*/
|
|
|
|
if (conn->gssenc && conn->gssencmode[0] == 'p')
|
|
|
|
{
|
Fix bugs in libpq's GSSAPI encryption support.
The critical issue fixed here is that if a GSSAPI-encrypted connection
is successfully made, pqsecure_open_gss() cleared conn->allow_ssl_try,
as an admittedly-hacky way of preventing us from then trying to tunnel
SSL encryption over the already-encrypted connection. The problem
with that is that if we abandon the GSSAPI connection because of a
failure during authentication, we would not attempt SSL encryption
in the next try with the same server. This can lead to unexpected
connection failure, or silently getting a non-encrypted connection
where an encrypted one is expected.
Fortunately, we'd only manage to make a GSSAPI-encrypted connection
if both client and server hold valid tickets in the same Kerberos
infrastructure, which is a relatively uncommon environment.
Nonetheless this is a very nasty bug with potential security
consequences. To fix, don't reset the flag, instead adding a
check for conn->gssenc being already true when deciding whether
to try to initiate SSL.
While here, fix some lesser issues in libpq's GSSAPI code:
* Use the need_new_connection stanza when dropping an attempted
GSSAPI connection, instead of partially duplicating that code.
The consequences of this are pretty minor: AFAICS it could only
lead to auth_req_received or password_needed remaining set when
they shouldn't, which is not too harmful.
* Fix pg_GSS_error() to not repeat the "mprefix" it's given multiple
times, and to notice any failure return from gss_display_status().
* Avoid gratuitous dependency on NI_MAXHOST in
pg_GSS_load_servicename().
Per report from Mikael Gustavsson. Back-patch to v12 where
this code was introduced.
Discussion: https://postgr.es/m/e5b0b6ed05764324a2f3fe7acfc766d5@smhi.se
2020-12-28 21:43:44 +01:00
|
|
|
/* only retry once */
|
GSSAPI encryption support
On both the frontend and backend, prepare for GSSAPI encryption
support by moving common code for error handling into a separate file.
Fix a TODO for handling multiple status messages in the process.
Eliminate the OIDs, which have not been needed for some time.
Add frontend and backend encryption support functions. Keep the
context initiation for authentication-only separate on both the
frontend and backend in order to avoid concerns about changing the
requested flags to include encryption support.
In postmaster, pull GSSAPI authorization checking into a shared
function. Also share the initiator name between the encryption and
non-encryption codepaths.
For HBA, add "hostgssenc" and "hostnogssenc" entries that behave
similarly to their SSL counterparts. "hostgssenc" requires either
"gss", "trust", or "reject" for its authentication.
Similarly, add a "gssencmode" parameter to libpq. Supported values are
"disable", "require", and "prefer". Notably, negotiation will only be
attempted if credentials can be acquired. Move credential acquisition
into its own function to support this behavior.
Add a simple pg_stat_gssapi view similar to pg_stat_ssl, for monitoring
if GSSAPI authentication was used, what principal was used, and if
encryption is being used on the connection.
Finally, add documentation for everything new, and update existing
documentation on connection security.
Thanks to Michael Paquier for the Windows fixes.
Author: Robbie Harwood, with changes to the read/write functions by me.
Reviewed in various forms and at different times by: Michael Paquier,
Andres Freund, David Steele.
Discussion: https://www.postgresql.org/message-id/flat/jlg1tgq1ktm.fsf@thriss.redhat.com
2019-04-03 21:02:33 +02:00
|
|
|
conn->try_gss = false;
|
Fix bugs in libpq's GSSAPI encryption support.
The critical issue fixed here is that if a GSSAPI-encrypted connection
is successfully made, pqsecure_open_gss() cleared conn->allow_ssl_try,
as an admittedly-hacky way of preventing us from then trying to tunnel
SSL encryption over the already-encrypted connection. The problem
with that is that if we abandon the GSSAPI connection because of a
failure during authentication, we would not attempt SSL encryption
in the next try with the same server. This can lead to unexpected
connection failure, or silently getting a non-encrypted connection
where an encrypted one is expected.
Fortunately, we'd only manage to make a GSSAPI-encrypted connection
if both client and server hold valid tickets in the same Kerberos
infrastructure, which is a relatively uncommon environment.
Nonetheless this is a very nasty bug with potential security
consequences. To fix, don't reset the flag, instead adding a
check for conn->gssenc being already true when deciding whether
to try to initiate SSL.
While here, fix some lesser issues in libpq's GSSAPI code:
* Use the need_new_connection stanza when dropping an attempted
GSSAPI connection, instead of partially duplicating that code.
The consequences of this are pretty minor: AFAICS it could only
lead to auth_req_received or password_needed remaining set when
they shouldn't, which is not too harmful.
* Fix pg_GSS_error() to not repeat the "mprefix" it's given multiple
times, and to notice any failure return from gss_display_status().
* Avoid gratuitous dependency on NI_MAXHOST in
pg_GSS_load_servicename().
Per report from Mikael Gustavsson. Back-patch to v12 where
this code was introduced.
Discussion: https://postgr.es/m/e5b0b6ed05764324a2f3fe7acfc766d5@smhi.se
2020-12-28 21:43:44 +01:00
|
|
|
need_new_connection = true;
|
GSSAPI encryption support
On both the frontend and backend, prepare for GSSAPI encryption
support by moving common code for error handling into a separate file.
Fix a TODO for handling multiple status messages in the process.
Eliminate the OIDs, which have not been needed for some time.
Add frontend and backend encryption support functions. Keep the
context initiation for authentication-only separate on both the
frontend and backend in order to avoid concerns about changing the
requested flags to include encryption support.
In postmaster, pull GSSAPI authorization checking into a shared
function. Also share the initiator name between the encryption and
non-encryption codepaths.
For HBA, add "hostgssenc" and "hostnogssenc" entries that behave
similarly to their SSL counterparts. "hostgssenc" requires either
"gss", "trust", or "reject" for its authentication.
Similarly, add a "gssencmode" parameter to libpq. Supported values are
"disable", "require", and "prefer". Notably, negotiation will only be
attempted if credentials can be acquired. Move credential acquisition
into its own function to support this behavior.
Add a simple pg_stat_gssapi view similar to pg_stat_ssl, for monitoring
if GSSAPI authentication was used, what principal was used, and if
encryption is being used on the connection.
Finally, add documentation for everything new, and update existing
documentation on connection security.
Thanks to Michael Paquier for the Windows fixes.
Author: Robbie Harwood, with changes to the read/write functions by me.
Reviewed in various forms and at different times by: Michael Paquier,
Andres Freund, David Steele.
Discussion: https://www.postgresql.org/message-id/flat/jlg1tgq1ktm.fsf@thriss.redhat.com
2019-04-03 21:02:33 +02:00
|
|
|
goto keep_going;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
At long last I put together a patch to support 4 client SSL negotiation
modes (and replace the requiressl boolean). The four options were first
spelled out by Magnus Hagander <mha@sollentuna.net> on 2000-08-23 in email
to pgsql-hackers, archived here:
http://archives.postgresql.org/pgsql-hackers/2000-08/msg00639.php
My original less-flexible patch and the ensuing thread are archived at:
http://dbforums.com/t623845.html
Attached is a new patch, including documentation.
To sum up, there's a new client parameter "sslmode" and environment
variable "PGSSLMODE", with these options:
sslmode description
------- -----------
disable Unencrypted non-SSL only
allow Negotiate, prefer non-SSL
prefer Negotiate, prefer SSL (default)
require Require SSL
The only change to the server is a new pg_hba.conf line type,
"hostnossl", for specifying connections that are not allowed to use SSL
(for example, to prevent servers on a local network from accidentally
using SSL and wasting cycles). Thus the 3 pg_hba.conf line types are:
pg_hba.conf line types
----------------------
host applies to either SSL or regular connections
hostssl applies only to SSL connections
hostnossl applies only to regular connections
These client and server options, the postgresql.conf ssl = false option,
and finally the possibility of compiling with no SSL support at all,
make quite a range of combinations to test. I threw together a test
script to try many of them out. It's in a separate tarball with its
config files, a patch to psql so it'll announce SSL connections even in
absence of a tty, and the test output. The test is especially informative
when run on the same tty the postmaster was started on, so the FATAL:
errors during negotiation are interleaved with the psql client output.
I saw Tom write that new submissions for 7.4 have to be in before midnight
local time, and since I'm on the east coast in the US, this just makes it
in before the bell. :)
Jon Jensen
2003-07-26 15:50:02 +02:00
|
|
|
#ifdef USE_SSL
|
2003-08-04 02:43:34 +02:00
|
|
|
|
At long last I put together a patch to support 4 client SSL negotiation
modes (and replace the requiressl boolean). The four options were first
spelled out by Magnus Hagander <mha@sollentuna.net> on 2000-08-23 in email
to pgsql-hackers, archived here:
http://archives.postgresql.org/pgsql-hackers/2000-08/msg00639.php
My original less-flexible patch and the ensuing thread are archived at:
http://dbforums.com/t623845.html
Attached is a new patch, including documentation.
To sum up, there's a new client parameter "sslmode" and environment
variable "PGSSLMODE", with these options:
sslmode description
------- -----------
disable Unencrypted non-SSL only
allow Negotiate, prefer non-SSL
prefer Negotiate, prefer SSL (default)
require Require SSL
The only change to the server is a new pg_hba.conf line type,
"hostnossl", for specifying connections that are not allowed to use SSL
(for example, to prevent servers on a local network from accidentally
using SSL and wasting cycles). Thus the 3 pg_hba.conf line types are:
pg_hba.conf line types
----------------------
host applies to either SSL or regular connections
hostssl applies only to SSL connections
hostnossl applies only to regular connections
These client and server options, the postgresql.conf ssl = false option,
and finally the possibility of compiling with no SSL support at all,
make quite a range of combinations to test. I threw together a test
script to try many of them out. It's in a separate tarball with its
config files, a patch to psql so it'll announce SSL connections even in
absence of a tty, and the test output. The test is especially informative
when run on the same tty the postmaster was started on, so the FATAL:
errors during negotiation are interleaved with the psql client output.
I saw Tom write that new submissions for 7.4 have to be in before midnight
local time, and since I'm on the east coast in the US, this just makes it
in before the bell. :)
Jon Jensen
2003-07-26 15:50:02 +02:00
|
|
|
/*
|
2003-08-01 23:27:27 +02:00
|
|
|
* if sslmode is "allow" and we haven't tried an SSL
|
|
|
|
* connection already, then retry with an SSL connection
|
At long last I put together a patch to support 4 client SSL negotiation
modes (and replace the requiressl boolean). The four options were first
spelled out by Magnus Hagander <mha@sollentuna.net> on 2000-08-23 in email
to pgsql-hackers, archived here:
http://archives.postgresql.org/pgsql-hackers/2000-08/msg00639.php
My original less-flexible patch and the ensuing thread are archived at:
http://dbforums.com/t623845.html
Attached is a new patch, including documentation.
To sum up, there's a new client parameter "sslmode" and environment
variable "PGSSLMODE", with these options:
sslmode description
------- -----------
disable Unencrypted non-SSL only
allow Negotiate, prefer non-SSL
prefer Negotiate, prefer SSL (default)
require Require SSL
The only change to the server is a new pg_hba.conf line type,
"hostnossl", for specifying connections that are not allowed to use SSL
(for example, to prevent servers on a local network from accidentally
using SSL and wasting cycles). Thus the 3 pg_hba.conf line types are:
pg_hba.conf line types
----------------------
host applies to either SSL or regular connections
hostssl applies only to SSL connections
hostnossl applies only to regular connections
These client and server options, the postgresql.conf ssl = false option,
and finally the possibility of compiling with no SSL support at all,
make quite a range of combinations to test. I threw together a test
script to try many of them out. It's in a separate tarball with its
config files, a patch to psql so it'll announce SSL connections even in
absence of a tty, and the test output. The test is especially informative
when run on the same tty the postmaster was started on, so the FATAL:
errors during negotiation are interleaved with the psql client output.
I saw Tom write that new submissions for 7.4 have to be in before midnight
local time, and since I'm on the east coast in the US, this just makes it
in before the bell. :)
Jon Jensen
2003-07-26 15:50:02 +02:00
|
|
|
*/
|
2003-08-01 23:27:27 +02:00
|
|
|
if (conn->sslmode[0] == 'a' /* "allow" */
|
Break out OpenSSL-specific code to separate files.
This refactoring is in preparation for adding support for other SSL
implementations, with no user-visible effects. There are now two #defines,
USE_OPENSSL which is defined when building with OpenSSL, and USE_SSL which
is defined when building with any SSL implementation. Currently, OpenSSL is
the only implementation so the two #defines go together, but USE_SSL is
supposed to be used for implementation-independent code.
The libpq SSL code is changed to use a custom BIO, which does all the raw
I/O, like we've been doing in the backend for a long time. That makes it
possible to use MSG_NOSIGNAL to block SIGPIPE when using SSL, which avoids
a couple of syscalls for each send(). Probably doesn't make much performance
difference in practice - the SSL encryption is expensive enough to mask the
effect - but it was a natural result of this refactoring.
Based on a patch by Martijn van Oosterhout from 2006. Briefly reviewed by
Alvaro Herrera, Andreas Karlsson, Jeff Janes.
2014-08-11 10:54:19 +02:00
|
|
|
&& !conn->ssl_in_use
|
2003-08-01 23:27:27 +02:00
|
|
|
&& conn->allow_ssl_try
|
|
|
|
&& conn->wait_ssl_try)
|
At long last I put together a patch to support 4 client SSL negotiation
modes (and replace the requiressl boolean). The four options were first
spelled out by Magnus Hagander <mha@sollentuna.net> on 2000-08-23 in email
to pgsql-hackers, archived here:
http://archives.postgresql.org/pgsql-hackers/2000-08/msg00639.php
My original less-flexible patch and the ensuing thread are archived at:
http://dbforums.com/t623845.html
Attached is a new patch, including documentation.
To sum up, there's a new client parameter "sslmode" and environment
variable "PGSSLMODE", with these options:
sslmode description
------- -----------
disable Unencrypted non-SSL only
allow Negotiate, prefer non-SSL
prefer Negotiate, prefer SSL (default)
require Require SSL
The only change to the server is a new pg_hba.conf line type,
"hostnossl", for specifying connections that are not allowed to use SSL
(for example, to prevent servers on a local network from accidentally
using SSL and wasting cycles). Thus the 3 pg_hba.conf line types are:
pg_hba.conf line types
----------------------
host applies to either SSL or regular connections
hostssl applies only to SSL connections
hostnossl applies only to regular connections
These client and server options, the postgresql.conf ssl = false option,
and finally the possibility of compiling with no SSL support at all,
make quite a range of combinations to test. I threw together a test
script to try many of them out. It's in a separate tarball with its
config files, a patch to psql so it'll announce SSL connections even in
absence of a tty, and the test output. The test is especially informative
when run on the same tty the postmaster was started on, so the FATAL:
errors during negotiation are interleaved with the psql client output.
I saw Tom write that new submissions for 7.4 have to be in before midnight
local time, and since I'm on the east coast in the US, this just makes it
in before the bell. :)
Jon Jensen
2003-07-26 15:50:02 +02:00
|
|
|
{
|
2003-08-01 23:27:27 +02:00
|
|
|
/* only retry once */
|
At long last I put together a patch to support 4 client SSL negotiation
modes (and replace the requiressl boolean). The four options were first
spelled out by Magnus Hagander <mha@sollentuna.net> on 2000-08-23 in email
to pgsql-hackers, archived here:
http://archives.postgresql.org/pgsql-hackers/2000-08/msg00639.php
My original less-flexible patch and the ensuing thread are archived at:
http://dbforums.com/t623845.html
Attached is a new patch, including documentation.
To sum up, there's a new client parameter "sslmode" and environment
variable "PGSSLMODE", with these options:
sslmode description
------- -----------
disable Unencrypted non-SSL only
allow Negotiate, prefer non-SSL
prefer Negotiate, prefer SSL (default)
require Require SSL
The only change to the server is a new pg_hba.conf line type,
"hostnossl", for specifying connections that are not allowed to use SSL
(for example, to prevent servers on a local network from accidentally
using SSL and wasting cycles). Thus the 3 pg_hba.conf line types are:
pg_hba.conf line types
----------------------
host applies to either SSL or regular connections
hostssl applies only to SSL connections
hostnossl applies only to regular connections
These client and server options, the postgresql.conf ssl = false option,
and finally the possibility of compiling with no SSL support at all,
make quite a range of combinations to test. I threw together a test
script to try many of them out. It's in a separate tarball with its
config files, a patch to psql so it'll announce SSL connections even in
absence of a tty, and the test output. The test is especially informative
when run on the same tty the postmaster was started on, so the FATAL:
errors during negotiation are interleaved with the psql client output.
I saw Tom write that new submissions for 7.4 have to be in before midnight
local time, and since I'm on the east coast in the US, this just makes it
in before the bell. :)
Jon Jensen
2003-07-26 15:50:02 +02:00
|
|
|
conn->wait_ssl_try = false;
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
need_new_connection = true;
|
At long last I put together a patch to support 4 client SSL negotiation
modes (and replace the requiressl boolean). The four options were first
spelled out by Magnus Hagander <mha@sollentuna.net> on 2000-08-23 in email
to pgsql-hackers, archived here:
http://archives.postgresql.org/pgsql-hackers/2000-08/msg00639.php
My original less-flexible patch and the ensuing thread are archived at:
http://dbforums.com/t623845.html
Attached is a new patch, including documentation.
To sum up, there's a new client parameter "sslmode" and environment
variable "PGSSLMODE", with these options:
sslmode description
------- -----------
disable Unencrypted non-SSL only
allow Negotiate, prefer non-SSL
prefer Negotiate, prefer SSL (default)
require Require SSL
The only change to the server is a new pg_hba.conf line type,
"hostnossl", for specifying connections that are not allowed to use SSL
(for example, to prevent servers on a local network from accidentally
using SSL and wasting cycles). Thus the 3 pg_hba.conf line types are:
pg_hba.conf line types
----------------------
host applies to either SSL or regular connections
hostssl applies only to SSL connections
hostnossl applies only to regular connections
These client and server options, the postgresql.conf ssl = false option,
and finally the possibility of compiling with no SSL support at all,
make quite a range of combinations to test. I threw together a test
script to try many of them out. It's in a separate tarball with its
config files, a patch to psql so it'll announce SSL connections even in
absence of a tty, and the test output. The test is especially informative
when run on the same tty the postmaster was started on, so the FATAL:
errors during negotiation are interleaved with the psql client output.
I saw Tom write that new submissions for 7.4 have to be in before midnight
local time, and since I'm on the east coast in the US, this just makes it
in before the bell. :)
Jon Jensen
2003-07-26 15:50:02 +02:00
|
|
|
goto keep_going;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* if sslmode is "prefer" and we're in an SSL connection,
|
2003-08-01 23:27:27 +02:00
|
|
|
* then do a non-SSL retry
|
At long last I put together a patch to support 4 client SSL negotiation
modes (and replace the requiressl boolean). The four options were first
spelled out by Magnus Hagander <mha@sollentuna.net> on 2000-08-23 in email
to pgsql-hackers, archived here:
http://archives.postgresql.org/pgsql-hackers/2000-08/msg00639.php
My original less-flexible patch and the ensuing thread are archived at:
http://dbforums.com/t623845.html
Attached is a new patch, including documentation.
To sum up, there's a new client parameter "sslmode" and environment
variable "PGSSLMODE", with these options:
sslmode description
------- -----------
disable Unencrypted non-SSL only
allow Negotiate, prefer non-SSL
prefer Negotiate, prefer SSL (default)
require Require SSL
The only change to the server is a new pg_hba.conf line type,
"hostnossl", for specifying connections that are not allowed to use SSL
(for example, to prevent servers on a local network from accidentally
using SSL and wasting cycles). Thus the 3 pg_hba.conf line types are:
pg_hba.conf line types
----------------------
host applies to either SSL or regular connections
hostssl applies only to SSL connections
hostnossl applies only to regular connections
These client and server options, the postgresql.conf ssl = false option,
and finally the possibility of compiling with no SSL support at all,
make quite a range of combinations to test. I threw together a test
script to try many of them out. It's in a separate tarball with its
config files, a patch to psql so it'll announce SSL connections even in
absence of a tty, and the test output. The test is especially informative
when run on the same tty the postmaster was started on, so the FATAL:
errors during negotiation are interleaved with the psql client output.
I saw Tom write that new submissions for 7.4 have to be in before midnight
local time, and since I'm on the east coast in the US, this just makes it
in before the bell. :)
Jon Jensen
2003-07-26 15:50:02 +02:00
|
|
|
*/
|
2003-08-01 23:27:27 +02:00
|
|
|
if (conn->sslmode[0] == 'p' /* "prefer" */
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
&& conn->ssl_in_use
|
|
|
|
&& conn->allow_ssl_try /* redundant? */
|
2003-08-01 23:27:27 +02:00
|
|
|
&& !conn->wait_ssl_try) /* redundant? */
|
At long last I put together a patch to support 4 client SSL negotiation
modes (and replace the requiressl boolean). The four options were first
spelled out by Magnus Hagander <mha@sollentuna.net> on 2000-08-23 in email
to pgsql-hackers, archived here:
http://archives.postgresql.org/pgsql-hackers/2000-08/msg00639.php
My original less-flexible patch and the ensuing thread are archived at:
http://dbforums.com/t623845.html
Attached is a new patch, including documentation.
To sum up, there's a new client parameter "sslmode" and environment
variable "PGSSLMODE", with these options:
sslmode description
------- -----------
disable Unencrypted non-SSL only
allow Negotiate, prefer non-SSL
prefer Negotiate, prefer SSL (default)
require Require SSL
The only change to the server is a new pg_hba.conf line type,
"hostnossl", for specifying connections that are not allowed to use SSL
(for example, to prevent servers on a local network from accidentally
using SSL and wasting cycles). Thus the 3 pg_hba.conf line types are:
pg_hba.conf line types
----------------------
host applies to either SSL or regular connections
hostssl applies only to SSL connections
hostnossl applies only to regular connections
These client and server options, the postgresql.conf ssl = false option,
and finally the possibility of compiling with no SSL support at all,
make quite a range of combinations to test. I threw together a test
script to try many of them out. It's in a separate tarball with its
config files, a patch to psql so it'll announce SSL connections even in
absence of a tty, and the test output. The test is especially informative
when run on the same tty the postmaster was started on, so the FATAL:
errors during negotiation are interleaved with the psql client output.
I saw Tom write that new submissions for 7.4 have to be in before midnight
local time, and since I'm on the east coast in the US, this just makes it
in before the bell. :)
Jon Jensen
2003-07-26 15:50:02 +02:00
|
|
|
{
|
2003-08-01 23:27:27 +02:00
|
|
|
/* only retry once */
|
At long last I put together a patch to support 4 client SSL negotiation
modes (and replace the requiressl boolean). The four options were first
spelled out by Magnus Hagander <mha@sollentuna.net> on 2000-08-23 in email
to pgsql-hackers, archived here:
http://archives.postgresql.org/pgsql-hackers/2000-08/msg00639.php
My original less-flexible patch and the ensuing thread are archived at:
http://dbforums.com/t623845.html
Attached is a new patch, including documentation.
To sum up, there's a new client parameter "sslmode" and environment
variable "PGSSLMODE", with these options:
sslmode description
------- -----------
disable Unencrypted non-SSL only
allow Negotiate, prefer non-SSL
prefer Negotiate, prefer SSL (default)
require Require SSL
The only change to the server is a new pg_hba.conf line type,
"hostnossl", for specifying connections that are not allowed to use SSL
(for example, to prevent servers on a local network from accidentally
using SSL and wasting cycles). Thus the 3 pg_hba.conf line types are:
pg_hba.conf line types
----------------------
host applies to either SSL or regular connections
hostssl applies only to SSL connections
hostnossl applies only to regular connections
These client and server options, the postgresql.conf ssl = false option,
and finally the possibility of compiling with no SSL support at all,
make quite a range of combinations to test. I threw together a test
script to try many of them out. It's in a separate tarball with its
config files, a patch to psql so it'll announce SSL connections even in
absence of a tty, and the test output. The test is especially informative
when run on the same tty the postmaster was started on, so the FATAL:
errors during negotiation are interleaved with the psql client output.
I saw Tom write that new submissions for 7.4 have to be in before midnight
local time, and since I'm on the east coast in the US, this just makes it
in before the bell. :)
Jon Jensen
2003-07-26 15:50:02 +02:00
|
|
|
conn->allow_ssl_try = false;
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
need_new_connection = true;
|
At long last I put together a patch to support 4 client SSL negotiation
modes (and replace the requiressl boolean). The four options were first
spelled out by Magnus Hagander <mha@sollentuna.net> on 2000-08-23 in email
to pgsql-hackers, archived here:
http://archives.postgresql.org/pgsql-hackers/2000-08/msg00639.php
My original less-flexible patch and the ensuing thread are archived at:
http://dbforums.com/t623845.html
Attached is a new patch, including documentation.
To sum up, there's a new client parameter "sslmode" and environment
variable "PGSSLMODE", with these options:
sslmode description
------- -----------
disable Unencrypted non-SSL only
allow Negotiate, prefer non-SSL
prefer Negotiate, prefer SSL (default)
require Require SSL
The only change to the server is a new pg_hba.conf line type,
"hostnossl", for specifying connections that are not allowed to use SSL
(for example, to prevent servers on a local network from accidentally
using SSL and wasting cycles). Thus the 3 pg_hba.conf line types are:
pg_hba.conf line types
----------------------
host applies to either SSL or regular connections
hostssl applies only to SSL connections
hostnossl applies only to regular connections
These client and server options, the postgresql.conf ssl = false option,
and finally the possibility of compiling with no SSL support at all,
make quite a range of combinations to test. I threw together a test
script to try many of them out. It's in a separate tarball with its
config files, a patch to psql so it'll announce SSL connections even in
absence of a tty, and the test output. The test is especially informative
when run on the same tty the postmaster was started on, so the FATAL:
errors during negotiation are interleaved with the psql client output.
I saw Tom write that new submissions for 7.4 have to be in before midnight
local time, and since I'm on the east coast in the US, this just makes it
in before the bell. :)
Jon Jensen
2003-07-26 15:50:02 +02:00
|
|
|
goto keep_going;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
1999-11-30 04:08:19 +01:00
|
|
|
goto error_return;
|
|
|
|
}
|
|
|
|
|
2003-04-22 02:08:07 +02:00
|
|
|
/* It is an authentication request. */
|
2010-11-27 08:11:45 +01:00
|
|
|
conn->auth_req_received = true;
|
|
|
|
|
1999-11-30 04:08:19 +01:00
|
|
|
/* Get the type of request. */
|
|
|
|
if (pqGetInt((int *) &areq, 4, conn))
|
|
|
|
{
|
|
|
|
/* We'll come back when there are more data */
|
|
|
|
return PGRES_POLLING_READING;
|
|
|
|
}
|
2017-04-13 18:34:14 +02:00
|
|
|
msgLength -= 4;
|
2007-11-15 22:14:46 +01:00
|
|
|
|
2000-04-12 19:17:23 +02:00
|
|
|
/*
|
2017-04-13 18:34:14 +02:00
|
|
|
* Process the rest of the authentication request message, and
|
|
|
|
* respond to it if necessary.
|
|
|
|
*
|
2000-01-14 06:33:15 +01:00
|
|
|
* Note that conn->pghost must be non-NULL if we are going to
|
1999-11-30 04:08:19 +01:00
|
|
|
* avoid the Kerberos code doing a hostname look-up.
|
|
|
|
*/
|
2017-04-13 18:34:14 +02:00
|
|
|
res = pg_fe_sendauth(areq, msgLength, conn);
|
2000-04-12 19:17:23 +02:00
|
|
|
|
2017-04-13 18:34:14 +02:00
|
|
|
/* OK, we have processed the message; mark data consumed */
|
|
|
|
conn->inStart = conn->inCursor;
|
|
|
|
|
|
|
|
if (res != STATUS_OK)
|
1999-11-30 04:08:19 +01:00
|
|
|
goto error_return;
|
|
|
|
|
2000-01-14 06:33:15 +01:00
|
|
|
/*
|
2005-10-17 18:24:20 +02:00
|
|
|
* Just make sure that any data sent by pg_fe_sendauth is
|
|
|
|
* flushed out. Although this theoretically could block, it
|
|
|
|
* really shouldn't since we don't send large auth responses.
|
2000-01-14 06:33:15 +01:00
|
|
|
*/
|
1999-11-30 04:08:19 +01:00
|
|
|
if (pqFlush(conn))
|
|
|
|
goto error_return;
|
|
|
|
|
|
|
|
if (areq == AUTH_REQ_OK)
|
|
|
|
{
|
|
|
|
/* We are done with authentication exchange */
|
|
|
|
conn->status = CONNECTION_AUTH_OK;
|
2000-04-12 19:17:23 +02:00
|
|
|
|
1999-11-30 04:08:19 +01:00
|
|
|
/*
|
2012-04-05 00:27:56 +02:00
|
|
|
* Set asyncStatus so that PQgetResult will think that
|
1999-11-30 04:08:19 +01:00
|
|
|
* what comes back next is the result of a query. See
|
|
|
|
* below.
|
|
|
|
*/
|
|
|
|
conn->asyncStatus = PGASYNC_BUSY;
|
|
|
|
}
|
|
|
|
|
2000-01-14 06:33:15 +01:00
|
|
|
/* Look to see if we have more data yet. */
|
|
|
|
goto keep_going;
|
1999-08-31 03:37:37 +02:00
|
|
|
}
|
|
|
|
|
1999-11-30 04:08:19 +01:00
|
|
|
case CONNECTION_AUTH_OK:
|
|
|
|
{
|
|
|
|
/*
|
2000-01-14 06:33:15 +01:00
|
|
|
* Now we expect to hear from the backend. A ReadyForQuery
|
|
|
|
* message indicates that startup is successful, but we might
|
|
|
|
* also get an Error message indicating failure. (Notice
|
|
|
|
* messages indicating nonfatal warnings are also allowed by
|
2003-04-25 21:45:10 +02:00
|
|
|
* the protocol, as are ParameterStatus and BackendKeyData
|
|
|
|
* messages.) Easiest way to handle this is to let
|
|
|
|
* PQgetResult() read the messages. We just have to fake it
|
|
|
|
* out about the state of the connection, by setting
|
2000-01-14 06:33:15 +01:00
|
|
|
* asyncStatus = PGASYNC_BUSY (done above).
|
|
|
|
*/
|
1999-11-30 04:08:19 +01:00
|
|
|
|
2000-03-24 02:39:55 +01:00
|
|
|
if (PQisBusy(conn))
|
1999-11-30 04:08:19 +01:00
|
|
|
return PGRES_POLLING_READING;
|
|
|
|
|
|
|
|
res = PQgetResult(conn);
|
|
|
|
|
2000-04-12 19:17:23 +02:00
|
|
|
/*
|
1999-11-30 04:08:19 +01:00
|
|
|
* NULL return indicating we have gone to IDLE state is
|
1998-08-09 04:59:33 +02:00
|
|
|
* expected
|
2000-04-12 19:17:23 +02:00
|
|
|
*/
|
|
|
|
if (res)
|
|
|
|
{
|
1999-11-30 04:08:19 +01:00
|
|
|
if (res->resultStatus != PGRES_FATAL_ERROR)
|
2013-11-18 17:29:01 +01:00
|
|
|
appendPQExpBufferStr(&conn->errorMessage,
|
|
|
|
libpq_gettext("unexpected message from server during startup\n"));
|
2009-12-02 05:38:35 +01:00
|
|
|
else if (conn->send_appname &&
|
|
|
|
(conn->appname || conn->fbappname))
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* If we tried to send application_name, check to see
|
2010-02-17 05:19:41 +01:00
|
|
|
* if the error is about that --- pre-9.0 servers will
|
2009-12-02 05:38:35 +01:00
|
|
|
* reject it at this stage of the process. If so,
|
|
|
|
* close the connection and retry without sending
|
|
|
|
* application_name. We could possibly get a false
|
|
|
|
* SQLSTATE match here and retry uselessly, but there
|
|
|
|
* seems no great harm in that; we'll just get the
|
|
|
|
* same error again if it's unrelated.
|
|
|
|
*/
|
|
|
|
const char *sqlstate;
|
|
|
|
|
|
|
|
sqlstate = PQresultErrorField(res, PG_DIAG_SQLSTATE);
|
|
|
|
if (sqlstate &&
|
|
|
|
strcmp(sqlstate, ERRCODE_APPNAME_UNKNOWN) == 0)
|
|
|
|
{
|
|
|
|
PQclear(res);
|
|
|
|
conn->send_appname = false;
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
need_new_connection = true;
|
2009-12-02 05:38:35 +01:00
|
|
|
goto keep_going;
|
|
|
|
}
|
|
|
|
}
|
2000-04-12 19:17:23 +02:00
|
|
|
|
|
|
|
/*
|
1999-11-30 04:08:19 +01:00
|
|
|
* if the resultStatus is FATAL, then conn->errorMessage
|
1999-08-31 03:37:37 +02:00
|
|
|
* already has a copy of the error; needn't copy it back.
|
2000-01-14 06:33:15 +01:00
|
|
|
* But add a newline if it's not there already, since
|
1999-08-31 03:37:37 +02:00
|
|
|
* postmaster error messages may not have one.
|
2000-04-12 19:17:23 +02:00
|
|
|
*/
|
2000-01-14 06:33:15 +01:00
|
|
|
if (conn->errorMessage.len <= 0 ||
|
|
|
|
conn->errorMessage.data[conn->errorMessage.len - 1] != '\n')
|
|
|
|
appendPQExpBufferChar(&conn->errorMessage, '\n');
|
1999-11-30 04:08:19 +01:00
|
|
|
PQclear(res);
|
|
|
|
goto error_return;
|
2000-04-12 19:17:23 +02:00
|
|
|
}
|
|
|
|
|
2019-09-10 17:13:29 +02:00
|
|
|
/* Almost there now ... */
|
|
|
|
conn->status = CONNECTION_CHECK_TARGET;
|
|
|
|
goto keep_going;
|
|
|
|
}
|
|
|
|
|
|
|
|
case CONNECTION_CHECK_TARGET:
|
|
|
|
{
|
libpq: Add target_session_attrs parameter.
Commit 274bb2b3857cc987cfa21d14775cae9b0dababa5 made it possible to
specify multiple IPs in a connection string, but that's not good
enough for the case where you have a read-write master and a bunch of
read-only standbys and want to connect to whichever server is the
master at the current time. This commit allows that, by making it
possible to specify target_session_attrs=read-write as a connection
parameter.
There was extensive discussion of the best name for the connection
parameter and its values as well as the best way to distinguish master
and standbys. For now, adopt the same solution as JDBC: if the user
wants a read-write connection, issue 'show transaction_read_only' and
rejection the connection if the result is 'on'. In the future, we
could add additional values of this new target_session_attrs parameter
that issue different queries; or we might have some way of
distinguishing the server type without resorting to an SQL query; but
right now, we have this, and that's (hopefully) a good start.
Victor Wagner and Mithun Cy. Design review by Álvaro Herrera, Catalin
Iacob, Takayuki Tsunakawa, and Craig Ringer; code review by me. I
changed Mithun's patch to skip all remaining IPs for a host if we
reject a connection based on this new parameter, rewrote the
documentation, and did some other cosmetic cleanup.
Discussion: http://postgr.es/m/CAD__OuhqPRGpcsfwPHz_PDqAGkoqS1UvnUnOnAB-LBWBW=wu4A@mail.gmail.com
2016-11-29 18:18:31 +01:00
|
|
|
/*
|
Extend the abilities of libpq's target_session_attrs parameter.
In addition to the existing options of "any" and "read-write", we
now support "read-only", "primary", "standby", and "prefer-standby".
"read-write" retains its previous meaning of "transactions are
read-write by default", and "read-only" inverts that. The other
three modes test specifically for hot-standby status, which is not
quite the same thing. (Setting default_transaction_read_only on
a primary server renders it read-only to this logic, but not a
standby.)
Furthermore, if talking to a v14 or later server, no extra network
round trip is needed to detect the session's status; the GUC_REPORT
variables delivered by the server are enough. When talking to an
older server, a SHOW or SELECT query is issued to detect session
read-only-ness or server hot-standby state, as needed.
Haribabu Kommi, Greg Nancarrow, Vignesh C, Tom Lane; reviewed at
various times by Laurenz Albe, Takayuki Tsunakawa, Peter Smith.
Discussion: https://postgr.es/m/CAF3+xM+8-ztOkaV9gHiJ3wfgENTq97QcjXQt+rbFQ6F7oNzt9A@mail.gmail.com
2021-03-03 02:17:45 +01:00
|
|
|
* If a read-write, read-only, primary, or standby connection
|
|
|
|
* is required, see if we have one.
|
libpq: Add target_session_attrs parameter.
Commit 274bb2b3857cc987cfa21d14775cae9b0dababa5 made it possible to
specify multiple IPs in a connection string, but that's not good
enough for the case where you have a read-write master and a bunch of
read-only standbys and want to connect to whichever server is the
master at the current time. This commit allows that, by making it
possible to specify target_session_attrs=read-write as a connection
parameter.
There was extensive discussion of the best name for the connection
parameter and its values as well as the best way to distinguish master
and standbys. For now, adopt the same solution as JDBC: if the user
wants a read-write connection, issue 'show transaction_read_only' and
rejection the connection if the result is 'on'. In the future, we
could add additional values of this new target_session_attrs parameter
that issue different queries; or we might have some way of
distinguishing the server type without resorting to an SQL query; but
right now, we have this, and that's (hopefully) a good start.
Victor Wagner and Mithun Cy. Design review by Álvaro Herrera, Catalin
Iacob, Takayuki Tsunakawa, and Craig Ringer; code review by me. I
changed Mithun's patch to skip all remaining IPs for a host if we
reject a connection based on this new parameter, rewrote the
documentation, and did some other cosmetic cleanup.
Discussion: http://postgr.es/m/CAD__OuhqPRGpcsfwPHz_PDqAGkoqS1UvnUnOnAB-LBWBW=wu4A@mail.gmail.com
2016-11-29 18:18:31 +01:00
|
|
|
*/
|
Extend the abilities of libpq's target_session_attrs parameter.
In addition to the existing options of "any" and "read-write", we
now support "read-only", "primary", "standby", and "prefer-standby".
"read-write" retains its previous meaning of "transactions are
read-write by default", and "read-only" inverts that. The other
three modes test specifically for hot-standby status, which is not
quite the same thing. (Setting default_transaction_read_only on
a primary server renders it read-only to this logic, but not a
standby.)
Furthermore, if talking to a v14 or later server, no extra network
round trip is needed to detect the session's status; the GUC_REPORT
variables delivered by the server are enough. When talking to an
older server, a SHOW or SELECT query is issued to detect session
read-only-ness or server hot-standby state, as needed.
Haribabu Kommi, Greg Nancarrow, Vignesh C, Tom Lane; reviewed at
various times by Laurenz Albe, Takayuki Tsunakawa, Peter Smith.
Discussion: https://postgr.es/m/CAF3+xM+8-ztOkaV9gHiJ3wfgENTq97QcjXQt+rbFQ6F7oNzt9A@mail.gmail.com
2021-03-03 02:17:45 +01:00
|
|
|
if (conn->target_server_type == SERVER_TYPE_READ_WRITE ||
|
|
|
|
conn->target_server_type == SERVER_TYPE_READ_ONLY)
|
libpq: Add target_session_attrs parameter.
Commit 274bb2b3857cc987cfa21d14775cae9b0dababa5 made it possible to
specify multiple IPs in a connection string, but that's not good
enough for the case where you have a read-write master and a bunch of
read-only standbys and want to connect to whichever server is the
master at the current time. This commit allows that, by making it
possible to specify target_session_attrs=read-write as a connection
parameter.
There was extensive discussion of the best name for the connection
parameter and its values as well as the best way to distinguish master
and standbys. For now, adopt the same solution as JDBC: if the user
wants a read-write connection, issue 'show transaction_read_only' and
rejection the connection if the result is 'on'. In the future, we
could add additional values of this new target_session_attrs parameter
that issue different queries; or we might have some way of
distinguishing the server type without resorting to an SQL query; but
right now, we have this, and that's (hopefully) a good start.
Victor Wagner and Mithun Cy. Design review by Álvaro Herrera, Catalin
Iacob, Takayuki Tsunakawa, and Craig Ringer; code review by me. I
changed Mithun's patch to skip all remaining IPs for a host if we
reject a connection based on this new parameter, rewrote the
documentation, and did some other cosmetic cleanup.
Discussion: http://postgr.es/m/CAD__OuhqPRGpcsfwPHz_PDqAGkoqS1UvnUnOnAB-LBWBW=wu4A@mail.gmail.com
2016-11-29 18:18:31 +01:00
|
|
|
{
|
Extend the abilities of libpq's target_session_attrs parameter.
In addition to the existing options of "any" and "read-write", we
now support "read-only", "primary", "standby", and "prefer-standby".
"read-write" retains its previous meaning of "transactions are
read-write by default", and "read-only" inverts that. The other
three modes test specifically for hot-standby status, which is not
quite the same thing. (Setting default_transaction_read_only on
a primary server renders it read-only to this logic, but not a
standby.)
Furthermore, if talking to a v14 or later server, no extra network
round trip is needed to detect the session's status; the GUC_REPORT
variables delivered by the server are enough. When talking to an
older server, a SHOW or SELECT query is issued to detect session
read-only-ness or server hot-standby state, as needed.
Haribabu Kommi, Greg Nancarrow, Vignesh C, Tom Lane; reviewed at
various times by Laurenz Albe, Takayuki Tsunakawa, Peter Smith.
Discussion: https://postgr.es/m/CAF3+xM+8-ztOkaV9gHiJ3wfgENTq97QcjXQt+rbFQ6F7oNzt9A@mail.gmail.com
2021-03-03 02:17:45 +01:00
|
|
|
bool read_only_server;
|
|
|
|
|
2016-12-05 20:09:54 +01:00
|
|
|
/*
|
Extend the abilities of libpq's target_session_attrs parameter.
In addition to the existing options of "any" and "read-write", we
now support "read-only", "primary", "standby", and "prefer-standby".
"read-write" retains its previous meaning of "transactions are
read-write by default", and "read-only" inverts that. The other
three modes test specifically for hot-standby status, which is not
quite the same thing. (Setting default_transaction_read_only on
a primary server renders it read-only to this logic, but not a
standby.)
Furthermore, if talking to a v14 or later server, no extra network
round trip is needed to detect the session's status; the GUC_REPORT
variables delivered by the server are enough. When talking to an
older server, a SHOW or SELECT query is issued to detect session
read-only-ness or server hot-standby state, as needed.
Haribabu Kommi, Greg Nancarrow, Vignesh C, Tom Lane; reviewed at
various times by Laurenz Albe, Takayuki Tsunakawa, Peter Smith.
Discussion: https://postgr.es/m/CAF3+xM+8-ztOkaV9gHiJ3wfgENTq97QcjXQt+rbFQ6F7oNzt9A@mail.gmail.com
2021-03-03 02:17:45 +01:00
|
|
|
* If the server didn't report
|
|
|
|
* "default_transaction_read_only" or "in_hot_standby" at
|
|
|
|
* startup, we must determine its state by sending the
|
2021-03-04 09:45:55 +01:00
|
|
|
* query "SHOW transaction_read_only". This GUC exists in
|
|
|
|
* all server versions that support 3.0 protocol.
|
2016-12-05 20:09:54 +01:00
|
|
|
*/
|
Extend the abilities of libpq's target_session_attrs parameter.
In addition to the existing options of "any" and "read-write", we
now support "read-only", "primary", "standby", and "prefer-standby".
"read-write" retains its previous meaning of "transactions are
read-write by default", and "read-only" inverts that. The other
three modes test specifically for hot-standby status, which is not
quite the same thing. (Setting default_transaction_read_only on
a primary server renders it read-only to this logic, but not a
standby.)
Furthermore, if talking to a v14 or later server, no extra network
round trip is needed to detect the session's status; the GUC_REPORT
variables delivered by the server are enough. When talking to an
older server, a SHOW or SELECT query is issued to detect session
read-only-ness or server hot-standby state, as needed.
Haribabu Kommi, Greg Nancarrow, Vignesh C, Tom Lane; reviewed at
various times by Laurenz Albe, Takayuki Tsunakawa, Peter Smith.
Discussion: https://postgr.es/m/CAF3+xM+8-ztOkaV9gHiJ3wfgENTq97QcjXQt+rbFQ6F7oNzt9A@mail.gmail.com
2021-03-03 02:17:45 +01:00
|
|
|
if (conn->default_transaction_read_only == PG_BOOL_UNKNOWN ||
|
|
|
|
conn->in_hot_standby == PG_BOOL_UNKNOWN)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* We use PQsendQueryContinue so that
|
|
|
|
* conn->errorMessage does not get cleared. We need
|
|
|
|
* to preserve any error messages related to previous
|
|
|
|
* hosts we have tried and failed to connect to.
|
|
|
|
*/
|
|
|
|
conn->status = CONNECTION_OK;
|
|
|
|
if (!PQsendQueryContinue(conn,
|
|
|
|
"SHOW transaction_read_only"))
|
|
|
|
goto error_return;
|
|
|
|
/* We'll return to this state when we have the answer */
|
|
|
|
conn->status = CONNECTION_CHECK_WRITABLE;
|
|
|
|
return PGRES_POLLING_READING;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* OK, we can make the test */
|
|
|
|
read_only_server =
|
|
|
|
(conn->default_transaction_read_only == PG_BOOL_YES ||
|
|
|
|
conn->in_hot_standby == PG_BOOL_YES);
|
|
|
|
|
|
|
|
if ((conn->target_server_type == SERVER_TYPE_READ_WRITE) ?
|
|
|
|
read_only_server : !read_only_server)
|
|
|
|
{
|
|
|
|
/* Wrong server state, reject and try the next host */
|
|
|
|
if (conn->target_server_type == SERVER_TYPE_READ_WRITE)
|
|
|
|
appendPQExpBufferStr(&conn->errorMessage,
|
|
|
|
libpq_gettext("session is read-only\n"));
|
|
|
|
else
|
|
|
|
appendPQExpBufferStr(&conn->errorMessage,
|
|
|
|
libpq_gettext("session is not read-only\n"));
|
|
|
|
|
|
|
|
/* Close connection politely. */
|
|
|
|
conn->status = CONNECTION_OK;
|
|
|
|
sendTerminateConn(conn);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Try next host if any, but we don't want to consider
|
|
|
|
* additional addresses for this host.
|
|
|
|
*/
|
|
|
|
conn->try_next_host = true;
|
|
|
|
goto keep_going;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else if (conn->target_server_type == SERVER_TYPE_PRIMARY ||
|
|
|
|
conn->target_server_type == SERVER_TYPE_STANDBY ||
|
|
|
|
conn->target_server_type == SERVER_TYPE_PREFER_STANDBY)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* If the server didn't report "in_hot_standby" at
|
|
|
|
* startup, we must determine its state by sending the
|
|
|
|
* query "SELECT pg_catalog.pg_is_in_recovery()". Servers
|
|
|
|
* before 9.0 don't have that function, but by the same
|
|
|
|
* token they don't have any standby mode, so we may just
|
|
|
|
* assume the result.
|
|
|
|
*/
|
|
|
|
if (conn->sversion < 90000)
|
|
|
|
conn->in_hot_standby = PG_BOOL_NO;
|
|
|
|
|
|
|
|
if (conn->in_hot_standby == PG_BOOL_UNKNOWN)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* We use PQsendQueryContinue so that
|
|
|
|
* conn->errorMessage does not get cleared. We need
|
|
|
|
* to preserve any error messages related to previous
|
|
|
|
* hosts we have tried and failed to connect to.
|
|
|
|
*/
|
|
|
|
conn->status = CONNECTION_OK;
|
|
|
|
if (!PQsendQueryContinue(conn,
|
|
|
|
"SELECT pg_catalog.pg_is_in_recovery()"))
|
|
|
|
goto error_return;
|
|
|
|
/* We'll return to this state when we have the answer */
|
|
|
|
conn->status = CONNECTION_CHECK_STANDBY;
|
|
|
|
return PGRES_POLLING_READING;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* OK, we can make the test */
|
|
|
|
if ((conn->target_server_type == SERVER_TYPE_PRIMARY) ?
|
|
|
|
(conn->in_hot_standby == PG_BOOL_YES) :
|
|
|
|
(conn->in_hot_standby == PG_BOOL_NO))
|
|
|
|
{
|
|
|
|
/* Wrong server state, reject and try the next host */
|
|
|
|
if (conn->target_server_type == SERVER_TYPE_PRIMARY)
|
|
|
|
appendPQExpBufferStr(&conn->errorMessage,
|
|
|
|
libpq_gettext("server is in hot standby mode\n"));
|
|
|
|
else
|
|
|
|
appendPQExpBufferStr(&conn->errorMessage,
|
|
|
|
libpq_gettext("server is not in hot standby mode\n"));
|
|
|
|
|
|
|
|
/* Close connection politely. */
|
|
|
|
conn->status = CONNECTION_OK;
|
|
|
|
sendTerminateConn(conn);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Try next host if any, but we don't want to consider
|
|
|
|
* additional addresses for this host.
|
|
|
|
*/
|
|
|
|
conn->try_next_host = true;
|
|
|
|
goto keep_going;
|
|
|
|
}
|
libpq: Add target_session_attrs parameter.
Commit 274bb2b3857cc987cfa21d14775cae9b0dababa5 made it possible to
specify multiple IPs in a connection string, but that's not good
enough for the case where you have a read-write master and a bunch of
read-only standbys and want to connect to whichever server is the
master at the current time. This commit allows that, by making it
possible to specify target_session_attrs=read-write as a connection
parameter.
There was extensive discussion of the best name for the connection
parameter and its values as well as the best way to distinguish master
and standbys. For now, adopt the same solution as JDBC: if the user
wants a read-write connection, issue 'show transaction_read_only' and
rejection the connection if the result is 'on'. In the future, we
could add additional values of this new target_session_attrs parameter
that issue different queries; or we might have some way of
distinguishing the server type without resorting to an SQL query; but
right now, we have this, and that's (hopefully) a good start.
Victor Wagner and Mithun Cy. Design review by Álvaro Herrera, Catalin
Iacob, Takayuki Tsunakawa, and Craig Ringer; code review by me. I
changed Mithun's patch to skip all remaining IPs for a host if we
reject a connection based on this new parameter, rewrote the
documentation, and did some other cosmetic cleanup.
Discussion: http://postgr.es/m/CAD__OuhqPRGpcsfwPHz_PDqAGkoqS1UvnUnOnAB-LBWBW=wu4A@mail.gmail.com
2016-11-29 18:18:31 +01:00
|
|
|
}
|
|
|
|
|
In libpq, don't look up all the hostnames at once.
Historically, we looked up the target hostname in connectDBStart, so that
PQconnectPoll did not need to do DNS name resolution. The patches that
added multiple-target-host support to libpq preserved this division of
labor; but it's really nonsensical now, because it means that if any one
of the target hosts fails to resolve in DNS, the connection fails. That
negates the no-single-point-of-failure goal of the feature. Additionally,
DNS lookups aren't exactly cheap, but the code did them all even if the
first connection attempt succeeds.
Hence, rearrange so that PQconnectPoll does the lookups, and only looks
up a hostname when it's time to try that host. This does mean that
PQconnectPoll could block on a DNS lookup --- but if you wanted to avoid
that, you should be using hostaddr, as the documentation has always
specified. It seems fairly unlikely that any applications would really
care whether the lookup occurs inside PQconnectStart or PQconnectPoll.
In addition to calling out that fact explicitly, do some other minor
wordsmithing in the docs around the multiple-target-host feature.
Since this seems like a bug in the multiple-target-host feature,
backpatch to v10 where that was introduced. In the back branches,
avoid moving any existing fields of struct pg_conn, just in case
any third-party code is looking into that struct.
Tom Lane, reviewed by Fabien Coelho
Discussion: https://postgr.es/m/4913.1533827102@sss.pgh.pa.us
2018-08-23 22:39:19 +02:00
|
|
|
/* We can release the address list now. */
|
|
|
|
release_conn_addrinfo(conn);
|
libpq: Add target_session_attrs parameter.
Commit 274bb2b3857cc987cfa21d14775cae9b0dababa5 made it possible to
specify multiple IPs in a connection string, but that's not good
enough for the case where you have a read-write master and a bunch of
read-only standbys and want to connect to whichever server is the
master at the current time. This commit allows that, by making it
possible to specify target_session_attrs=read-write as a connection
parameter.
There was extensive discussion of the best name for the connection
parameter and its values as well as the best way to distinguish master
and standbys. For now, adopt the same solution as JDBC: if the user
wants a read-write connection, issue 'show transaction_read_only' and
rejection the connection if the result is 'on'. In the future, we
could add additional values of this new target_session_attrs parameter
that issue different queries; or we might have some way of
distinguishing the server type without resorting to an SQL query; but
right now, we have this, and that's (hopefully) a good start.
Victor Wagner and Mithun Cy. Design review by Álvaro Herrera, Catalin
Iacob, Takayuki Tsunakawa, and Craig Ringer; code review by me. I
changed Mithun's patch to skip all remaining IPs for a host if we
reject a connection based on this new parameter, rewrote the
documentation, and did some other cosmetic cleanup.
Discussion: http://postgr.es/m/CAD__OuhqPRGpcsfwPHz_PDqAGkoqS1UvnUnOnAB-LBWBW=wu4A@mail.gmail.com
2016-11-29 18:18:31 +01:00
|
|
|
|
2021-09-13 22:53:11 +02:00
|
|
|
/*
|
|
|
|
* Contents of conn->errorMessage are no longer interesting
|
|
|
|
* (and it seems some clients expect it to be empty after a
|
|
|
|
* successful connection).
|
|
|
|
*/
|
Rearrange libpq's error reporting to avoid duplicated error text.
Since commit ffa2e4670, libpq accumulates text in conn->errorMessage
across a whole query cycle. In some situations, we may report more
than one error event within a cycle: the easiest case to reach is
where we report a FATAL error message from the server, and then a
bit later we detect loss of connection. Since, historically, each
error PGresult bears the entire content of conn->errorMessage,
this results in duplication of the FATAL message in any output that
concatenates the contents of the PGresults.
Accumulation in errorMessage still seems like a good idea, especially
in view of the number of places that did ad-hoc error concatenation
before ffa2e4670. So to fix this, let's track how much of
conn->errorMessage has been read out into error PGresults, and only
include new text in later PGresults. The tricky part of that is
to be sure that we never discard an error PGresult once made (else
we'd risk dropping some text, a problem much worse than duplication).
While libpq formerly did that in some code paths, a little bit of
rearrangement lets us postpone making an error PGresult at all until
we are about to return it.
A side benefit of that postponement is that it now becomes practical
to return a dummy static PGresult in cases where we hit out-of-memory
while trying to manufacture an error PGresult. This eliminates the
admittedly-very-rare case where we'd return NULL from PQgetResult,
indicating successful query completion, even though what actually
happened was an OOM failure.
Discussion: https://postgr.es/m/ab4288f8-be5c-57fb-2400-e3e857f53e46@enterprisedb.com
2022-02-18 21:35:15 +01:00
|
|
|
pqClearConnErrorState(conn);
|
2021-09-13 22:53:11 +02:00
|
|
|
|
libpq: Add target_session_attrs parameter.
Commit 274bb2b3857cc987cfa21d14775cae9b0dababa5 made it possible to
specify multiple IPs in a connection string, but that's not good
enough for the case where you have a read-write master and a bunch of
read-only standbys and want to connect to whichever server is the
master at the current time. This commit allows that, by making it
possible to specify target_session_attrs=read-write as a connection
parameter.
There was extensive discussion of the best name for the connection
parameter and its values as well as the best way to distinguish master
and standbys. For now, adopt the same solution as JDBC: if the user
wants a read-write connection, issue 'show transaction_read_only' and
rejection the connection if the result is 'on'. In the future, we
could add additional values of this new target_session_attrs parameter
that issue different queries; or we might have some way of
distinguishing the server type without resorting to an SQL query; but
right now, we have this, and that's (hopefully) a good start.
Victor Wagner and Mithun Cy. Design review by Álvaro Herrera, Catalin
Iacob, Takayuki Tsunakawa, and Craig Ringer; code review by me. I
changed Mithun's patch to skip all remaining IPs for a host if we
reject a connection based on this new parameter, rewrote the
documentation, and did some other cosmetic cleanup.
Discussion: http://postgr.es/m/CAD__OuhqPRGpcsfwPHz_PDqAGkoqS1UvnUnOnAB-LBWBW=wu4A@mail.gmail.com
2016-11-29 18:18:31 +01:00
|
|
|
/* We are open for business! */
|
2003-04-25 21:45:10 +02:00
|
|
|
conn->status = CONNECTION_OK;
|
|
|
|
return PGRES_POLLING_OK;
|
1999-11-30 04:08:19 +01:00
|
|
|
}
|
|
|
|
|
2017-02-15 17:03:30 +01:00
|
|
|
case CONNECTION_CONSUME:
|
|
|
|
{
|
Extend the abilities of libpq's target_session_attrs parameter.
In addition to the existing options of "any" and "read-write", we
now support "read-only", "primary", "standby", and "prefer-standby".
"read-write" retains its previous meaning of "transactions are
read-write by default", and "read-only" inverts that. The other
three modes test specifically for hot-standby status, which is not
quite the same thing. (Setting default_transaction_read_only on
a primary server renders it read-only to this logic, but not a
standby.)
Furthermore, if talking to a v14 or later server, no extra network
round trip is needed to detect the session's status; the GUC_REPORT
variables delivered by the server are enough. When talking to an
older server, a SHOW or SELECT query is issued to detect session
read-only-ness or server hot-standby state, as needed.
Haribabu Kommi, Greg Nancarrow, Vignesh C, Tom Lane; reviewed at
various times by Laurenz Albe, Takayuki Tsunakawa, Peter Smith.
Discussion: https://postgr.es/m/CAF3+xM+8-ztOkaV9gHiJ3wfgENTq97QcjXQt+rbFQ6F7oNzt9A@mail.gmail.com
2021-03-03 02:17:45 +01:00
|
|
|
/*
|
|
|
|
* This state just makes sure the connection is idle after
|
|
|
|
* we've obtained the result of a SHOW or SELECT query. Once
|
|
|
|
* we're clear, return to CONNECTION_CHECK_TARGET state to
|
|
|
|
* decide what to do next. We must transiently set status =
|
|
|
|
* CONNECTION_OK in order to use the result-consuming
|
|
|
|
* subroutines.
|
|
|
|
*/
|
2017-02-15 17:03:30 +01:00
|
|
|
conn->status = CONNECTION_OK;
|
|
|
|
if (!PQconsumeInput(conn))
|
|
|
|
goto error_return;
|
|
|
|
|
|
|
|
if (PQisBusy(conn))
|
|
|
|
{
|
|
|
|
conn->status = CONNECTION_CONSUME;
|
|
|
|
return PGRES_POLLING_READING;
|
|
|
|
}
|
|
|
|
|
Extend the abilities of libpq's target_session_attrs parameter.
In addition to the existing options of "any" and "read-write", we
now support "read-only", "primary", "standby", and "prefer-standby".
"read-write" retains its previous meaning of "transactions are
read-write by default", and "read-only" inverts that. The other
three modes test specifically for hot-standby status, which is not
quite the same thing. (Setting default_transaction_read_only on
a primary server renders it read-only to this logic, but not a
standby.)
Furthermore, if talking to a v14 or later server, no extra network
round trip is needed to detect the session's status; the GUC_REPORT
variables delivered by the server are enough. When talking to an
older server, a SHOW or SELECT query is issued to detect session
read-only-ness or server hot-standby state, as needed.
Haribabu Kommi, Greg Nancarrow, Vignesh C, Tom Lane; reviewed at
various times by Laurenz Albe, Takayuki Tsunakawa, Peter Smith.
Discussion: https://postgr.es/m/CAF3+xM+8-ztOkaV9gHiJ3wfgENTq97QcjXQt+rbFQ6F7oNzt9A@mail.gmail.com
2021-03-03 02:17:45 +01:00
|
|
|
/* Call PQgetResult() again until we get a NULL result */
|
2017-02-15 17:03:30 +01:00
|
|
|
res = PQgetResult(conn);
|
|
|
|
if (res != NULL)
|
|
|
|
{
|
|
|
|
PQclear(res);
|
|
|
|
conn->status = CONNECTION_CONSUME;
|
Extend the abilities of libpq's target_session_attrs parameter.
In addition to the existing options of "any" and "read-write", we
now support "read-only", "primary", "standby", and "prefer-standby".
"read-write" retains its previous meaning of "transactions are
read-write by default", and "read-only" inverts that. The other
three modes test specifically for hot-standby status, which is not
quite the same thing. (Setting default_transaction_read_only on
a primary server renders it read-only to this logic, but not a
standby.)
Furthermore, if talking to a v14 or later server, no extra network
round trip is needed to detect the session's status; the GUC_REPORT
variables delivered by the server are enough. When talking to an
older server, a SHOW or SELECT query is issued to detect session
read-only-ness or server hot-standby state, as needed.
Haribabu Kommi, Greg Nancarrow, Vignesh C, Tom Lane; reviewed at
various times by Laurenz Albe, Takayuki Tsunakawa, Peter Smith.
Discussion: https://postgr.es/m/CAF3+xM+8-ztOkaV9gHiJ3wfgENTq97QcjXQt+rbFQ6F7oNzt9A@mail.gmail.com
2021-03-03 02:17:45 +01:00
|
|
|
return PGRES_POLLING_READING;
|
2017-02-15 17:03:30 +01:00
|
|
|
}
|
|
|
|
|
Extend the abilities of libpq's target_session_attrs parameter.
In addition to the existing options of "any" and "read-write", we
now support "read-only", "primary", "standby", and "prefer-standby".
"read-write" retains its previous meaning of "transactions are
read-write by default", and "read-only" inverts that. The other
three modes test specifically for hot-standby status, which is not
quite the same thing. (Setting default_transaction_read_only on
a primary server renders it read-only to this logic, but not a
standby.)
Furthermore, if talking to a v14 or later server, no extra network
round trip is needed to detect the session's status; the GUC_REPORT
variables delivered by the server are enough. When talking to an
older server, a SHOW or SELECT query is issued to detect session
read-only-ness or server hot-standby state, as needed.
Haribabu Kommi, Greg Nancarrow, Vignesh C, Tom Lane; reviewed at
various times by Laurenz Albe, Takayuki Tsunakawa, Peter Smith.
Discussion: https://postgr.es/m/CAF3+xM+8-ztOkaV9gHiJ3wfgENTq97QcjXQt+rbFQ6F7oNzt9A@mail.gmail.com
2021-03-03 02:17:45 +01:00
|
|
|
conn->status = CONNECTION_CHECK_TARGET;
|
|
|
|
goto keep_going;
|
2017-02-15 17:03:30 +01:00
|
|
|
}
|
Extend the abilities of libpq's target_session_attrs parameter.
In addition to the existing options of "any" and "read-write", we
now support "read-only", "primary", "standby", and "prefer-standby".
"read-write" retains its previous meaning of "transactions are
read-write by default", and "read-only" inverts that. The other
three modes test specifically for hot-standby status, which is not
quite the same thing. (Setting default_transaction_read_only on
a primary server renders it read-only to this logic, but not a
standby.)
Furthermore, if talking to a v14 or later server, no extra network
round trip is needed to detect the session's status; the GUC_REPORT
variables delivered by the server are enough. When talking to an
older server, a SHOW or SELECT query is issued to detect session
read-only-ness or server hot-standby state, as needed.
Haribabu Kommi, Greg Nancarrow, Vignesh C, Tom Lane; reviewed at
various times by Laurenz Albe, Takayuki Tsunakawa, Peter Smith.
Discussion: https://postgr.es/m/CAF3+xM+8-ztOkaV9gHiJ3wfgENTq97QcjXQt+rbFQ6F7oNzt9A@mail.gmail.com
2021-03-03 02:17:45 +01:00
|
|
|
|
libpq: Add target_session_attrs parameter.
Commit 274bb2b3857cc987cfa21d14775cae9b0dababa5 made it possible to
specify multiple IPs in a connection string, but that's not good
enough for the case where you have a read-write master and a bunch of
read-only standbys and want to connect to whichever server is the
master at the current time. This commit allows that, by making it
possible to specify target_session_attrs=read-write as a connection
parameter.
There was extensive discussion of the best name for the connection
parameter and its values as well as the best way to distinguish master
and standbys. For now, adopt the same solution as JDBC: if the user
wants a read-write connection, issue 'show transaction_read_only' and
rejection the connection if the result is 'on'. In the future, we
could add additional values of this new target_session_attrs parameter
that issue different queries; or we might have some way of
distinguishing the server type without resorting to an SQL query; but
right now, we have this, and that's (hopefully) a good start.
Victor Wagner and Mithun Cy. Design review by Álvaro Herrera, Catalin
Iacob, Takayuki Tsunakawa, and Craig Ringer; code review by me. I
changed Mithun's patch to skip all remaining IPs for a host if we
reject a connection based on this new parameter, rewrote the
documentation, and did some other cosmetic cleanup.
Discussion: http://postgr.es/m/CAD__OuhqPRGpcsfwPHz_PDqAGkoqS1UvnUnOnAB-LBWBW=wu4A@mail.gmail.com
2016-11-29 18:18:31 +01:00
|
|
|
case CONNECTION_CHECK_WRITABLE:
|
|
|
|
{
|
Extend the abilities of libpq's target_session_attrs parameter.
In addition to the existing options of "any" and "read-write", we
now support "read-only", "primary", "standby", and "prefer-standby".
"read-write" retains its previous meaning of "transactions are
read-write by default", and "read-only" inverts that. The other
three modes test specifically for hot-standby status, which is not
quite the same thing. (Setting default_transaction_read_only on
a primary server renders it read-only to this logic, but not a
standby.)
Furthermore, if talking to a v14 or later server, no extra network
round trip is needed to detect the session's status; the GUC_REPORT
variables delivered by the server are enough. When talking to an
older server, a SHOW or SELECT query is issued to detect session
read-only-ness or server hot-standby state, as needed.
Haribabu Kommi, Greg Nancarrow, Vignesh C, Tom Lane; reviewed at
various times by Laurenz Albe, Takayuki Tsunakawa, Peter Smith.
Discussion: https://postgr.es/m/CAF3+xM+8-ztOkaV9gHiJ3wfgENTq97QcjXQt+rbFQ6F7oNzt9A@mail.gmail.com
2021-03-03 02:17:45 +01:00
|
|
|
/*
|
|
|
|
* Waiting for result of "SHOW transaction_read_only". We
|
|
|
|
* must transiently set status = CONNECTION_OK in order to use
|
|
|
|
* the result-consuming subroutines.
|
|
|
|
*/
|
libpq: Add target_session_attrs parameter.
Commit 274bb2b3857cc987cfa21d14775cae9b0dababa5 made it possible to
specify multiple IPs in a connection string, but that's not good
enough for the case where you have a read-write master and a bunch of
read-only standbys and want to connect to whichever server is the
master at the current time. This commit allows that, by making it
possible to specify target_session_attrs=read-write as a connection
parameter.
There was extensive discussion of the best name for the connection
parameter and its values as well as the best way to distinguish master
and standbys. For now, adopt the same solution as JDBC: if the user
wants a read-write connection, issue 'show transaction_read_only' and
reject the connection if the result is 'on'. In the future, we
could add additional values of this new target_session_attrs parameter
that issue different queries; or we might have some way of
distinguishing the server type without resorting to an SQL query; but
right now, we have this, and that's (hopefully) a good start.
Victor Wagner and Mithun Cy. Design review by Álvaro Herrera, Catalin
Iacob, Takayuki Tsunakawa, and Craig Ringer; code review by me. I
changed Mithun's patch to skip all remaining IPs for a host if we
reject a connection based on this new parameter, rewrote the
documentation, and did some other cosmetic cleanup.
Discussion: http://postgr.es/m/CAD__OuhqPRGpcsfwPHz_PDqAGkoqS1UvnUnOnAB-LBWBW=wu4A@mail.gmail.com
2016-11-29 18:18:31 +01:00
|
|
|
conn->status = CONNECTION_OK;
|
|
|
|
if (!PQconsumeInput(conn))
|
|
|
|
goto error_return;
|
|
|
|
|
|
|
|
if (PQisBusy(conn))
|
|
|
|
{
|
|
|
|
conn->status = CONNECTION_CHECK_WRITABLE;
|
|
|
|
return PGRES_POLLING_READING;
|
|
|
|
}
|
|
|
|
|
|
|
|
res = PQgetResult(conn);
|
Extend the abilities of libpq's target_session_attrs parameter.
In addition to the existing options of "any" and "read-write", we
now support "read-only", "primary", "standby", and "prefer-standby".
"read-write" retains its previous meaning of "transactions are
read-write by default", and "read-only" inverts that. The other
three modes test specifically for hot-standby status, which is not
quite the same thing. (Setting default_transaction_read_only on
a primary server renders it read-only to this logic, but not a
standby.)
Furthermore, if talking to a v14 or later server, no extra network
round trip is needed to detect the session's status; the GUC_REPORT
variables delivered by the server are enough. When talking to an
older server, a SHOW or SELECT query is issued to detect session
read-only-ness or server hot-standby state, as needed.
Haribabu Kommi, Greg Nancarrow, Vignesh C, Tom Lane; reviewed at
various times by Laurenz Albe, Takayuki Tsunakawa, Peter Smith.
Discussion: https://postgr.es/m/CAF3+xM+8-ztOkaV9gHiJ3wfgENTq97QcjXQt+rbFQ6F7oNzt9A@mail.gmail.com
2021-03-03 02:17:45 +01:00
|
|
|
if (res && PQresultStatus(res) == PGRES_TUPLES_OK &&
|
libpq: Add target_session_attrs parameter.
Commit 274bb2b3857cc987cfa21d14775cae9b0dababa5 made it possible to
specify multiple IPs in a connection string, but that's not good
enough for the case where you have a read-write master and a bunch of
read-only standbys and want to connect to whichever server is the
master at the current time. This commit allows that, by making it
possible to specify target_session_attrs=read-write as a connection
parameter.
There was extensive discussion of the best name for the connection
parameter and its values as well as the best way to distinguish master
and standbys. For now, adopt the same solution as JDBC: if the user
wants a read-write connection, issue 'show transaction_read_only' and
reject the connection if the result is 'on'. In the future, we
could add additional values of this new target_session_attrs parameter
that issue different queries; or we might have some way of
distinguishing the server type without resorting to an SQL query; but
right now, we have this, and that's (hopefully) a good start.
Victor Wagner and Mithun Cy. Design review by Álvaro Herrera, Catalin
Iacob, Takayuki Tsunakawa, and Craig Ringer; code review by me. I
changed Mithun's patch to skip all remaining IPs for a host if we
reject a connection based on this new parameter, rewrote the
documentation, and did some other cosmetic cleanup.
Discussion: http://postgr.es/m/CAD__OuhqPRGpcsfwPHz_PDqAGkoqS1UvnUnOnAB-LBWBW=wu4A@mail.gmail.com
2016-11-29 18:18:31 +01:00
|
|
|
PQntuples(res) == 1)
|
|
|
|
{
|
Extend the abilities of libpq's target_session_attrs parameter.
In addition to the existing options of "any" and "read-write", we
now support "read-only", "primary", "standby", and "prefer-standby".
"read-write" retains its previous meaning of "transactions are
read-write by default", and "read-only" inverts that. The other
three modes test specifically for hot-standby status, which is not
quite the same thing. (Setting default_transaction_read_only on
a primary server renders it read-only to this logic, but not a
standby.)
Furthermore, if talking to a v14 or later server, no extra network
round trip is needed to detect the session's status; the GUC_REPORT
variables delivered by the server are enough. When talking to an
older server, a SHOW or SELECT query is issued to detect session
read-only-ness or server hot-standby state, as needed.
Haribabu Kommi, Greg Nancarrow, Vignesh C, Tom Lane; reviewed at
various times by Laurenz Albe, Takayuki Tsunakawa, Peter Smith.
Discussion: https://postgr.es/m/CAF3+xM+8-ztOkaV9gHiJ3wfgENTq97QcjXQt+rbFQ6F7oNzt9A@mail.gmail.com
2021-03-03 02:17:45 +01:00
|
|
|
char *val = PQgetvalue(res, 0, 0);
|
libpq: Add target_session_attrs parameter.
Commit 274bb2b3857cc987cfa21d14775cae9b0dababa5 made it possible to
specify multiple IPs in a connection string, but that's not good
enough for the case where you have a read-write master and a bunch of
read-only standbys and want to connect to whichever server is the
master at the current time. This commit allows that, by making it
possible to specify target_session_attrs=read-write as a connection
parameter.
There was extensive discussion of the best name for the connection
parameter and its values as well as the best way to distinguish master
and standbys. For now, adopt the same solution as JDBC: if the user
wants a read-write connection, issue 'show transaction_read_only' and
reject the connection if the result is 'on'. In the future, we
could add additional values of this new target_session_attrs parameter
that issue different queries; or we might have some way of
distinguishing the server type without resorting to an SQL query; but
right now, we have this, and that's (hopefully) a good start.
Victor Wagner and Mithun Cy. Design review by Álvaro Herrera, Catalin
Iacob, Takayuki Tsunakawa, and Craig Ringer; code review by me. I
changed Mithun's patch to skip all remaining IPs for a host if we
reject a connection based on this new parameter, rewrote the
documentation, and did some other cosmetic cleanup.
Discussion: http://postgr.es/m/CAD__OuhqPRGpcsfwPHz_PDqAGkoqS1UvnUnOnAB-LBWBW=wu4A@mail.gmail.com
2016-11-29 18:18:31 +01:00
|
|
|
|
Extend the abilities of libpq's target_session_attrs parameter.
In addition to the existing options of "any" and "read-write", we
now support "read-only", "primary", "standby", and "prefer-standby".
"read-write" retains its previous meaning of "transactions are
read-write by default", and "read-only" inverts that. The other
three modes test specifically for hot-standby status, which is not
quite the same thing. (Setting default_transaction_read_only on
a primary server renders it read-only to this logic, but not a
standby.)
Furthermore, if talking to a v14 or later server, no extra network
round trip is needed to detect the session's status; the GUC_REPORT
variables delivered by the server are enough. When talking to an
older server, a SHOW or SELECT query is issued to detect session
read-only-ness or server hot-standby state, as needed.
Haribabu Kommi, Greg Nancarrow, Vignesh C, Tom Lane; reviewed at
various times by Laurenz Albe, Takayuki Tsunakawa, Peter Smith.
Discussion: https://postgr.es/m/CAF3+xM+8-ztOkaV9gHiJ3wfgENTq97QcjXQt+rbFQ6F7oNzt9A@mail.gmail.com
2021-03-03 02:17:45 +01:00
|
|
|
/*
|
|
|
|
* "transaction_read_only = on" proves that at least one
|
|
|
|
* of default_transaction_read_only and in_hot_standby is
|
|
|
|
* on, but we don't actually know which. We don't care
|
|
|
|
* though for the purpose of identifying a read-only
|
|
|
|
* session, so satisfy the CONNECTION_CHECK_TARGET code by
|
|
|
|
* claiming they are both on. On the other hand, if it's
|
|
|
|
* a read-write session, they are certainly both off.
|
|
|
|
*/
|
libpq: Add target_session_attrs parameter.
Commit 274bb2b3857cc987cfa21d14775cae9b0dababa5 made it possible to
specify multiple IPs in a connection string, but that's not good
enough for the case where you have a read-write master and a bunch of
read-only standbys and want to connect to whichever server is the
master at the current time. This commit allows that, by making it
possible to specify target_session_attrs=read-write as a connection
parameter.
There was extensive discussion of the best name for the connection
parameter and its values as well as the best way to distinguish master
and standbys. For now, adopt the same solution as JDBC: if the user
wants a read-write connection, issue 'show transaction_read_only' and
reject the connection if the result is 'on'. In the future, we
could add additional values of this new target_session_attrs parameter
that issue different queries; or we might have some way of
distinguishing the server type without resorting to an SQL query; but
right now, we have this, and that's (hopefully) a good start.
Victor Wagner and Mithun Cy. Design review by Álvaro Herrera, Catalin
Iacob, Takayuki Tsunakawa, and Craig Ringer; code review by me. I
changed Mithun's patch to skip all remaining IPs for a host if we
reject a connection based on this new parameter, rewrote the
documentation, and did some other cosmetic cleanup.
Discussion: http://postgr.es/m/CAD__OuhqPRGpcsfwPHz_PDqAGkoqS1UvnUnOnAB-LBWBW=wu4A@mail.gmail.com
2016-11-29 18:18:31 +01:00
|
|
|
if (strncmp(val, "on", 2) == 0)
|
|
|
|
{
|
Extend the abilities of libpq's target_session_attrs parameter.
In addition to the existing options of "any" and "read-write", we
now support "read-only", "primary", "standby", and "prefer-standby".
"read-write" retains its previous meaning of "transactions are
read-write by default", and "read-only" inverts that. The other
three modes test specifically for hot-standby status, which is not
quite the same thing. (Setting default_transaction_read_only on
a primary server renders it read-only to this logic, but not a
standby.)
Furthermore, if talking to a v14 or later server, no extra network
round trip is needed to detect the session's status; the GUC_REPORT
variables delivered by the server are enough. When talking to an
older server, a SHOW or SELECT query is issued to detect session
read-only-ness or server hot-standby state, as needed.
Haribabu Kommi, Greg Nancarrow, Vignesh C, Tom Lane; reviewed at
various times by Laurenz Albe, Takayuki Tsunakawa, Peter Smith.
Discussion: https://postgr.es/m/CAF3+xM+8-ztOkaV9gHiJ3wfgENTq97QcjXQt+rbFQ6F7oNzt9A@mail.gmail.com
2021-03-03 02:17:45 +01:00
|
|
|
conn->default_transaction_read_only = PG_BOOL_YES;
|
|
|
|
conn->in_hot_standby = PG_BOOL_YES;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
conn->default_transaction_read_only = PG_BOOL_NO;
|
|
|
|
conn->in_hot_standby = PG_BOOL_NO;
|
|
|
|
}
|
|
|
|
PQclear(res);
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
|
Extend the abilities of libpq's target_session_attrs parameter.
In addition to the existing options of "any" and "read-write", we
now support "read-only", "primary", "standby", and "prefer-standby".
"read-write" retains its previous meaning of "transactions are
read-write by default", and "read-only" inverts that. The other
three modes test specifically for hot-standby status, which is not
quite the same thing. (Setting default_transaction_read_only on
a primary server renders it read-only to this logic, but not a
standby.)
Furthermore, if talking to a v14 or later server, no extra network
round trip is needed to detect the session's status; the GUC_REPORT
variables delivered by the server are enough. When talking to an
older server, a SHOW or SELECT query is issued to detect session
read-only-ness or server hot-standby state, as needed.
Haribabu Kommi, Greg Nancarrow, Vignesh C, Tom Lane; reviewed at
various times by Laurenz Albe, Takayuki Tsunakawa, Peter Smith.
Discussion: https://postgr.es/m/CAF3+xM+8-ztOkaV9gHiJ3wfgENTq97QcjXQt+rbFQ6F7oNzt9A@mail.gmail.com
2021-03-03 02:17:45 +01:00
|
|
|
/* Finish reading messages before continuing */
|
|
|
|
conn->status = CONNECTION_CONSUME;
|
|
|
|
goto keep_going;
|
|
|
|
}
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
|
Extend the abilities of libpq's target_session_attrs parameter.
In addition to the existing options of "any" and "read-write", we
now support "read-only", "primary", "standby", and "prefer-standby".
"read-write" retains its previous meaning of "transactions are
read-write by default", and "read-only" inverts that. The other
three modes test specifically for hot-standby status, which is not
quite the same thing. (Setting default_transaction_read_only on
a primary server renders it read-only to this logic, but not a
standby.)
Furthermore, if talking to a v14 or later server, no extra network
round trip is needed to detect the session's status; the GUC_REPORT
variables delivered by the server are enough. When talking to an
older server, a SHOW or SELECT query is issued to detect session
read-only-ness or server hot-standby state, as needed.
Haribabu Kommi, Greg Nancarrow, Vignesh C, Tom Lane; reviewed at
various times by Laurenz Albe, Takayuki Tsunakawa, Peter Smith.
Discussion: https://postgr.es/m/CAF3+xM+8-ztOkaV9gHiJ3wfgENTq97QcjXQt+rbFQ6F7oNzt9A@mail.gmail.com
2021-03-03 02:17:45 +01:00
|
|
|
/* Something went wrong with "SHOW transaction_read_only". */
|
2022-07-03 20:11:05 +02:00
|
|
|
PQclear(res);
|
libpq: Add target_session_attrs parameter.
Commit 274bb2b3857cc987cfa21d14775cae9b0dababa5 made it possible to
specify multiple IPs in a connection string, but that's not good
enough for the case where you have a read-write master and a bunch of
read-only standbys and want to connect to whichever server is the
master at the current time. This commit allows that, by making it
possible to specify target_session_attrs=read-write as a connection
parameter.
There was extensive discussion of the best name for the connection
parameter and its values as well as the best way to distinguish master
and standbys. For now, adopt the same solution as JDBC: if the user
wants a read-write connection, issue 'show transaction_read_only' and
reject the connection if the result is 'on'. In the future, we
could add additional values of this new target_session_attrs parameter
that issue different queries; or we might have some way of
distinguishing the server type without resorting to an SQL query; but
right now, we have this, and that's (hopefully) a good start.
Victor Wagner and Mithun Cy. Design review by Álvaro Herrera, Catalin
Iacob, Takayuki Tsunakawa, and Craig Ringer; code review by me. I
changed Mithun's patch to skip all remaining IPs for a host if we
reject a connection based on this new parameter, rewrote the
documentation, and did some other cosmetic cleanup.
Discussion: http://postgr.es/m/CAD__OuhqPRGpcsfwPHz_PDqAGkoqS1UvnUnOnAB-LBWBW=wu4A@mail.gmail.com
2016-11-29 18:18:31 +01:00
|
|
|
|
Extend the abilities of libpq's target_session_attrs parameter.
In addition to the existing options of "any" and "read-write", we
now support "read-only", "primary", "standby", and "prefer-standby".
"read-write" retains its previous meaning of "transactions are
read-write by default", and "read-only" inverts that. The other
three modes test specifically for hot-standby status, which is not
quite the same thing. (Setting default_transaction_read_only on
a primary server renders it read-only to this logic, but not a
standby.)
Furthermore, if talking to a v14 or later server, no extra network
round trip is needed to detect the session's status; the GUC_REPORT
variables delivered by the server are enough. When talking to an
older server, a SHOW or SELECT query is issued to detect session
read-only-ness or server hot-standby state, as needed.
Haribabu Kommi, Greg Nancarrow, Vignesh C, Tom Lane; reviewed at
various times by Laurenz Albe, Takayuki Tsunakawa, Peter Smith.
Discussion: https://postgr.es/m/CAF3+xM+8-ztOkaV9gHiJ3wfgENTq97QcjXQt+rbFQ6F7oNzt9A@mail.gmail.com
2021-03-03 02:17:45 +01:00
|
|
|
/* Append error report to conn->errorMessage. */
|
2021-05-03 08:51:30 +02:00
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
|
|
|
libpq_gettext("\"%s\" failed\n"),
|
|
|
|
"SHOW transaction_read_only");
|
Extend the abilities of libpq's target_session_attrs parameter.
In addition to the existing options of "any" and "read-write", we
now support "read-only", "primary", "standby", and "prefer-standby".
"read-write" retains its previous meaning of "transactions are
read-write by default", and "read-only" inverts that. The other
three modes test specifically for hot-standby status, which is not
quite the same thing. (Setting default_transaction_read_only on
a primary server renders it read-only to this logic, but not a
standby.)
Furthermore, if talking to a v14 or later server, no extra network
round trip is needed to detect the session's status; the GUC_REPORT
variables delivered by the server are enough. When talking to an
older server, a SHOW or SELECT query is issued to detect session
read-only-ness or server hot-standby state, as needed.
Haribabu Kommi, Greg Nancarrow, Vignesh C, Tom Lane; reviewed at
various times by Laurenz Albe, Takayuki Tsunakawa, Peter Smith.
Discussion: https://postgr.es/m/CAF3+xM+8-ztOkaV9gHiJ3wfgENTq97QcjXQt+rbFQ6F7oNzt9A@mail.gmail.com
2021-03-03 02:17:45 +01:00
|
|
|
|
|
|
|
/* Close connection politely. */
|
|
|
|
conn->status = CONNECTION_OK;
|
|
|
|
sendTerminateConn(conn);
|
|
|
|
|
|
|
|
/* Try next host. */
|
|
|
|
conn->try_next_host = true;
|
|
|
|
goto keep_going;
|
|
|
|
}
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
|
Extend the abilities of libpq's target_session_attrs parameter.
In addition to the existing options of "any" and "read-write", we
now support "read-only", "primary", "standby", and "prefer-standby".
"read-write" retains its previous meaning of "transactions are
read-write by default", and "read-only" inverts that. The other
three modes test specifically for hot-standby status, which is not
quite the same thing. (Setting default_transaction_read_only on
a primary server renders it read-only to this logic, but not a
standby.)
Furthermore, if talking to a v14 or later server, no extra network
round trip is needed to detect the session's status; the GUC_REPORT
variables delivered by the server are enough. When talking to an
older server, a SHOW or SELECT query is issued to detect session
read-only-ness or server hot-standby state, as needed.
Haribabu Kommi, Greg Nancarrow, Vignesh C, Tom Lane; reviewed at
various times by Laurenz Albe, Takayuki Tsunakawa, Peter Smith.
Discussion: https://postgr.es/m/CAF3+xM+8-ztOkaV9gHiJ3wfgENTq97QcjXQt+rbFQ6F7oNzt9A@mail.gmail.com
2021-03-03 02:17:45 +01:00
|
|
|
case CONNECTION_CHECK_STANDBY:
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Waiting for result of "SELECT pg_is_in_recovery()". We
|
|
|
|
* must transiently set status = CONNECTION_OK in order to use
|
|
|
|
* the result-consuming subroutines.
|
|
|
|
*/
|
|
|
|
conn->status = CONNECTION_OK;
|
|
|
|
if (!PQconsumeInput(conn))
|
|
|
|
goto error_return;
|
|
|
|
|
|
|
|
if (PQisBusy(conn))
|
|
|
|
{
|
|
|
|
conn->status = CONNECTION_CHECK_STANDBY;
|
|
|
|
return PGRES_POLLING_READING;
|
|
|
|
}
|
|
|
|
|
|
|
|
res = PQgetResult(conn);
|
|
|
|
if (res && PQresultStatus(res) == PGRES_TUPLES_OK &&
|
|
|
|
PQntuples(res) == 1)
|
|
|
|
{
|
|
|
|
char *val = PQgetvalue(res, 0, 0);
|
|
|
|
|
|
|
|
if (strncmp(val, "t", 1) == 0)
|
|
|
|
conn->in_hot_standby = PG_BOOL_YES;
|
|
|
|
else
|
|
|
|
conn->in_hot_standby = PG_BOOL_NO;
|
libpq: Add target_session_attrs parameter.
Commit 274bb2b3857cc987cfa21d14775cae9b0dababa5 made it possible to
specify multiple IPs in a connection string, but that's not good
enough for the case where you have a read-write master and a bunch of
read-only standbys and want to connect to whichever server is the
master at the current time. This commit allows that, by making it
possible to specify target_session_attrs=read-write as a connection
parameter.
There was extensive discussion of the best name for the connection
parameter and its values as well as the best way to distinguish master
and standbys. For now, adopt the same solution as JDBC: if the user
wants a read-write connection, issue 'show transaction_read_only' and
reject the connection if the result is 'on'. In the future, we
could add additional values of this new target_session_attrs parameter
that issue different queries; or we might have some way of
distinguishing the server type without resorting to an SQL query; but
right now, we have this, and that's (hopefully) a good start.
Victor Wagner and Mithun Cy. Design review by Álvaro Herrera, Catalin
Iacob, Takayuki Tsunakawa, and Craig Ringer; code review by me. I
changed Mithun's patch to skip all remaining IPs for a host if we
reject a connection based on this new parameter, rewrote the
documentation, and did some other cosmetic cleanup.
Discussion: http://postgr.es/m/CAD__OuhqPRGpcsfwPHz_PDqAGkoqS1UvnUnOnAB-LBWBW=wu4A@mail.gmail.com
2016-11-29 18:18:31 +01:00
|
|
|
PQclear(res);
|
|
|
|
|
Extend the abilities of libpq's target_session_attrs parameter.
In addition to the existing options of "any" and "read-write", we
now support "read-only", "primary", "standby", and "prefer-standby".
"read-write" retains its previous meaning of "transactions are
read-write by default", and "read-only" inverts that. The other
three modes test specifically for hot-standby status, which is not
quite the same thing. (Setting default_transaction_read_only on
a primary server renders it read-only to this logic, but not a
standby.)
Furthermore, if talking to a v14 or later server, no extra network
round trip is needed to detect the session's status; the GUC_REPORT
variables delivered by the server are enough. When talking to an
older server, a SHOW or SELECT query is issued to detect session
read-only-ness or server hot-standby state, as needed.
Haribabu Kommi, Greg Nancarrow, Vignesh C, Tom Lane; reviewed at
various times by Laurenz Albe, Takayuki Tsunakawa, Peter Smith.
Discussion: https://postgr.es/m/CAF3+xM+8-ztOkaV9gHiJ3wfgENTq97QcjXQt+rbFQ6F7oNzt9A@mail.gmail.com
2021-03-03 02:17:45 +01:00
|
|
|
/* Finish reading messages before continuing */
|
2017-02-15 17:03:30 +01:00
|
|
|
conn->status = CONNECTION_CONSUME;
|
|
|
|
goto keep_going;
|
libpq: Add target_session_attrs parameter.
Commit 274bb2b3857cc987cfa21d14775cae9b0dababa5 made it possible to
specify multiple IPs in a connection string, but that's not good
enough for the case where you have a read-write master and a bunch of
read-only standbys and want to connect to whichever server is the
master at the current time. This commit allows that, by making it
possible to specify target_session_attrs=read-write as a connection
parameter.
There was extensive discussion of the best name for the connection
parameter and its values as well as the best way to distinguish master
and standbys. For now, adopt the same solution as JDBC: if the user
wants a read-write connection, issue 'show transaction_read_only' and
reject the connection if the result is 'on'. In the future, we
could add additional values of this new target_session_attrs parameter
that issue different queries; or we might have some way of
distinguishing the server type without resorting to an SQL query; but
right now, we have this, and that's (hopefully) a good start.
Victor Wagner and Mithun Cy. Design review by Álvaro Herrera, Catalin
Iacob, Takayuki Tsunakawa, and Craig Ringer; code review by me. I
changed Mithun's patch to skip all remaining IPs for a host if we
reject a connection based on this new parameter, rewrote the
documentation, and did some other cosmetic cleanup.
Discussion: http://postgr.es/m/CAD__OuhqPRGpcsfwPHz_PDqAGkoqS1UvnUnOnAB-LBWBW=wu4A@mail.gmail.com
2016-11-29 18:18:31 +01:00
|
|
|
}
|
|
|
|
|
Extend the abilities of libpq's target_session_attrs parameter.
In addition to the existing options of "any" and "read-write", we
now support "read-only", "primary", "standby", and "prefer-standby".
"read-write" retains its previous meaning of "transactions are
read-write by default", and "read-only" inverts that. The other
three modes test specifically for hot-standby status, which is not
quite the same thing. (Setting default_transaction_read_only on
a primary server renders it read-only to this logic, but not a
standby.)
Furthermore, if talking to a v14 or later server, no extra network
round trip is needed to detect the session's status; the GUC_REPORT
variables delivered by the server are enough. When talking to an
older server, a SHOW or SELECT query is issued to detect session
read-only-ness or server hot-standby state, as needed.
Haribabu Kommi, Greg Nancarrow, Vignesh C, Tom Lane; reviewed at
various times by Laurenz Albe, Takayuki Tsunakawa, Peter Smith.
Discussion: https://postgr.es/m/CAF3+xM+8-ztOkaV9gHiJ3wfgENTq97QcjXQt+rbFQ6F7oNzt9A@mail.gmail.com
2021-03-03 02:17:45 +01:00
|
|
|
/* Something went wrong with "SELECT pg_is_in_recovery()". */
|
2022-07-03 20:11:05 +02:00
|
|
|
PQclear(res);
|
2017-07-10 11:28:57 +02:00
|
|
|
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
/* Append error report to conn->errorMessage. */
|
2021-05-03 08:51:30 +02:00
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
|
|
|
libpq_gettext("\"%s\" failed\n"),
|
|
|
|
"SELECT pg_is_in_recovery()");
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
|
|
|
|
/* Close connection politely. */
|
libpq: Add target_session_attrs parameter.
Commit 274bb2b3857cc987cfa21d14775cae9b0dababa5 made it possible to
specify multiple IPs in a connection string, but that's not good
enough for the case where you have a read-write master and a bunch of
read-only standbys and want to connect to whichever server is the
master at the current time. This commit allows that, by making it
possible to specify target_session_attrs=read-write as a connection
parameter.
There was extensive discussion of the best name for the connection
parameter and its values as well as the best way to distinguish master
and standbys. For now, adopt the same solution as JDBC: if the user
wants a read-write connection, issue 'show transaction_read_only' and
reject the connection if the result is 'on'. In the future, we
could add additional values of this new target_session_attrs parameter
that issue different queries; or we might have some way of
distinguishing the server type without resorting to an SQL query; but
right now, we have this, and that's (hopefully) a good start.
Victor Wagner and Mithun Cy. Design review by Álvaro Herrera, Catalin
Iacob, Takayuki Tsunakawa, and Craig Ringer; code review by me. I
changed Mithun's patch to skip all remaining IPs for a host if we
reject a connection based on this new parameter, rewrote the
documentation, and did some other cosmetic cleanup.
Discussion: http://postgr.es/m/CAD__OuhqPRGpcsfwPHz_PDqAGkoqS1UvnUnOnAB-LBWBW=wu4A@mail.gmail.com
2016-11-29 18:18:31 +01:00
|
|
|
conn->status = CONNECTION_OK;
|
|
|
|
sendTerminateConn(conn);
|
|
|
|
|
Extend the abilities of libpq's target_session_attrs parameter.
In addition to the existing options of "any" and "read-write", we
now support "read-only", "primary", "standby", and "prefer-standby".
"read-write" retains its previous meaning of "transactions are
read-write by default", and "read-only" inverts that. The other
three modes test specifically for hot-standby status, which is not
quite the same thing. (Setting default_transaction_read_only on
a primary server renders it read-only to this logic, but not a
standby.)
Furthermore, if talking to a v14 or later server, no extra network
round trip is needed to detect the session's status; the GUC_REPORT
variables delivered by the server are enough. When talking to an
older server, a SHOW or SELECT query is issued to detect session
read-only-ness or server hot-standby state, as needed.
Haribabu Kommi, Greg Nancarrow, Vignesh C, Tom Lane; reviewed at
various times by Laurenz Albe, Takayuki Tsunakawa, Peter Smith.
Discussion: https://postgr.es/m/CAF3+xM+8-ztOkaV9gHiJ3wfgENTq97QcjXQt+rbFQ6F7oNzt9A@mail.gmail.com
2021-03-03 02:17:45 +01:00
|
|
|
/* Try next host. */
|
|
|
|
conn->try_next_host = true;
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
goto keep_going;
|
libpq: Add target_session_attrs parameter.
Commit 274bb2b3857cc987cfa21d14775cae9b0dababa5 made it possible to
specify multiple IPs in a connection string, but that's not good
enough for the case where you have a read-write master and a bunch of
read-only standbys and want to connect to whichever server is the
master at the current time. This commit allows that, by making it
possible to specify target_session_attrs=read-write as a connection
parameter.
There was extensive discussion of the best name for the connection
parameter and its values as well as the best way to distinguish master
and standbys. For now, adopt the same solution as JDBC: if the user
wants a read-write connection, issue 'show transaction_read_only' and
reject the connection if the result is 'on'. In the future, we
could add additional values of this new target_session_attrs parameter
that issue different queries; or we might have some way of
distinguishing the server type without resorting to an SQL query; but
right now, we have this, and that's (hopefully) a good start.
Victor Wagner and Mithun Cy. Design review by Álvaro Herrera, Catalin
Iacob, Takayuki Tsunakawa, and Craig Ringer; code review by me. I
changed Mithun's patch to skip all remaining IPs for a host if we
reject a connection based on this new parameter, rewrote the
documentation, and did some other cosmetic cleanup.
Discussion: http://postgr.es/m/CAD__OuhqPRGpcsfwPHz_PDqAGkoqS1UvnUnOnAB-LBWBW=wu4A@mail.gmail.com
2016-11-29 18:18:31 +01:00
|
|
|
}
|
|
|
|
|
1999-11-30 04:08:19 +01:00
|
|
|
default:
|
2008-10-27 10:42:31 +01:00
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
2009-11-29 00:38:08 +01:00
|
|
|
libpq_gettext("invalid connection state %d, "
|
|
|
|
"probably indicative of memory corruption\n"),
|
2001-07-15 15:45:04 +02:00
|
|
|
conn->status);
|
1999-11-30 04:08:19 +01:00
|
|
|
goto error_return;
|
1998-05-07 01:51:16 +02:00
|
|
|
}
|
1998-01-26 02:42:53 +01:00
|
|
|
|
1999-11-30 04:08:19 +01:00
|
|
|
/* Unreachable */
|
|
|
|
|
|
|
|
error_return:
|
2001-03-22 07:16:21 +01:00
|
|
|
|
1999-11-30 04:08:19 +01:00
|
|
|
/*
|
|
|
|
* We used to close the socket at this point, but that makes it awkward
|
|
|
|
* for those above us if they wish to remove this socket from their own
|
|
|
|
* records (an fd_set for example). We'll just have this socket closed
|
|
|
|
* when PQfinish is called (which is compulsory even after an error, since
|
|
|
|
* the connection structure must be freed).
|
1998-05-07 01:51:16 +02:00
|
|
|
*/
|
2003-06-08 19:43:00 +02:00
|
|
|
conn->status = CONNECTION_BAD;
|
1999-11-30 04:08:19 +01:00
|
|
|
return PGRES_POLLING_FAILED;
|
|
|
|
}
|
1998-05-07 01:51:16 +02:00
|
|
|
|
|
|
|
|
2010-11-25 19:09:38 +01:00
|
|
|
/*
|
|
|
|
* internal_ping
|
2010-11-27 07:30:34 +01:00
|
|
|
* Determine if a server is running and if we can connect to it.
|
|
|
|
*
|
|
|
|
* The argument is a connection that's been started, but not completed.
|
2010-11-25 19:09:38 +01:00
|
|
|
*/
|
2011-03-07 02:04:29 +01:00
|
|
|
static PGPing
|
2010-11-25 19:09:38 +01:00
|
|
|
internal_ping(PGconn *conn)
|
|
|
|
{
|
2010-11-27 07:30:34 +01:00
|
|
|
/* Say "no attempt" if we never got to PQconnectPoll */
|
|
|
|
if (!conn || !conn->options_valid)
|
|
|
|
return PQPING_NO_ATTEMPT;
|
|
|
|
|
|
|
|
/* Attempt to complete the connection */
|
|
|
|
if (conn->status != CONNECTION_BAD)
|
2010-11-25 19:09:38 +01:00
|
|
|
(void) connectDBComplete(conn);
|
|
|
|
|
2010-11-27 07:30:34 +01:00
|
|
|
/* Definitely OK if we succeeded */
|
|
|
|
if (conn->status != CONNECTION_BAD)
|
|
|
|
return PQPING_OK;
|
|
|
|
|
|
|
|
/*
|
2010-11-27 08:11:45 +01:00
|
|
|
* Here begins the interesting part of "ping": determine the cause of the
|
2010-11-27 07:30:34 +01:00
|
|
|
* failure in sufficient detail to decide what to return. We do not want
|
|
|
|
* to report that the server is not up just because we didn't have a valid
|
2010-11-27 08:11:45 +01:00
|
|
|
* password, for example. In fact, any sort of authentication request
|
|
|
|
* implies the server is up. (We need this check since the libpq side of
|
|
|
|
* things might have pulled the plug on the connection before getting an
|
|
|
|
* error as such from the postmaster.)
|
|
|
|
*/
|
|
|
|
if (conn->auth_req_received)
|
|
|
|
return PQPING_OK;
|
|
|
|
|
|
|
|
/*
|
2010-11-27 07:30:34 +01:00
|
|
|
* If we failed to get any ERROR response from the postmaster, report
|
|
|
|
* PQPING_NO_RESPONSE. This result could be somewhat misleading for a
|
|
|
|
* pre-7.4 server, since it won't send back a SQLSTATE, but those are long
|
|
|
|
* out of support. Another corner case where the server could return a
|
2019-08-05 05:14:58 +02:00
|
|
|
* failure without a SQLSTATE is fork failure, but PQPING_NO_RESPONSE
|
|
|
|
* isn't totally unreasonable for that anyway. We expect that every other
|
2010-11-27 07:30:34 +01:00
|
|
|
* failure case in a modern server will produce a report with a SQLSTATE.
|
|
|
|
*
|
|
|
|
* NOTE: whenever we get around to making libpq generate SQLSTATEs for
|
|
|
|
* client-side errors, we should either not store those into
|
|
|
|
* last_sqlstate, or add an extra flag so we can tell client-side errors
|
|
|
|
* apart from server-side ones.
|
|
|
|
*/
|
|
|
|
if (strlen(conn->last_sqlstate) != 5)
|
|
|
|
return PQPING_NO_RESPONSE;
|
|
|
|
|
|
|
|
/*
|
2022-01-07 08:05:31 +01:00
|
|
|
* Report PQPING_REJECT if server says it's not accepting connections.
|
2010-11-27 07:30:34 +01:00
|
|
|
*/
|
|
|
|
if (strcmp(conn->last_sqlstate, ERRCODE_CANNOT_CONNECT_NOW) == 0)
|
|
|
|
return PQPING_REJECT;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Any other SQLSTATE can be taken to indicate that the server is up.
|
|
|
|
* Presumably it didn't like our username, password, or database name; or
|
|
|
|
* perhaps it had some transient failure, but that should not be taken as
|
|
|
|
* meaning "it's down".
|
|
|
|
*/
|
|
|
|
return PQPING_OK;
|
2010-11-25 19:09:38 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
1998-05-07 01:51:16 +02:00
|
|
|
/*
|
|
|
|
* makeEmptyPGconn
|
|
|
|
* - create a PGconn data structure with (as yet) no interesting data
|
|
|
|
*/
|
|
|
|
static PGconn *
|
|
|
|
makeEmptyPGconn(void)
|
|
|
|
{
|
2003-08-01 23:27:27 +02:00
|
|
|
PGconn *conn;
|
1998-09-01 06:40:42 +02:00
|
|
|
|
2003-07-27 05:32:26 +02:00
|
|
|
#ifdef WIN32
|
2014-05-06 18:12:18 +02:00
|
|
|
|
2005-05-05 18:40:42 +02:00
|
|
|
/*
|
In libpq for Windows, call WSAStartup once and WSACleanup not at all.
The Windows documentation insists that every WSAStartup call should
have a matching WSACleanup call. However, if that ever had actual
relevance, it wasn't in this century. Every remotely-modern Windows
kernel is capable of cleaning up when a process exits without doing
that, and must be so to avoid resource leaks in case of a process
crash. Moreover, Postgres backends have done WSAStartup without
WSACleanup since commit 4cdf51e64 in 2004, and we've never seen any
indication of a problem with that.
libpq's habit of doing WSAStartup during connection start and
WSACleanup during shutdown is also rather inefficient, since a
series of non-overlapping connection requests leads to repeated,
quite expensive DLL unload/reload cycles. We document a workaround
for that (having the application call WSAStartup for itself), but
that's just a kluge. It's also worth noting that it's far from
uncommon for applications to exit without doing PQfinish, and
we've not heard reports of trouble from that either.
However, the real reason for acting on this is that recent
experiments by Alexander Lakhin suggest that calling WSACleanup
during PQfinish might be triggering the symptom we occasionally see
that a process using libpq fails to emit expected stdio output.
Therefore, let's change libpq so that it calls WSAStartup only
once per process, during the first connection attempt, and never
calls WSACleanup at all.
While at it, get rid of the only other WSACleanup call in our code
tree, in pg_dump/parallel.c; that presumably is equally useless.
If this proves to suppress the fairly-common ecpg test failures
we see on Windows, I'll back-patch, but for now let's just do it
in HEAD and see what happens.
Discussion: https://postgr.es/m/ac976d8c-03df-d6b8-025c-15a2de8d9af1@postgrespro.ru
2020-10-17 22:53:48 +02:00
|
|
|
* Make sure socket support is up and running in this process.
|
|
|
|
*
|
|
|
|
* Note: the Windows documentation says that we should eventually do a
|
|
|
|
* matching WSACleanup() call, but experience suggests that that is at
|
|
|
|
* least as likely to cause problems as fix them. So we don't.
|
2005-05-05 18:40:42 +02:00
|
|
|
*/
|
In libpq for Windows, call WSAStartup once and WSACleanup not at all.
The Windows documentation insists that every WSAStartup call should
have a matching WSACleanup call. However, if that ever had actual
relevance, it wasn't in this century. Every remotely-modern Windows
kernel is capable of cleaning up when a process exits without doing
that, and must be so to avoid resource leaks in case of a process
crash. Moreover, Postgres backends have done WSAStartup without
WSACleanup since commit 4cdf51e64 in 2004, and we've never seen any
indication of a problem with that.
libpq's habit of doing WSAStartup during connection start and
WSACleanup during shutdown is also rather inefficient, since a
series of non-overlapping connection requests leads to repeated,
quite expensive DLL unload/reload cycles. We document a workaround
for that (having the application call WSAStartup for itself), but
that's just a kluge. It's also worth noting that it's far from
uncommon for applications to exit without doing PQfinish, and
we've not heard reports of trouble from that either.
However, the real reason for acting on this is that recent
experiments by Alexander Lakhin suggest that calling WSACleanup
during PQfinish might be triggering the symptom we occasionally see
that a process using libpq fails to emit expected stdio output.
Therefore, let's change libpq so that it calls WSAStartup only
once per process, during the first connection attempt, and never
calls WSACleanup at all.
While at it, get rid of the only other WSACleanup call in our code
tree, in pg_dump/parallel.c; that presumably is equally useless.
If this proves to suppress the fairly-common ecpg test failures
we see on Windows, I'll back-patch, but for now let's just do it
in HEAD and see what happens.
Discussion: https://postgr.es/m/ac976d8c-03df-d6b8-025c-15a2de8d9af1@postgrespro.ru
2020-10-17 22:53:48 +02:00
|
|
|
static bool wsastartup_done = false;
|
2003-07-27 05:32:26 +02:00
|
|
|
|
In libpq for Windows, call WSAStartup once and WSACleanup not at all.
The Windows documentation insists that every WSAStartup call should
have a matching WSACleanup call. However, if that ever had actual
relevance, it wasn't in this century. Every remotely-modern Windows
kernel is capable of cleaning up when a process exits without doing
that, and must be so to avoid resource leaks in case of a process
crash. Moreover, Postgres backends have done WSAStartup without
WSACleanup since commit 4cdf51e64 in 2004, and we've never seen any
indication of a problem with that.
libpq's habit of doing WSAStartup during connection start and
WSACleanup during shutdown is also rather inefficient, since a
series of non-overlapping connection requests leads to repeated,
quite expensive DLL unload/reload cycles. We document a workaround
for that (having the application call WSAStartup for itself), but
that's just a kluge. It's also worth noting that it's far from
uncommon for applications to exit without doing PQfinish, and
we've not heard reports of trouble from that either.
However, the real reason for acting on this is that recent
experiments by Alexander Lakhin suggest that calling WSACleanup
during PQfinish might be triggering the symptom we occasionally see
that a process using libpq fails to emit expected stdio output.
Therefore, let's change libpq so that it calls WSAStartup only
once per process, during the first connection attempt, and never
calls WSACleanup at all.
While at it, get rid of the only other WSACleanup call in our code
tree, in pg_dump/parallel.c; that presumably is equally useless.
If this proves to suppress the fairly-common ecpg test failures
we see on Windows, I'll back-patch, but for now let's just do it
in HEAD and see what happens.
Discussion: https://postgr.es/m/ac976d8c-03df-d6b8-025c-15a2de8d9af1@postgrespro.ru
2020-10-17 22:53:48 +02:00
|
|
|
if (!wsastartup_done)
|
|
|
|
{
|
|
|
|
WSADATA wsaData;
|
|
|
|
|
2020-10-18 18:56:43 +02:00
|
|
|
if (WSAStartup(MAKEWORD(2, 2), &wsaData) != 0)
|
In libpq for Windows, call WSAStartup once and WSACleanup not at all.
The Windows documentation insists that every WSAStartup call should
have a matching WSACleanup call. However, if that ever had actual
relevance, it wasn't in this century. Every remotely-modern Windows
kernel is capable of cleaning up when a process exits without doing
that, and must be so to avoid resource leaks in case of a process
crash. Moreover, Postgres backends have done WSAStartup without
WSACleanup since commit 4cdf51e64 in 2004, and we've never seen any
indication of a problem with that.
libpq's habit of doing WSAStartup during connection start and
WSACleanup during shutdown is also rather inefficient, since a
series of non-overlapping connection requests leads to repeated,
quite expensive DLL unload/reload cycles. We document a workaround
for that (having the application call WSAStartup for itself), but
that's just a kluge. It's also worth noting that it's far from
uncommon for applications to exit without doing PQfinish, and
we've not heard reports of trouble from that either.
However, the real reason for acting on this is that recent
experiments by Alexander Lakhin suggest that calling WSACleanup
during PQfinish might be triggering the symptom we occasionally see
that a process using libpq fails to emit expected stdio output.
Therefore, let's change libpq so that it calls WSAStartup only
once per process, during the first connection attempt, and never
calls WSACleanup at all.
While at it, get rid of the only other WSACleanup call in our code
tree, in pg_dump/parallel.c; that presumably is equally useless.
If this proves to suppress the fairly-common ecpg test failures
we see on Windows, I'll back-patch, but for now let's just do it
in HEAD and see what happens.
Discussion: https://postgr.es/m/ac976d8c-03df-d6b8-025c-15a2de8d9af1@postgrespro.ru
2020-10-17 22:53:48 +02:00
|
|
|
return NULL;
|
|
|
|
wsastartup_done = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Forget any earlier error */
|
2003-07-27 05:32:26 +02:00
|
|
|
WSASetLastError(0);
|
In libpq for Windows, call WSAStartup once and WSACleanup not at all.
The Windows documentation insists that every WSAStartup call should
have a matching WSACleanup call. However, if that ever had actual
relevance, it wasn't in this century. Every remotely-modern Windows
kernel is capable of cleaning up when a process exits without doing
that, and must be so to avoid resource leaks in case of a process
crash. Moreover, Postgres backends have done WSAStartup without
WSACleanup since commit 4cdf51e64 in 2004, and we've never seen any
indication of a problem with that.
libpq's habit of doing WSAStartup during connection start and
WSACleanup during shutdown is also rather inefficient, since a
series of non-overlapping connection requests leads to repeated,
quite expensive DLL unload/reload cycles. We document a workaround
for that (having the application call WSAStartup for itself), but
that's just a kluge. It's also worth noting that it's far from
uncommon for applications to exit without doing PQfinish, and
we've not heard reports of trouble from that either.
However, the real reason for acting on this is that recent
experiments by Alexander Lakhin suggest that calling WSACleanup
during PQfinish might be triggering the symptom we occasionally see
that a process using libpq fails to emit expected stdio output.
Therefore, let's change libpq so that it calls WSAStartup only
once per process, during the first connection attempt, and never
calls WSACleanup at all.
While at it, get rid of the only other WSACleanup call in our code
tree, in pg_dump/parallel.c; that presumably is equally useless.
If this proves to suppress the fairly-common ecpg test failures
we see on Windows, I'll back-patch, but for now let's just do it
in HEAD and see what happens.
Discussion: https://postgr.es/m/ac976d8c-03df-d6b8-025c-15a2de8d9af1@postgrespro.ru
2020-10-17 22:53:48 +02:00
|
|
|
#endif /* WIN32 */
|
2003-07-27 05:32:26 +02:00
|
|
|
|
2003-08-01 23:27:27 +02:00
|
|
|
conn = (PGconn *) malloc(sizeof(PGconn));
|
|
|
|
if (conn == NULL)
|
|
|
|
return conn;
|
|
|
|
|
2000-01-14 06:33:15 +01:00
|
|
|
/* Zero all pointers and booleans */
|
2004-01-07 19:56:30 +01:00
|
|
|
MemSet(conn, 0, sizeof(PGconn));
|
1998-05-07 01:51:16 +02:00
|
|
|
|
2012-08-02 19:10:30 +02:00
|
|
|
/* install default notice hooks */
|
2003-06-21 23:51:35 +02:00
|
|
|
conn->noticeHooks.noticeRec = defaultNoticeReceiver;
|
|
|
|
conn->noticeHooks.noticeProc = defaultNoticeProcessor;
|
2012-04-05 00:27:56 +02:00
|
|
|
|
1998-05-07 01:51:16 +02:00
|
|
|
conn->status = CONNECTION_BAD;
|
|
|
|
conn->asyncStatus = PGASYNC_IDLE;
|
2021-03-15 22:13:42 +01:00
|
|
|
conn->pipelineStatus = PQ_PIPELINE_OFF;
|
2003-06-21 23:51:35 +02:00
|
|
|
conn->xactStatus = PQTRANS_IDLE;
|
2006-02-13 23:33:57 +01:00
|
|
|
conn->options_valid = false;
|
|
|
|
conn->nonblocking = false;
|
2003-06-08 19:43:00 +02:00
|
|
|
conn->client_encoding = PG_SQL_ASCII;
|
Modify libpq's string-escaping routines to be aware of encoding considerations
and standard_conforming_strings. The encoding changes are needed for proper
escaping in multibyte encodings, as per the SQL-injection vulnerabilities
noted in CVE-2006-2313 and CVE-2006-2314. Concurrent fixes are being applied
to the server to ensure that it rejects queries that may have been corrupted
by attempted SQL injection, but this merely guarantees that unpatched clients
will fail rather than allow injection. An actual fix requires changing the
client-side code. While at it we have also fixed these routines to understand
about standard_conforming_strings, so that the upcoming changeover to SQL-spec
string syntax can be somewhat transparent to client code.
Since the existing API of PQescapeString and PQescapeBytea provides no way to
inform them which settings are in use, these functions are now deprecated in
favor of new functions PQescapeStringConn and PQescapeByteaConn. The new
functions take the PGconn to which the string will be sent as an additional
parameter, and look inside the connection structure to determine what to do.
So as to provide some functionality for clients using the old functions,
libpq stores the latest encoding and standard_conforming_strings values
received from the backend in static variables, and the old functions consult
these variables. This will work reliably in clients using only one Postgres
connection at a time, or even multiple connections if they all use the same
encoding and string syntax settings; which should cover many practical
scenarios.
Clients that use homebrew escaping methods, such as PHP's addslashes()
function or even hardwired regexp substitution, will require extra effort
to fix :-(. It is strongly recommended that such code be replaced by use of
PQescapeStringConn/PQescapeByteaConn if at all feasible.
2006-05-21 22:19:23 +02:00
|
|
|
conn->std_strings = false; /* unless server says differently */
|
Extend the abilities of libpq's target_session_attrs parameter.
In addition to the existing options of "any" and "read-write", we
now support "read-only", "primary", "standby", and "prefer-standby".
"read-write" retains its previous meaning of "transactions are
read-write by default", and "read-only" inverts that. The other
three modes test specifically for hot-standby status, which is not
quite the same thing. (Setting default_transaction_read_only on
a primary server renders it read-only to this logic, but not a
standby.)
Furthermore, if talking to a v14 or later server, no extra network
round trip is needed to detect the session's status; the GUC_REPORT
variables delivered by the server are enough. When talking to an
older server, a SHOW or SELECT query is issued to detect session
read-only-ness or server hot-standby state, as needed.
Haribabu Kommi, Greg Nancarrow, Vignesh C, Tom Lane; reviewed at
various times by Laurenz Albe, Takayuki Tsunakawa, Peter Smith.
Discussion: https://postgr.es/m/CAF3+xM+8-ztOkaV9gHiJ3wfgENTq97QcjXQt+rbFQ6F7oNzt9A@mail.gmail.com
2021-03-03 02:17:45 +01:00
|
|
|
conn->default_transaction_read_only = PG_BOOL_UNKNOWN;
|
|
|
|
conn->in_hot_standby = PG_BOOL_UNKNOWN;
|
2003-06-21 23:51:35 +02:00
|
|
|
conn->verbosity = PQERRORS_DEFAULT;
|
2015-09-05 17:58:20 +02:00
|
|
|
conn->show_context = PQSHOW_CONTEXT_ERRORS;
|
2014-04-17 01:46:51 +02:00
|
|
|
conn->sock = PGINVALID_SOCKET;
|
2021-04-01 00:16:58 +02:00
|
|
|
conn->Pfdebug = NULL;
|
2000-01-14 06:33:15 +01:00
|
|
|
|
1999-08-31 03:37:37 +02:00
|
|
|
/*
|
2003-04-19 02:02:30 +02:00
|
|
|
* We try to send at least 8K at a time, which is the usual size of pipe
|
|
|
|
* buffers on Unix systems. That way, when we are sending a large amount
|
1999-08-31 03:37:37 +02:00
|
|
|
* of data, we avoid incurring extra kernel context swaps for partial
|
2003-04-19 02:02:30 +02:00
|
|
|
* bufferloads. The output buffer is initially made 16K in size, and we
|
|
|
|
* try to dump it after accumulating 8K.
|
1999-08-31 03:37:37 +02:00
|
|
|
*
|
|
|
|
* With the same goal of minimizing context swaps, the input buffer will
|
|
|
|
* be enlarged anytime it has less than 8K free, so we initially allocate
|
|
|
|
* twice that.
|
|
|
|
*/
|
|
|
|
conn->inBufSize = 16 * 1024;
|
1998-05-07 01:51:16 +02:00
|
|
|
conn->inBuffer = (char *) malloc(conn->inBufSize);
|
2003-04-19 02:02:30 +02:00
|
|
|
conn->outBufSize = 16 * 1024;
|
1998-05-07 01:51:16 +02:00
|
|
|
conn->outBuffer = (char *) malloc(conn->outBufSize);
|
2012-04-05 00:27:56 +02:00
|
|
|
conn->rowBufLen = 32;
|
|
|
|
conn->rowBuf = (PGdataValue *) malloc(conn->rowBufLen * sizeof(PGdataValue));
|
1999-08-31 03:37:37 +02:00
|
|
|
initPQExpBuffer(&conn->errorMessage);
|
|
|
|
initPQExpBuffer(&conn->workBuffer);
|
2003-06-08 19:43:00 +02:00
|
|
|
|
1999-08-31 03:37:37 +02:00
|
|
|
if (conn->inBuffer == NULL ||
|
|
|
|
conn->outBuffer == NULL ||
|
2012-04-05 00:27:56 +02:00
|
|
|
conn->rowBuf == NULL ||
|
2008-11-26 01:26:23 +01:00
|
|
|
PQExpBufferBroken(&conn->errorMessage) ||
|
|
|
|
PQExpBufferBroken(&conn->workBuffer))
|
1998-05-07 01:51:16 +02:00
|
|
|
{
|
2000-01-14 06:33:15 +01:00
|
|
|
/* out of memory already :-( */
|
1998-05-07 01:51:16 +02:00
|
|
|
freePGconn(conn);
|
|
|
|
conn = NULL;
|
|
|
|
}
|
2003-06-08 19:43:00 +02:00
|
|
|
|
1998-05-07 01:51:16 +02:00
|
|
|
return conn;
|
|
|
|
}
|
|
|
|
|
1996-07-09 08:22:35 +02:00
|
|
|
/*
|
|
|
|
* freePGconn
|
2008-01-29 03:06:30 +01:00
|
|
|
* - free an idle (closed) PGconn data structure
|
2005-07-13 17:25:55 +02:00
|
|
|
*
|
2008-01-29 03:06:30 +01:00
|
|
|
* NOTE: this should not overlap any functionality with closePGconn().
|
|
|
|
* Clearing/resetting of transient state belongs there; what we do here is
|
|
|
|
* release data that is to be held for the life of the PGconn structure.
|
|
|
|
* If a value ought to be cleared/freed during PQreset(), do it there not here.
|
1996-07-09 08:22:35 +02:00
|
|
|
*/
|
|
|
|
static void
|
|
|
|
freePGconn(PGconn *conn)
|
|
|
|
{
|
2008-09-17 06:31:08 +02:00
|
|
|
/* let any event procs clean up their state data */
|
2022-06-16 21:50:56 +02:00
|
|
|
for (int i = 0; i < conn->nEvents; i++)
|
2008-09-17 06:31:08 +02:00
|
|
|
{
|
|
|
|
PGEventConnDestroy evt;
|
|
|
|
|
|
|
|
evt.conn = conn;
|
|
|
|
(void) conn->events[i].proc(PGEVT_CONNDESTROY, &evt,
|
|
|
|
conn->events[i].passThrough);
|
|
|
|
free(conn->events[i].name);
|
|
|
|
}
|
|
|
|
|
2016-11-03 14:25:20 +01:00
|
|
|
/* clean up pg_conn_host structures */
|
2022-06-16 21:50:56 +02:00
|
|
|
for (int i = 0; i < conn->nconnhost; ++i)
|
2016-11-03 14:25:20 +01:00
|
|
|
{
|
2022-06-16 21:50:56 +02:00
|
|
|
free(conn->connhost[i].host);
|
|
|
|
free(conn->connhost[i].hostaddr);
|
|
|
|
free(conn->connhost[i].port);
|
|
|
|
if (conn->connhost[i].password != NULL)
|
2016-11-03 14:25:20 +01:00
|
|
|
{
|
2022-06-16 21:50:56 +02:00
|
|
|
explicit_bzero(conn->connhost[i].password, strlen(conn->connhost[i].password));
|
|
|
|
free(conn->connhost[i].password);
|
2016-11-03 14:25:20 +01:00
|
|
|
}
|
|
|
|
}
|
2022-06-16 21:50:56 +02:00
|
|
|
free(conn->connhost);
|
|
|
|
|
|
|
|
free(conn->client_encoding_initial);
|
|
|
|
free(conn->events);
|
|
|
|
free(conn->pghost);
|
|
|
|
free(conn->pghostaddr);
|
|
|
|
free(conn->pgport);
|
|
|
|
free(conn->connect_timeout);
|
|
|
|
free(conn->pgtcp_user_timeout);
|
|
|
|
free(conn->pgoptions);
|
|
|
|
free(conn->appname);
|
|
|
|
free(conn->fbappname);
|
|
|
|
free(conn->dbName);
|
|
|
|
free(conn->replication);
|
|
|
|
free(conn->pguser);
|
1998-04-21 06:00:06 +02:00
|
|
|
if (conn->pgpass)
|
2019-09-05 08:15:58 +02:00
|
|
|
{
|
|
|
|
explicit_bzero(conn->pgpass, strlen(conn->pgpass));
|
1998-04-21 06:00:06 +02:00
|
|
|
free(conn->pgpass);
|
2019-09-05 08:15:58 +02:00
|
|
|
}
|
2022-06-16 21:50:56 +02:00
|
|
|
free(conn->pgpassfile);
|
|
|
|
free(conn->channel_binding);
|
|
|
|
free(conn->keepalives);
|
|
|
|
free(conn->keepalives_idle);
|
|
|
|
free(conn->keepalives_interval);
|
|
|
|
free(conn->keepalives_count);
|
|
|
|
free(conn->sslmode);
|
|
|
|
free(conn->sslcert);
|
|
|
|
free(conn->sslkey);
|
2019-12-20 21:34:07 +01:00
|
|
|
if (conn->sslpassword)
|
2020-05-21 08:49:20 +02:00
|
|
|
{
|
|
|
|
explicit_bzero(conn->sslpassword, strlen(conn->sslpassword));
|
2019-12-20 21:34:07 +01:00
|
|
|
free(conn->sslpassword);
|
2020-05-21 08:49:20 +02:00
|
|
|
}
|
2022-06-16 21:50:56 +02:00
|
|
|
free(conn->sslrootcert);
|
|
|
|
free(conn->sslcrl);
|
|
|
|
free(conn->sslcrldir);
|
|
|
|
free(conn->sslcompression);
|
|
|
|
free(conn->sslsni);
|
|
|
|
free(conn->requirepeer);
|
|
|
|
free(conn->ssl_min_protocol_version);
|
|
|
|
free(conn->ssl_max_protocol_version);
|
|
|
|
free(conn->gssencmode);
|
|
|
|
free(conn->krbsrvname);
|
|
|
|
free(conn->gsslib);
|
|
|
|
free(conn->connip);
|
1998-05-07 01:51:16 +02:00
|
|
|
/* Note that conn->Pfdebug is not ours to close or free */
|
2022-06-16 21:50:56 +02:00
|
|
|
free(conn->write_err_msg);
|
|
|
|
free(conn->inBuffer);
|
|
|
|
free(conn->outBuffer);
|
|
|
|
free(conn->rowBuf);
|
|
|
|
free(conn->target_session_attrs);
|
1999-08-31 03:37:37 +02:00
|
|
|
termPQExpBuffer(&conn->errorMessage);
|
|
|
|
termPQExpBuffer(&conn->workBuffer);
|
2008-01-29 03:06:30 +01:00
|
|
|
|
1996-07-09 08:22:35 +02:00
|
|
|
free(conn);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
In libpq, don't look up all the hostnames at once.
Historically, we looked up the target hostname in connectDBStart, so that
PQconnectPoll did not need to do DNS name resolution. The patches that
added multiple-target-host support to libpq preserved this division of
labor; but it's really nonsensical now, because it means that if any one
of the target hosts fails to resolve in DNS, the connection fails. That
negates the no-single-point-of-failure goal of the feature. Additionally,
DNS lookups aren't exactly cheap, but the code did them all even if the
first connection attempt succeeds.
Hence, rearrange so that PQconnectPoll does the lookups, and only looks
up a hostname when it's time to try that host. This does mean that
PQconnectPoll could block on a DNS lookup --- but if you wanted to avoid
that, you should be using hostaddr, as the documentation has always
specified. It seems fairly unlikely that any applications would really
care whether the lookup occurs inside PQconnectStart or PQconnectPoll.
In addition to calling out that fact explicitly, do some other minor
wordsmithing in the docs around the multiple-target-host feature.
Since this seems like a bug in the multiple-target-host feature,
backpatch to v10 where that was introduced. In the back branches,
avoid moving any existing fields of struct pg_conn, just in case
any third-party code is looking into that struct.
Tom Lane, reviewed by Fabien Coelho
Discussion: https://postgr.es/m/4913.1533827102@sss.pgh.pa.us
2018-08-23 22:39:19 +02:00
|
|
|
* release_conn_addrinfo
|
|
|
|
* - Free any addrinfo list in the PGconn.
|
2003-06-21 23:51:35 +02:00
|
|
|
*/
|
1996-07-09 08:22:35 +02:00
|
|
|
static void
|
In libpq, don't look up all the hostnames at once.
Historically, we looked up the target hostname in connectDBStart, so that
PQconnectPoll did not need to do DNS name resolution. The patches that
added multiple-target-host support to libpq preserved this division of
labor; but it's really nonsensical now, because it means that if any one
of the target hosts fails to resolve in DNS, the connection fails. That
negates the no-single-point-of-failure goal of the feature. Additionally,
DNS lookups aren't exactly cheap, but the code did them all even if the
first connection attempt succeeds.
Hence, rearrange so that PQconnectPoll does the lookups, and only looks
up a hostname when it's time to try that host. This does mean that
PQconnectPoll could block on a DNS lookup --- but if you wanted to avoid
that, you should be using hostaddr, as the documentation has always
specified. It seems fairly unlikely that any applications would really
care whether the lookup occurs inside PQconnectStart or PQconnectPoll.
In addition to calling out that fact explicitly, do some other minor
wordsmithing in the docs around the multiple-target-host feature.
Since this seems like a bug in the multiple-target-host feature,
backpatch to v10 where that was introduced. In the back branches,
avoid moving any existing fields of struct pg_conn, just in case
any third-party code is looking into that struct.
Tom Lane, reviewed by Fabien Coelho
Discussion: https://postgr.es/m/4913.1533827102@sss.pgh.pa.us
2018-08-23 22:39:19 +02:00
|
|
|
release_conn_addrinfo(PGconn *conn)
|
1996-07-09 08:22:35 +02:00
|
|
|
{
|
In libpq, don't look up all the hostnames at once.
Historically, we looked up the target hostname in connectDBStart, so that
PQconnectPoll did not need to do DNS name resolution. The patches that
added multiple-target-host support to libpq preserved this division of
labor; but it's really nonsensical now, because it means that if any one
of the target hosts fails to resolve in DNS, the connection fails. That
negates the no-single-point-of-failure goal of the feature. Additionally,
DNS lookups aren't exactly cheap, but the code did them all even if the
first connection attempt succeeds.
Hence, rearrange so that PQconnectPoll does the lookups, and only looks
up a hostname when it's time to try that host. This does mean that
PQconnectPoll could block on a DNS lookup --- but if you wanted to avoid
that, you should be using hostaddr, as the documentation has always
specified. It seems fairly unlikely that any applications would really
care whether the lookup occurs inside PQconnectStart or PQconnectPoll.
In addition to calling out that fact explicitly, do some other minor
wordsmithing in the docs around the multiple-target-host feature.
Since this seems like a bug in the multiple-target-host feature,
backpatch to v10 where that was introduced. In the back branches,
avoid moving any existing fields of struct pg_conn, just in case
any third-party code is looking into that struct.
Tom Lane, reviewed by Fabien Coelho
Discussion: https://postgr.es/m/4913.1533827102@sss.pgh.pa.us
2018-08-23 22:39:19 +02:00
|
|
|
if (conn->addrlist)
|
libpq: Add target_session_attrs parameter.
Commit 274bb2b3857cc987cfa21d14775cae9b0dababa5 made it possible to
specify multiple IPs in a connection string, but that's not good
enough for the case where you have a read-write master and a bunch of
read-only standbys and want to connect to whichever server is the
master at the current time. This commit allows that, by making it
possible to specify target_session_attrs=read-write as a connection
parameter.
There was extensive discussion of the best name for the connection
parameter and its values as well as the best way to distinguish master
and standbys. For now, adopt the same solution as JDBC: if the user
wants a read-write connection, issue 'show transaction_read_only' and
rejection the connection if the result is 'on'. In the future, we
could add additional values of this new target_session_attrs parameter
that issue different queries; or we might have some way of
distinguishing the server type without resorting to an SQL query; but
right now, we have this, and that's (hopefully) a good start.
Victor Wagner and Mithun Cy. Design review by Álvaro Herrera, Catalin
Iacob, Takayuki Tsunakawa, and Craig Ringer; code review by me. I
changed Mithun's patch to skip all remaining IPs for a host if we
reject a connection based on this new parameter, rewrote the
documentation, and did some other cosmetic cleanup.
Discussion: http://postgr.es/m/CAD__OuhqPRGpcsfwPHz_PDqAGkoqS1UvnUnOnAB-LBWBW=wu4A@mail.gmail.com
2016-11-29 18:18:31 +01:00
|
|
|
{
|
In libpq, don't look up all the hostnames at once.
Historically, we looked up the target hostname in connectDBStart, so that
PQconnectPoll did not need to do DNS name resolution. The patches that
added multiple-target-host support to libpq preserved this division of
labor; but it's really nonsensical now, because it means that if any one
of the target hosts fails to resolve in DNS, the connection fails. That
negates the no-single-point-of-failure goal of the feature. Additionally,
DNS lookups aren't exactly cheap, but the code did them all even if the
first connection attempt succeeds.
Hence, rearrange so that PQconnectPoll does the lookups, and only looks
up a hostname when it's time to try that host. This does mean that
PQconnectPoll could block on a DNS lookup --- but if you wanted to avoid
that, you should be using hostaddr, as the documentation has always
specified. It seems fairly unlikely that any applications would really
care whether the lookup occurs inside PQconnectStart or PQconnectPoll.
In addition to calling out that fact explicitly, do some other minor
wordsmithing in the docs around the multiple-target-host feature.
Since this seems like a bug in the multiple-target-host feature,
backpatch to v10 where that was introduced. In the back branches,
avoid moving any existing fields of struct pg_conn, just in case
any third-party code is looking into that struct.
Tom Lane, reviewed by Fabien Coelho
Discussion: https://postgr.es/m/4913.1533827102@sss.pgh.pa.us
2018-08-23 22:39:19 +02:00
|
|
|
pg_freeaddrinfo_all(conn->addrlist_family, conn->addrlist);
|
|
|
|
conn->addrlist = NULL;
|
|
|
|
conn->addr_cur = NULL; /* for safety */
|
libpq: Add target_session_attrs parameter.
Commit 274bb2b3857cc987cfa21d14775cae9b0dababa5 made it possible to
specify multiple IPs in a connection string, but that's not good
enough for the case where you have a read-write master and a bunch of
read-only standbys and want to connect to whichever server is the
master at the current time. This commit allows that, by making it
possible to specify target_session_attrs=read-write as a connection
parameter.
There was extensive discussion of the best name for the connection
parameter and its values as well as the best way to distinguish master
and standbys. For now, adopt the same solution as JDBC: if the user
wants a read-write connection, issue 'show transaction_read_only' and
rejection the connection if the result is 'on'. In the future, we
could add additional values of this new target_session_attrs parameter
that issue different queries; or we might have some way of
distinguishing the server type without resorting to an SQL query; but
right now, we have this, and that's (hopefully) a good start.
Victor Wagner and Mithun Cy. Design review by Álvaro Herrera, Catalin
Iacob, Takayuki Tsunakawa, and Craig Ringer; code review by me. I
changed Mithun's patch to skip all remaining IPs for a host if we
reject a connection based on this new parameter, rewrote the
documentation, and did some other cosmetic cleanup.
Discussion: http://postgr.es/m/CAD__OuhqPRGpcsfwPHz_PDqAGkoqS1UvnUnOnAB-LBWBW=wu4A@mail.gmail.com
2016-11-29 18:18:31 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* sendTerminateConn
|
|
|
|
* - Send a terminate message to backend.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
sendTerminateConn(PGconn *conn)
|
|
|
|
{
|
2001-07-06 19:58:53 +02:00
|
|
|
/*
|
|
|
|
* Note that the protocol doesn't allow us to send Terminate messages
|
2001-08-15 20:42:16 +02:00
|
|
|
* during the startup phase.
|
|
|
|
*/
|
2014-04-17 01:46:51 +02:00
|
|
|
if (conn->sock != PGINVALID_SOCKET && conn->status == CONNECTION_OK)
|
1998-05-07 01:51:16 +02:00
|
|
|
{
|
|
|
|
/*
|
1998-08-09 04:59:33 +02:00
|
|
|
* Try to send "close connection" message to backend. Ignore any
|
2003-04-19 02:02:30 +02:00
|
|
|
* error.
|
1998-05-07 01:51:16 +02:00
|
|
|
*/
|
2021-03-04 09:45:55 +01:00
|
|
|
pqPutMsgStart('X', conn);
|
2003-04-19 02:02:30 +02:00
|
|
|
pqPutMsgEnd(conn);
|
2014-03-02 04:14:14 +01:00
|
|
|
(void) pqFlush(conn);
|
1998-05-07 01:51:16 +02:00
|
|
|
}
|
libpq: Add target_session_attrs parameter.
Commit 274bb2b3857cc987cfa21d14775cae9b0dababa5 made it possible to
specify multiple IPs in a connection string, but that's not good
enough for the case where you have a read-write master and a bunch of
read-only standbys and want to connect to whichever server is the
master at the current time. This commit allows that, by making it
possible to specify target_session_attrs=read-write as a connection
parameter.
There was extensive discussion of the best name for the connection
parameter and its values as well as the best way to distinguish master
and standbys. For now, adopt the same solution as JDBC: if the user
wants a read-write connection, issue 'show transaction_read_only' and
rejection the connection if the result is 'on'. In the future, we
could add additional values of this new target_session_attrs parameter
that issue different queries; or we might have some way of
distinguishing the server type without resorting to an SQL query; but
right now, we have this, and that's (hopefully) a good start.
Victor Wagner and Mithun Cy. Design review by Álvaro Herrera, Catalin
Iacob, Takayuki Tsunakawa, and Craig Ringer; code review by me. I
changed Mithun's patch to skip all remaining IPs for a host if we
reject a connection based on this new parameter, rewrote the
documentation, and did some other cosmetic cleanup.
Discussion: http://postgr.es/m/CAD__OuhqPRGpcsfwPHz_PDqAGkoqS1UvnUnOnAB-LBWBW=wu4A@mail.gmail.com
2016-11-29 18:18:31 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* closePGconn
|
|
|
|
* - properly close a connection to the backend
|
|
|
|
*
|
|
|
|
* This should reset or release all transient state, but NOT the connection
|
|
|
|
* parameters. On exit, the PGconn should be in condition to start a fresh
|
|
|
|
* connection with the same parameters (see PQreset()).
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
closePGconn(PGconn *conn)
|
|
|
|
{
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
/*
|
|
|
|
* If possible, send Terminate message to close the connection politely.
|
|
|
|
*/
|
libpq: Add target_session_attrs parameter.
Commit 274bb2b3857cc987cfa21d14775cae9b0dababa5 made it possible to
specify multiple IPs in a connection string, but that's not good
enough for the case where you have a read-write master and a bunch of
read-only standbys and want to connect to whichever server is the
master at the current time. This commit allows that, by making it
possible to specify target_session_attrs=read-write as a connection
parameter.
There was extensive discussion of the best name for the connection
parameter and its values as well as the best way to distinguish master
and standbys. For now, adopt the same solution as JDBC: if the user
wants a read-write connection, issue 'show transaction_read_only' and
reject the connection if the result is 'on'. In the future, we
could add additional values of this new target_session_attrs parameter
that issue different queries; or we might have some way of
distinguishing the server type without resorting to an SQL query; but
right now, we have this, and that's (hopefully) a good start.
Victor Wagner and Mithun Cy. Design review by Álvaro Herrera, Catalin
Iacob, Takayuki Tsunakawa, and Craig Ringer; code review by me. I
changed Mithun's patch to skip all remaining IPs for a host if we
reject a connection based on this new parameter, rewrote the
documentation, and did some other cosmetic cleanup.
Discussion: http://postgr.es/m/CAD__OuhqPRGpcsfwPHz_PDqAGkoqS1UvnUnOnAB-LBWBW=wu4A@mail.gmail.com
2016-11-29 18:18:31 +01:00
|
|
|
sendTerminateConn(conn);
|
1998-05-07 01:51:16 +02:00
|
|
|
|
2000-01-24 03:12:58 +01:00
|
|
|
/*
|
2008-01-29 03:06:30 +01:00
|
|
|
* Must reset the blocking status so a possible reconnect will work.
|
|
|
|
*
|
|
|
|
* Don't call PQsetnonblocking() because it will fail if it's unable to
|
|
|
|
* flush the connection.
|
2000-01-24 03:12:58 +01:00
|
|
|
*/
|
2017-08-16 06:22:32 +02:00
|
|
|
conn->nonblocking = false;
|
2000-01-24 03:12:58 +01:00
|
|
|
|
1998-05-07 01:51:16 +02:00
|
|
|
/*
|
|
|
|
* Close the connection, reset all transient state, flush I/O buffers.
|
Rearrange libpq's error reporting to avoid duplicated error text.
Since commit ffa2e4670, libpq accumulates text in conn->errorMessage
across a whole query cycle. In some situations, we may report more
than one error event within a cycle: the easiest case to reach is
where we report a FATAL error message from the server, and then a
bit later we detect loss of connection. Since, historically, each
error PGresult bears the entire content of conn->errorMessage,
this results in duplication of the FATAL message in any output that
concatenates the contents of the PGresults.
Accumulation in errorMessage still seems like a good idea, especially
in view of the number of places that did ad-hoc error concatenation
before ffa2e4670. So to fix this, let's track how much of
conn->errorMessage has been read out into error PGresults, and only
include new text in later PGresults. The tricky part of that is
to be sure that we never discard an error PGresult once made (else
we'd risk dropping some text, a problem much worse than duplication).
While libpq formerly did that in some code paths, a little bit of
rearrangement lets us postpone making an error PGresult at all until
we are about to return it.
A side benefit of that postponement is that it now becomes practical
to return a dummy static PGresult in cases where we hit out-of-memory
while trying to manufacture an error PGresult. This eliminates the
admittedly-very-rare case where we'd return NULL from PQgetResult,
indicating successful query completion, even though what actually
happened was an OOM failure.
Discussion: https://postgr.es/m/ab4288f8-be5c-57fb-2400-e3e857f53e46@enterprisedb.com
2022-02-18 21:35:15 +01:00
|
|
|
* Note that this includes clearing conn's error state; we're no longer
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
* interested in any failures associated with the old connection, and we
|
|
|
|
* want a clean slate for any new connection attempt.
|
1998-05-07 01:51:16 +02:00
|
|
|
*/
|
2015-11-12 19:03:52 +01:00
|
|
|
pqDropConnection(conn, true);
|
1996-08-19 15:25:40 +02:00
|
|
|
conn->status = CONNECTION_BAD; /* Well, not really _bad_ - just absent */
|
1998-05-07 01:51:16 +02:00
|
|
|
conn->asyncStatus = PGASYNC_IDLE;
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
conn->xactStatus = PQTRANS_IDLE;
|
2021-03-15 22:13:42 +01:00
|
|
|
conn->pipelineStatus = PQ_PIPELINE_OFF;
|
2012-04-05 00:27:56 +02:00
|
|
|
pqClearAsyncResult(conn); /* deallocate result */
|
Rearrange libpq's error reporting to avoid duplicated error text.
Since commit ffa2e4670, libpq accumulates text in conn->errorMessage
across a whole query cycle. In some situations, we may report more
than one error event within a cycle: the easiest case to reach is
where we report a FATAL error message from the server, and then a
bit later we detect loss of connection. Since, historically, each
error PGresult bears the entire content of conn->errorMessage,
this results in duplication of the FATAL message in any output that
concatenates the contents of the PGresults.
Accumulation in errorMessage still seems like a good idea, especially
in view of the number of places that did ad-hoc error concatenation
before ffa2e4670. So to fix this, let's track how much of
conn->errorMessage has been read out into error PGresults, and only
include new text in later PGresults. The tricky part of that is
to be sure that we never discard an error PGresult once made (else
we'd risk dropping some text, a problem much worse than duplication).
While libpq formerly did that in some code paths, a little bit of
rearrangement lets us postpone making an error PGresult at all until
we are about to return it.
A side benefit of that postponement is that it now becomes practical
to return a dummy static PGresult in cases where we hit out-of-memory
while trying to manufacture an error PGresult. This eliminates the
admittedly-very-rare case where we'd return NULL from PQgetResult,
indicating successful query completion, even though what actually
happened was an OOM failure.
Discussion: https://postgr.es/m/ab4288f8-be5c-57fb-2400-e3e857f53e46@enterprisedb.com
2022-02-18 21:35:15 +01:00
|
|
|
pqClearConnErrorState(conn);
|
In libpq, don't look up all the hostnames at once.
Historically, we looked up the target hostname in connectDBStart, so that
PQconnectPoll did not need to do DNS name resolution. The patches that
added multiple-target-host support to libpq preserved this division of
labor; but it's really nonsensical now, because it means that if any one
of the target hosts fails to resolve in DNS, the connection fails. That
negates the no-single-point-of-failure goal of the feature. Additionally,
DNS lookups aren't exactly cheap, but the code did them all even if the
first connection attempt succeeds.
Hence, rearrange so that PQconnectPoll does the lookups, and only looks
up a hostname when it's time to try that host. This does mean that
PQconnectPoll could block on a DNS lookup --- but if you wanted to avoid
that, you should be using hostaddr, as the documentation has always
specified. It seems fairly unlikely that any applications would really
care whether the lookup occurs inside PQconnectStart or PQconnectPoll.
In addition to calling out that fact explicitly, do some other minor
wordsmithing in the docs around the multiple-target-host feature.
Since this seems like a bug in the multiple-target-host feature,
backpatch to v10 where that was introduced. In the back branches,
avoid moving any existing fields of struct pg_conn, just in case
any third-party code is looking into that struct.
Tom Lane, reviewed by Fabien Coelho
Discussion: https://postgr.es/m/4913.1533827102@sss.pgh.pa.us
2018-08-23 22:39:19 +02:00
|
|
|
release_conn_addrinfo(conn);
|
2016-11-03 14:25:20 +01:00
|
|
|
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
/* Reset all state obtained from server, too */
|
|
|
|
pqDropServerData(conn);
|
1996-07-09 08:22:35 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2005-06-12 02:00:21 +02:00
|
|
|
* PQfinish: properly close a connection to the backend. Also frees
|
|
|
|
* the PGconn data structure so it shouldn't be re-used after this.
|
|
|
|
*/
|
1996-07-09 08:22:35 +02:00
|
|
|
void
|
|
|
|
PQfinish(PGconn *conn)
|
|
|
|
{
|
1998-08-09 04:59:33 +02:00
|
|
|
if (conn)
|
1996-07-12 06:53:59 +02:00
|
|
|
{
|
1998-05-07 01:51:16 +02:00
|
|
|
closePGconn(conn);
|
1996-07-12 06:53:59 +02:00
|
|
|
freePGconn(conn);
|
|
|
|
}
|
1996-07-09 08:22:35 +02:00
|
|
|
}
|
|
|
|
|
2005-06-12 02:00:21 +02:00
|
|
|
/*
|
|
|
|
* PQreset: resets the connection to the backend by closing the
|
|
|
|
* existing connection and creating a new one.
|
|
|
|
*/
|
1996-07-09 08:22:35 +02:00
|
|
|
void
|
|
|
|
PQreset(PGconn *conn)
|
|
|
|
{
|
1998-08-09 04:59:33 +02:00
|
|
|
if (conn)
|
1996-07-12 06:53:59 +02:00
|
|
|
{
|
1996-07-09 08:22:35 +02:00
|
|
|
closePGconn(conn);
|
1999-11-30 04:08:19 +01:00
|
|
|
|
2008-09-17 06:31:08 +02:00
|
|
|
if (connectDBStart(conn) && connectDBComplete(conn))
|
|
|
|
{
|
|
|
|
/*
|
2022-02-18 17:43:04 +01:00
|
|
|
* Notify event procs of successful reset.
|
2008-09-17 06:31:08 +02:00
|
|
|
*/
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < conn->nEvents; i++)
|
|
|
|
{
|
|
|
|
PGEventConnReset evt;
|
|
|
|
|
|
|
|
evt.conn = conn;
|
2022-02-18 17:43:04 +01:00
|
|
|
(void) conn->events[i].proc(PGEVT_CONNRESET, &evt,
|
|
|
|
conn->events[i].passThrough);
|
2008-09-17 06:31:08 +02:00
|
|
|
}
|
|
|
|
}
|
1996-07-12 06:53:59 +02:00
|
|
|
}
|
1999-11-30 04:08:19 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2005-06-12 02:00:21 +02:00
|
|
|
/*
|
|
|
|
* PQresetStart:
|
|
|
|
* resets the connection to the backend
|
|
|
|
* closes the existing connection and makes a new one
|
|
|
|
* Returns 1 on success, 0 on failure.
|
|
|
|
*/
|
1999-11-30 04:08:19 +01:00
|
|
|
int
|
|
|
|
PQresetStart(PGconn *conn)
|
|
|
|
{
|
|
|
|
if (conn)
|
|
|
|
{
|
|
|
|
closePGconn(conn);
|
|
|
|
|
|
|
|
return connectDBStart(conn);
|
|
|
|
}
|
|
|
|
|
2000-01-14 06:33:15 +01:00
|
|
|
return 0;
|
1999-11-30 04:08:19 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2005-06-12 02:00:21 +02:00
|
|
|
/*
|
|
|
|
* PQresetPoll:
|
|
|
|
* resets the connection to the backend
|
|
|
|
* closes the existing connection and makes a new one
|
|
|
|
*/
|
1999-11-30 04:08:19 +01:00
|
|
|
PostgresPollingStatusType
|
|
|
|
PQresetPoll(PGconn *conn)
|
|
|
|
{
|
|
|
|
if (conn)
|
2008-09-17 06:31:08 +02:00
|
|
|
{
|
|
|
|
PostgresPollingStatusType status = PQconnectPoll(conn);
|
|
|
|
|
|
|
|
if (status == PGRES_POLLING_OK)
|
|
|
|
{
|
|
|
|
/*
|
2022-02-18 17:43:04 +01:00
|
|
|
* Notify event procs of successful reset.
|
2008-09-17 06:31:08 +02:00
|
|
|
*/
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < conn->nEvents; i++)
|
|
|
|
{
|
|
|
|
PGEventConnReset evt;
|
|
|
|
|
|
|
|
evt.conn = conn;
|
2022-02-18 17:43:04 +01:00
|
|
|
(void) conn->events[i].proc(PGEVT_CONNRESET, &evt,
|
|
|
|
conn->events[i].passThrough);
|
2008-09-17 06:31:08 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return status;
|
|
|
|
}
|
1999-11-30 04:08:19 +01:00
|
|
|
|
|
|
|
return PGRES_POLLING_FAILED;
|
1996-07-09 08:22:35 +02:00
|
|
|
}
|
|
|
|
|
2004-10-31 01:11:27 +02:00
|
|
|
/*
|
2016-04-05 11:05:01 +02:00
|
|
|
* PQgetCancel: get a PGcancel structure corresponding to a connection.
|
2004-10-31 01:11:27 +02:00
|
|
|
*
|
|
|
|
* A copy is needed to be able to cancel a running query from a different
|
|
|
|
* thread. If the same structure is used all structure members would have
|
|
|
|
* to be individually locked (if the entire structure was locked, it would
|
2006-05-19 16:26:58 +02:00
|
|
|
* be impossible to cancel a synchronous query because the structure would
|
2004-10-31 01:11:27 +02:00
|
|
|
* have to stay locked for the duration of the query).
|
|
|
|
*/
|
|
|
|
PGcancel *
|
|
|
|
PQgetCancel(PGconn *conn)
|
|
|
|
{
|
|
|
|
PGcancel *cancel;
|
|
|
|
|
|
|
|
if (!conn)
|
|
|
|
return NULL;
|
|
|
|
|
2014-04-17 01:46:51 +02:00
|
|
|
if (conn->sock == PGINVALID_SOCKET)
|
2004-10-31 01:11:27 +02:00
|
|
|
return NULL;
|
|
|
|
|
|
|
|
cancel = malloc(sizeof(PGcancel));
|
|
|
|
if (cancel == NULL)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
memcpy(&cancel->raddr, &conn->raddr, sizeof(SockAddr));
|
|
|
|
cancel->be_pid = conn->be_pid;
|
|
|
|
cancel->be_key = conn->be_key;
|
Make PQcancel use the PGconn's tcp_user_timeout and keepalives settings.
If connectivity to the server has been lost or become flaky, the
user might well try to send a query cancel. It's highly annoying
if PQcancel hangs up in such a case, but that's exactly what's likely
to happen. To ameliorate this problem, apply the PGconn's
tcp_user_timeout and keepalives settings to the TCP connection used
to send the cancel. This should be safe on Unix machines, since POSIX
specifies that setsockopt() is async-signal-safe. We are guessing
that WSAIoctl(SIO_KEEPALIVE_VALS) is similarly safe on Windows.
(Note that at least in psql and our other frontend programs, there's
no safety issue involved anyway, since we run PQcancel in its own
thread rather than in a signal handler.)
Most of the value here comes from the expectation that tcp_user_timeout
will be applied as a connection timeout. That appears to happen on
Linux, even though its tcp(7) man page claims differently. The
keepalive options probably won't help much, but as long as we can
apply them for not much code, we might as well.
Jelte Fennema, reviewed by Fujii Masao and myself
Discussion: https://postgr.es/m/AM5PR83MB017870DE81FC84D5E21E9D1EF7AA9@AM5PR83MB0178.EURPRD83.prod.outlook.com
2022-01-18 20:02:43 +01:00
|
|
|
/* We use -1 to indicate an unset connection option */
|
|
|
|
cancel->pgtcp_user_timeout = -1;
|
|
|
|
cancel->keepalives = -1;
|
|
|
|
cancel->keepalives_idle = -1;
|
|
|
|
cancel->keepalives_interval = -1;
|
|
|
|
cancel->keepalives_count = -1;
|
|
|
|
if (conn->pgtcp_user_timeout != NULL)
|
|
|
|
{
|
|
|
|
if (!parse_int_param(conn->pgtcp_user_timeout,
|
|
|
|
&cancel->pgtcp_user_timeout,
|
|
|
|
conn, "tcp_user_timeout"))
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
if (conn->keepalives != NULL)
|
|
|
|
{
|
|
|
|
if (!parse_int_param(conn->keepalives,
|
|
|
|
&cancel->keepalives,
|
|
|
|
conn, "keepalives"))
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
if (conn->keepalives_idle != NULL)
|
|
|
|
{
|
|
|
|
if (!parse_int_param(conn->keepalives_idle,
|
|
|
|
&cancel->keepalives_idle,
|
|
|
|
conn, "keepalives_idle"))
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
if (conn->keepalives_interval != NULL)
|
|
|
|
{
|
|
|
|
if (!parse_int_param(conn->keepalives_interval,
|
|
|
|
&cancel->keepalives_interval,
|
|
|
|
conn, "keepalives_interval"))
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
if (conn->keepalives_count != NULL)
|
|
|
|
{
|
|
|
|
if (!parse_int_param(conn->keepalives_count,
|
|
|
|
&cancel->keepalives_count,
|
|
|
|
conn, "keepalives_count"))
|
|
|
|
goto fail;
|
|
|
|
}
|
2004-10-31 01:11:27 +02:00
|
|
|
|
|
|
|
return cancel;
|
Make PQcancel use the PGconn's tcp_user_timeout and keepalives settings.
If connectivity to the server has been lost or become flaky, the
user might well try to send a query cancel. It's highly annoying
if PQcancel hangs up in such a case, but that's exactly what's likely
to happen. To ameliorate this problem, apply the PGconn's
tcp_user_timeout and keepalives settings to the TCP connection used
to send the cancel. This should be safe on Unix machines, since POSIX
specifies that setsockopt() is async-signal-safe. We are guessing
that WSAIoctl(SIO_KEEPALIVE_VALS) is similarly safe on Windows.
(Note that at least in psql and our other frontend programs, there's
no safety issue involved anyway, since we run PQcancel in its own
thread rather than in a signal handler.)
Most of the value here comes from the expectation that tcp_user_timeout
will be applied as a connection timeout. That appears to happen on
Linux, even though its tcp(7) man page claims differently. The
keepalive options probably won't help much, but as long as we can
apply them for not much code, we might as well.
Jelte Fennema, reviewed by Fujii Masao and myself
Discussion: https://postgr.es/m/AM5PR83MB017870DE81FC84D5E21E9D1EF7AA9@AM5PR83MB0178.EURPRD83.prod.outlook.com
2022-01-18 20:02:43 +01:00
|
|
|
|
|
|
|
fail:
|
|
|
|
free(cancel);
|
|
|
|
return NULL;
|
2004-10-31 01:11:27 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* PQfreeCancel: free a cancel structure */
|
|
|
|
void
|
|
|
|
PQfreeCancel(PGcancel *cancel)
|
|
|
|
{
|
2022-06-16 21:50:56 +02:00
|
|
|
free(cancel);
|
2004-10-31 01:11:27 +02:00
|
|
|
}
|
|
|
|
|
1998-07-09 05:29:11 +02:00
|
|
|
|
|
|
|
/*
 * Apply one integer socket option, but only if the caller supplied a
 * non-negative value; a negative value means "leave the kernel default
 * alone" and is reported as success.  Returns false only when
 * setsockopt() itself fails.
 *
 * CAUTION: This needs to be signal safe, since it's used by PQcancel.
 */
#if defined(TCP_USER_TIMEOUT) || !defined(WIN32)
static bool
optional_setsockopt(int fd, int protoid, int optid, int value)
{
	if (value < 0)
		return true;			/* option not requested; nothing to do */

	return setsockopt(fd, protoid, optid, (char *) &value, sizeof(value)) >= 0;
}
#endif
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* PQcancel: request query cancel
|
1998-07-09 05:29:11 +02:00
|
|
|
*
|
2017-08-16 06:22:32 +02:00
|
|
|
* The return value is true if the cancel request was successfully
|
|
|
|
* dispatched, false if not (in which case an error message is available).
|
1998-07-09 05:29:11 +02:00
|
|
|
* Note: successful dispatch is no guarantee that there will be any effect at
|
|
|
|
* the backend. The application must read the operation result as usual.
|
|
|
|
*
|
Make PQcancel use the PGconn's tcp_user_timeout and keepalives settings.
If connectivity to the server has been lost or become flaky, the
user might well try to send a query cancel. It's highly annoying
if PQcancel hangs up in such a case, but that's exactly what's likely
to happen. To ameliorate this problem, apply the PGconn's
tcp_user_timeout and keepalives settings to the TCP connection used
to send the cancel. This should be safe on Unix machines, since POSIX
specifies that setsockopt() is async-signal-safe. We are guessing
that WSAIoctl(SIO_KEEPALIVE_VALS) is similarly safe on Windows.
(Note that at least in psql and our other frontend programs, there's
no safety issue involved anyway, since we run PQcancel in its own
thread rather than in a signal handler.)
Most of the value here comes from the expectation that tcp_user_timeout
will be applied as a connection timeout. That appears to happen on
Linux, even though its tcp(7) man page claims differently. The
keepalive options probably won't help much, but as long as we can
apply them for not much code, we might as well.
Jelte Fennema, reviewed by Fujii Masao and myself
Discussion: https://postgr.es/m/AM5PR83MB017870DE81FC84D5E21E9D1EF7AA9@AM5PR83MB0178.EURPRD83.prod.outlook.com
2022-01-18 20:02:43 +01:00
|
|
|
* On failure, an error message is stored in *errbuf, which must be of size
|
|
|
|
* errbufsize (recommended size is 256 bytes). *errbuf is not changed on
|
|
|
|
* success return.
|
|
|
|
*
|
1998-07-09 05:29:11 +02:00
|
|
|
* CAUTION: we want this routine to be safely callable from a signal handler
|
|
|
|
* (for example, an application might want to call it in a SIGINT handler).
|
|
|
|
* This means we cannot use any C library routine that might be non-reentrant.
|
|
|
|
* malloc/free are often non-reentrant, and anything that might call them is
|
|
|
|
* just as dangerous. We avoid sprintf here for that reason. Building up
|
|
|
|
* error messages with strcpy/strcat is tedious but should be quite safe.
|
2000-12-18 18:33:42 +01:00
|
|
|
* We also save/restore errno in case the signal handler support doesn't.
|
1998-07-09 05:29:11 +02:00
|
|
|
*/
|
Make PQcancel use the PGconn's tcp_user_timeout and keepalives settings.
If connectivity to the server has been lost or become flaky, the
user might well try to send a query cancel. It's highly annoying
if PQcancel hangs up in such a case, but that's exactly what's likely
to happen. To ameliorate this problem, apply the PGconn's
tcp_user_timeout and keepalives settings to the TCP connection used
to send the cancel. This should be safe on Unix machines, since POSIX
specifies that setsockopt() is async-signal-safe. We are guessing
that WSAIoctl(SIO_KEEPALIVE_VALS) is similarly safe on Windows.
(Note that at least in psql and our other frontend programs, there's
no safety issue involved anyway, since we run PQcancel in its own
thread rather than in a signal handler.)
Most of the value here comes from the expectation that tcp_user_timeout
will be applied as a connection timeout. That appears to happen on
Linux, even though its tcp(7) man page claims differently. The
keepalive options probably won't help much, but as long as we can
apply them for not much code, we might as well.
Jelte Fennema, reviewed by Fujii Masao and myself
Discussion: https://postgr.es/m/AM5PR83MB017870DE81FC84D5E21E9D1EF7AA9@AM5PR83MB0178.EURPRD83.prod.outlook.com
2022-01-18 20:02:43 +01:00
|
|
|
int
|
|
|
|
PQcancel(PGcancel *cancel, char *errbuf, int errbufsize)
|
1998-07-09 05:29:11 +02:00
|
|
|
{
|
2001-08-21 22:39:54 +02:00
|
|
|
int save_errno = SOCK_ERRNO;
|
2014-04-16 16:45:48 +02:00
|
|
|
pgsocket tmpsock = PGINVALID_SOCKET;
|
2004-10-31 01:11:27 +02:00
|
|
|
int maxlen;
|
1998-07-09 05:29:11 +02:00
|
|
|
struct
|
|
|
|
{
|
|
|
|
uint32 packetlen;
|
|
|
|
CancelRequestPacket cp;
|
|
|
|
} crp;
|
|
|
|
|
Make PQcancel use the PGconn's tcp_user_timeout and keepalives settings.
If connectivity to the server has been lost or become flaky, the
user might well try to send a query cancel. It's highly annoying
if PQcancel hangs up in such a case, but that's exactly what's likely
to happen. To ameliorate this problem, apply the PGconn's
tcp_user_timeout and keepalives settings to the TCP connection used
to send the cancel. This should be safe on Unix machines, since POSIX
specifies that setsockopt() is async-signal-safe. We are guessing
that WSAIoctl(SIO_KEEPALIVE_VALS) is similarly safe on Windows.
(Note that at least in psql and our other frontend programs, there's
no safety issue involved anyway, since we run PQcancel in its own
thread rather than in a signal handler.)
Most of the value here comes from the expectation that tcp_user_timeout
will be applied as a connection timeout. That appears to happen on
Linux, even though its tcp(7) man page claims differently. The
keepalive options probably won't help much, but as long as we can
apply them for not much code, we might as well.
Jelte Fennema, reviewed by Fujii Masao and myself
Discussion: https://postgr.es/m/AM5PR83MB017870DE81FC84D5E21E9D1EF7AA9@AM5PR83MB0178.EURPRD83.prod.outlook.com
2022-01-18 20:02:43 +01:00
|
|
|
if (!cancel)
|
|
|
|
{
|
|
|
|
strlcpy(errbuf, "PQcancel() -- no cancel object supplied", errbufsize);
|
|
|
|
/* strlcpy probably doesn't change errno, but be paranoid */
|
|
|
|
SOCK_ERRNO_SET(save_errno);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
1998-07-09 05:29:11 +02:00
|
|
|
/*
|
2004-10-31 01:11:27 +02:00
|
|
|
* We need to open a temporary connection to the postmaster. Do this with
|
|
|
|
* only kernel calls.
|
1998-07-09 05:29:11 +02:00
|
|
|
*/
|
Make PQcancel use the PGconn's tcp_user_timeout and keepalives settings.
If connectivity to the server has been lost or become flaky, the
user might well try to send a query cancel. It's highly annoying
if PQcancel hangs up in such a case, but that's exactly what's likely
to happen. To ameliorate this problem, apply the PGconn's
tcp_user_timeout and keepalives settings to the TCP connection used
to send the cancel. This should be safe on Unix machines, since POSIX
specifies that setsockopt() is async-signal-safe. We are guessing
that WSAIoctl(SIO_KEEPALIVE_VALS) is similarly safe on Windows.
(Note that at least in psql and our other frontend programs, there's
no safety issue involved anyway, since we run PQcancel in its own
thread rather than in a signal handler.)
Most of the value here comes from the expectation that tcp_user_timeout
will be applied as a connection timeout. That appears to happen on
Linux, even though its tcp(7) man page claims differently. The
keepalive options probably won't help much, but as long as we can
apply them for not much code, we might as well.
Jelte Fennema, reviewed by Fujii Masao and myself
Discussion: https://postgr.es/m/AM5PR83MB017870DE81FC84D5E21E9D1EF7AA9@AM5PR83MB0178.EURPRD83.prod.outlook.com
2022-01-18 20:02:43 +01:00
|
|
|
if ((tmpsock = socket(cancel->raddr.addr.ss_family, SOCK_STREAM, 0)) == PGINVALID_SOCKET)
|
1998-07-09 05:29:11 +02:00
|
|
|
{
|
2007-02-10 15:58:55 +01:00
|
|
|
strlcpy(errbuf, "PQcancel() -- socket() failed: ", errbufsize);
|
1998-07-09 05:29:11 +02:00
|
|
|
goto cancel_errReturn;
|
|
|
|
}
|
Make PQcancel use the PGconn's tcp_user_timeout and keepalives settings.
If connectivity to the server has been lost or become flaky, the
user might well try to send a query cancel. It's highly annoying
if PQcancel hangs up in such a case, but that's exactly what's likely
to happen. To ameliorate this problem, apply the PGconn's
tcp_user_timeout and keepalives settings to the TCP connection used
to send the cancel. This should be safe on Unix machines, since POSIX
specifies that setsockopt() is async-signal-safe. We are guessing
that WSAIoctl(SIO_KEEPALIVE_VALS) is similarly safe on Windows.
(Note that at least in psql and our other frontend programs, there's
no safety issue involved anyway, since we run PQcancel in its own
thread rather than in a signal handler.)
Most of the value here comes from the expectation that tcp_user_timeout
will be applied as a connection timeout. That appears to happen on
Linux, even though its tcp(7) man page claims differently. The
keepalive options probably won't help much, but as long as we can
apply them for not much code, we might as well.
Jelte Fennema, reviewed by Fujii Masao and myself
Discussion: https://postgr.es/m/AM5PR83MB017870DE81FC84D5E21E9D1EF7AA9@AM5PR83MB0178.EURPRD83.prod.outlook.com
2022-01-18 20:02:43 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Since this connection will only be used to send a single packet of
|
|
|
|
* data, we don't need NODELAY. We also don't set the socket to
|
|
|
|
* nonblocking mode, because the API definition of PQcancel requires the
|
|
|
|
* cancel to be sent in a blocking way.
|
|
|
|
*
|
|
|
|
* We do set socket options related to keepalives and other TCP timeouts.
|
|
|
|
* This ensures that this function does not block indefinitely when
|
|
|
|
* reasonable keepalive and timeout settings have been provided.
|
|
|
|
*/
|
2022-02-15 10:03:52 +01:00
|
|
|
if (cancel->raddr.addr.ss_family != AF_UNIX &&
|
Make PQcancel use the PGconn's tcp_user_timeout and keepalives settings.
If connectivity to the server has been lost or become flaky, the
user might well try to send a query cancel. It's highly annoying
if PQcancel hangs up in such a case, but that's exactly what's likely
to happen. To ameliorate this problem, apply the PGconn's
tcp_user_timeout and keepalives settings to the TCP connection used
to send the cancel. This should be safe on Unix machines, since POSIX
specifies that setsockopt() is async-signal-safe. We are guessing
that WSAIoctl(SIO_KEEPALIVE_VALS) is similarly safe on Windows.
(Note that at least in psql and our other frontend programs, there's
no safety issue involved anyway, since we run PQcancel in its own
thread rather than in a signal handler.)
Most of the value here comes from the expectation that tcp_user_timeout
will be applied as a connection timeout. That appears to happen on
Linux, even though its tcp(7) man page claims differently. The
keepalive options probably won't help much, but as long as we can
apply them for not much code, we might as well.
Jelte Fennema, reviewed by Fujii Masao and myself
Discussion: https://postgr.es/m/AM5PR83MB017870DE81FC84D5E21E9D1EF7AA9@AM5PR83MB0178.EURPRD83.prod.outlook.com
2022-01-18 20:02:43 +01:00
|
|
|
cancel->keepalives != 0)
|
|
|
|
{
|
|
|
|
#ifndef WIN32
|
|
|
|
if (!optional_setsockopt(tmpsock, SOL_SOCKET, SO_KEEPALIVE, 1))
|
|
|
|
{
|
|
|
|
strlcpy(errbuf, "PQcancel() -- setsockopt(SO_KEEPALIVE) failed: ", errbufsize);
|
|
|
|
goto cancel_errReturn;
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef PG_TCP_KEEPALIVE_IDLE
|
|
|
|
if (!optional_setsockopt(tmpsock, IPPROTO_TCP, PG_TCP_KEEPALIVE_IDLE,
|
|
|
|
cancel->keepalives_idle))
|
|
|
|
{
|
|
|
|
strlcpy(errbuf, "PQcancel() -- setsockopt(" PG_TCP_KEEPALIVE_IDLE_STR ") failed: ", errbufsize);
|
|
|
|
goto cancel_errReturn;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifdef TCP_KEEPINTVL
|
|
|
|
if (!optional_setsockopt(tmpsock, IPPROTO_TCP, TCP_KEEPINTVL,
|
|
|
|
cancel->keepalives_interval))
|
|
|
|
{
|
|
|
|
strlcpy(errbuf, "PQcancel() -- setsockopt(TCP_KEEPINTVL) failed: ", errbufsize);
|
|
|
|
goto cancel_errReturn;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifdef TCP_KEEPCNT
|
|
|
|
if (!optional_setsockopt(tmpsock, IPPROTO_TCP, TCP_KEEPCNT,
|
|
|
|
cancel->keepalives_count))
|
|
|
|
{
|
|
|
|
strlcpy(errbuf, "PQcancel() -- setsockopt(TCP_KEEPCNT) failed: ", errbufsize);
|
|
|
|
goto cancel_errReturn;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#else /* WIN32 */
|
|
|
|
|
|
|
|
#ifdef SIO_KEEPALIVE_VALS
|
|
|
|
if (!setKeepalivesWin32(tmpsock,
|
|
|
|
cancel->keepalives_idle,
|
|
|
|
cancel->keepalives_interval))
|
|
|
|
{
|
|
|
|
strlcpy(errbuf, "PQcancel() -- WSAIoctl(SIO_KEEPALIVE_VALS) failed: ", errbufsize);
|
|
|
|
goto cancel_errReturn;
|
|
|
|
}
|
|
|
|
#endif /* SIO_KEEPALIVE_VALS */
|
|
|
|
#endif /* WIN32 */
|
|
|
|
|
|
|
|
/* TCP_USER_TIMEOUT works the same way on Unix and Windows */
|
|
|
|
#ifdef TCP_USER_TIMEOUT
|
|
|
|
if (!optional_setsockopt(tmpsock, IPPROTO_TCP, TCP_USER_TIMEOUT,
|
|
|
|
cancel->pgtcp_user_timeout))
|
|
|
|
{
|
|
|
|
strlcpy(errbuf, "PQcancel() -- setsockopt(TCP_USER_TIMEOUT) failed: ", errbufsize);
|
|
|
|
goto cancel_errReturn;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2002-04-16 01:34:17 +02:00
|
|
|
retry3:
|
Make PQcancel use the PGconn's tcp_user_timeout and keepalives settings.
If connectivity to the server has been lost or become flaky, the
user might well try to send a query cancel. It's highly annoying
if PQcancel hangs up in such a case, but that's exactly what's likely
to happen. To ameliorate this problem, apply the PGconn's
tcp_user_timeout and keepalives settings to the TCP connection used
to send the cancel. This should be safe on Unix machines, since POSIX
specifies that setsockopt() is async-signal-safe. We are guessing
that WSAIoctl(SIO_KEEPALIVE_VALS) is similarly safe on Windows.
(Note that at least in psql and our other frontend programs, there's
no safety issue involved anyway, since we run PQcancel in its own
thread rather than in a signal handler.)
Most of the value here comes from the expectation that tcp_user_timeout
will be applied as a connection timeout. That appears to happen on
Linux, even though its tcp(7) man page claims differently. The
keepalive options probably won't help much, but as long as we can
apply them for not much code, we might as well.
Jelte Fennema, reviewed by Fujii Masao and myself
Discussion: https://postgr.es/m/AM5PR83MB017870DE81FC84D5E21E9D1EF7AA9@AM5PR83MB0178.EURPRD83.prod.outlook.com
2022-01-18 20:02:43 +01:00
|
|
|
if (connect(tmpsock, (struct sockaddr *) &cancel->raddr.addr,
|
|
|
|
cancel->raddr.salen) < 0)
|
1998-07-09 05:29:11 +02:00
|
|
|
{
|
2002-04-16 01:34:17 +02:00
|
|
|
if (SOCK_ERRNO == EINTR)
|
|
|
|
/* Interrupted system call - we'll just try again */
|
|
|
|
goto retry3;
|
2007-02-10 15:58:55 +01:00
|
|
|
strlcpy(errbuf, "PQcancel() -- connect() failed: ", errbufsize);
|
1998-07-09 05:29:11 +02:00
|
|
|
goto cancel_errReturn;
|
|
|
|
}
|
1998-09-01 06:40:42 +02:00
|
|
|
|
1998-07-09 05:29:11 +02:00
|
|
|
/* Create and send the cancel request packet. */
|
|
|
|
|
2017-10-02 00:36:14 +02:00
|
|
|
crp.packetlen = pg_hton32((uint32) sizeof(crp));
|
|
|
|
crp.cp.cancelRequestCode = (MsgType) pg_hton32(CANCEL_REQUEST_CODE);
|
Make PQcancel use the PGconn's tcp_user_timeout and keepalives settings.
If connectivity to the server has been lost or become flaky, the
user might well try to send a query cancel. It's highly annoying
if PQcancel hangs up in such a case, but that's exactly what's likely
to happen. To ameliorate this problem, apply the PGconn's
tcp_user_timeout and keepalives settings to the TCP connection used
to send the cancel. This should be safe on Unix machines, since POSIX
specifies that setsockopt() is async-signal-safe. We are guessing
that WSAIoctl(SIO_KEEPALIVE_VALS) is similarly safe on Windows.
(Note that at least in psql and our other frontend programs, there's
no safety issue involved anyway, since we run PQcancel in its own
thread rather than in a signal handler.)
Most of the value here comes from the expectation that tcp_user_timeout
will be applied as a connection timeout. That appears to happen on
Linux, even though its tcp(7) man page claims differently. The
keepalive options probably won't help much, but as long as we can
apply them for not much code, we might as well.
Jelte Fennema, reviewed by Fujii Masao and myself
Discussion: https://postgr.es/m/AM5PR83MB017870DE81FC84D5E21E9D1EF7AA9@AM5PR83MB0178.EURPRD83.prod.outlook.com
2022-01-18 20:02:43 +01:00
|
|
|
crp.cp.backendPID = pg_hton32(cancel->be_pid);
|
|
|
|
crp.cp.cancelAuthCode = pg_hton32(cancel->be_key);
|
1998-07-09 05:29:11 +02:00
|
|
|
|
2002-04-16 01:34:17 +02:00
|
|
|
retry4:
|
1998-07-09 05:29:11 +02:00
|
|
|
if (send(tmpsock, (char *) &crp, sizeof(crp), 0) != (int) sizeof(crp))
|
|
|
|
{
|
2002-04-16 01:34:17 +02:00
|
|
|
if (SOCK_ERRNO == EINTR)
|
|
|
|
/* Interrupted system call - we'll just try again */
|
|
|
|
goto retry4;
|
2007-02-10 15:58:55 +01:00
|
|
|
strlcpy(errbuf, "PQcancel() -- send() failed: ", errbufsize);
|
1998-07-09 05:29:11 +02:00
|
|
|
goto cancel_errReturn;
|
|
|
|
}
|
|
|
|
|
2003-10-02 21:52:44 +02:00
|
|
|
/*
|
|
|
|
* Wait for the postmaster to close the connection, which indicates that
|
|
|
|
* it's processed the request. Without this delay, we might issue another
|
|
|
|
* command only to find that our cancel zaps that command instead of the
|
|
|
|
* one we thought we were canceling. Note we don't actually expect this
|
|
|
|
* read to obtain any data, we are just waiting for EOF to be signaled.
|
|
|
|
*/
|
|
|
|
retry5:
|
|
|
|
if (recv(tmpsock, (char *) &crp, 1, 0) < 0)
|
|
|
|
{
|
|
|
|
if (SOCK_ERRNO == EINTR)
|
|
|
|
/* Interrupted system call - we'll just try again */
|
|
|
|
goto retry5;
|
|
|
|
/* we ignore other error conditions */
|
|
|
|
}
|
|
|
|
|
|
|
|
/* All done */
|
1998-07-09 05:29:11 +02:00
|
|
|
closesocket(tmpsock);
|
2005-01-05 00:18:25 +01:00
|
|
|
SOCK_ERRNO_SET(save_errno);
|
2017-08-16 06:22:32 +02:00
|
|
|
return true;
|
1998-07-09 05:29:11 +02:00
|
|
|
|
|
|
|
cancel_errReturn:
|
2005-10-15 04:49:52 +02:00
|
|
|
|
2004-10-31 01:11:27 +02:00
|
|
|
/*
|
|
|
|
* Make sure we don't overflow the error buffer. Leave space for the \n at
|
|
|
|
* the end, and for the terminating zero.
|
|
|
|
*/
|
|
|
|
maxlen = errbufsize - strlen(errbuf) - 2;
|
|
|
|
if (maxlen >= 0)
|
1998-07-09 05:29:11 +02:00
|
|
|
{
|
Avoid calling strerror[_r] in PQcancel().
PQcancel() is supposed to be safe to call from a signal handler,
and indeed psql uses it that way. All of the library functions
it uses are specified to be async-signal-safe by POSIX ...
except for strerror. Neither plain strerror nor strerror_r
are considered safe. When this code was written, back in the
dark ages, we probably figured "oh, strerror will just index
into a constant array of strings" ... but in any locale except C,
that's unlikely to be true. Probably the reason we've not heard
complaints is that (a) this error-handling code is unlikely to be
reached in normal use, and (b) in many scenarios, localized error
strings would already have been loaded, after which maybe it's
safe to call strerror here. Still, this is clearly unacceptable.
The best we can do without relying on strerror is to print the
decimal value of errno, so make it do that instead. (This is
probably not much loss of user-friendliness, given that it is
hard to get a failure here.)
Back-patch to all supported branches.
Discussion: https://postgr.es/m/2937814.1641960929@sss.pgh.pa.us
2022-01-17 18:52:44 +01:00
|
|
|
/*
|
|
|
|
* We can't invoke strerror here, since it's not signal-safe. Settle
|
|
|
|
* for printing the decimal value of errno. Even that has to be done
|
|
|
|
* the hard way.
|
|
|
|
*/
|
|
|
|
int val = SOCK_ERRNO;
|
|
|
|
char buf[32];
|
|
|
|
char *bufp;
|
|
|
|
|
|
|
|
bufp = buf + sizeof(buf) - 1;
|
|
|
|
*bufp = '\0';
|
|
|
|
do
|
|
|
|
{
|
|
|
|
*(--bufp) = (val % 10) + '0';
|
|
|
|
val /= 10;
|
|
|
|
} while (val > 0);
|
|
|
|
bufp -= 6;
|
|
|
|
memcpy(bufp, "error ", 6);
|
|
|
|
strncat(errbuf, bufp, maxlen);
|
2004-10-31 01:11:27 +02:00
|
|
|
strcat(errbuf, "\n");
|
|
|
|
}
|
2014-04-16 16:45:48 +02:00
|
|
|
if (tmpsock != PGINVALID_SOCKET)
|
1998-07-09 05:29:11 +02:00
|
|
|
closesocket(tmpsock);
|
2005-01-05 00:18:25 +01:00
|
|
|
SOCK_ERRNO_SET(save_errno);
|
2017-08-16 06:22:32 +02:00
|
|
|
return false;
|
1998-07-09 05:29:11 +02:00
|
|
|
}
|
|
|
|
|
2004-10-31 01:11:27 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* PQrequestCancel: old, not thread-safe function for requesting query cancel
|
|
|
|
*
|
2017-08-16 06:22:32 +02:00
|
|
|
* Returns true if able to send the cancel request, false if not.
|
2004-10-31 01:11:27 +02:00
|
|
|
*
|
|
|
|
* On failure, the error message is saved in conn->errorMessage; this means
|
|
|
|
* that this can't be used when there might be other active operations on
|
|
|
|
* the connection object.
|
|
|
|
*
|
|
|
|
* NOTE: error messages will be cut off at the current size of the
|
|
|
|
* error message buffer, since we dare not try to expand conn->errorMessage!
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
PQrequestCancel(PGconn *conn)
|
|
|
|
{
|
|
|
|
int r;
|
Make PQcancel use the PGconn's tcp_user_timeout and keepalives settings.
If connectivity to the server has been lost or become flaky, the
user might well try to send a query cancel. It's highly annoying
if PQcancel hangs up in such a case, but that's exactly what's likely
to happen. To ameliorate this problem, apply the PGconn's
tcp_user_timeout and keepalives settings to the TCP connection used
to send the cancel. This should be safe on Unix machines, since POSIX
specifies that setsockopt() is async-signal-safe. We are guessing
that WSAIoctl(SIO_KEEPALIVE_VALS) is similarly safe on Windows.
(Note that at least in psql and our other frontend programs, there's
no safety issue involved anyway, since we run PQcancel in its own
thread rather than in a signal handler.)
Most of the value here comes from the expectation that tcp_user_timeout
will be applied as a connection timeout. That appears to happen on
Linux, even though its tcp(7) man page claims differently. The
keepalive options probably won't help much, but as long as we can
apply them for not much code, we might as well.
Jelte Fennema, reviewed by Fujii Masao and myself
Discussion: https://postgr.es/m/AM5PR83MB017870DE81FC84D5E21E9D1EF7AA9@AM5PR83MB0178.EURPRD83.prod.outlook.com
2022-01-18 20:02:43 +01:00
|
|
|
PGcancel *cancel;
|
2004-10-31 01:11:27 +02:00
|
|
|
|
|
|
|
/* Check we have an open connection */
|
|
|
|
if (!conn)
|
2017-08-16 06:22:32 +02:00
|
|
|
return false;
|
2004-10-31 01:11:27 +02:00
|
|
|
|
2014-04-17 01:46:51 +02:00
|
|
|
if (conn->sock == PGINVALID_SOCKET)
|
2004-10-31 01:11:27 +02:00
|
|
|
{
|
2007-02-10 15:58:55 +01:00
|
|
|
strlcpy(conn->errorMessage.data,
|
2004-10-31 01:11:27 +02:00
|
|
|
"PQrequestCancel() -- connection is not open\n",
|
|
|
|
conn->errorMessage.maxlen);
|
|
|
|
conn->errorMessage.len = strlen(conn->errorMessage.data);
|
2022-02-20 21:02:41 +01:00
|
|
|
conn->errorReported = 0;
|
2004-10-31 01:11:27 +02:00
|
|
|
|
2017-08-16 06:22:32 +02:00
|
|
|
return false;
|
2004-10-31 01:11:27 +02:00
|
|
|
}
|
|
|
|
|
Make PQcancel use the PGconn's tcp_user_timeout and keepalives settings.
If connectivity to the server has been lost or become flaky, the
user might well try to send a query cancel. It's highly annoying
if PQcancel hangs up in such a case, but that's exactly what's likely
to happen. To ameliorate this problem, apply the PGconn's
tcp_user_timeout and keepalives settings to the TCP connection used
to send the cancel. This should be safe on Unix machines, since POSIX
specifies that setsockopt() is async-signal-safe. We are guessing
that WSAIoctl(SIO_KEEPALIVE_VALS) is similarly safe on Windows.
(Note that at least in psql and our other frontend programs, there's
no safety issue involved anyway, since we run PQcancel in its own
thread rather than in a signal handler.)
Most of the value here comes from the expectation that tcp_user_timeout
will be applied as a connection timeout. That appears to happen on
Linux, even though its tcp(7) man page claims differently. The
keepalive options probably won't help much, but as long as we can
apply them for not much code, we might as well.
Jelte Fennema, reviewed by Fujii Masao and myself
Discussion: https://postgr.es/m/AM5PR83MB017870DE81FC84D5E21E9D1EF7AA9@AM5PR83MB0178.EURPRD83.prod.outlook.com
2022-01-18 20:02:43 +01:00
|
|
|
cancel = PQgetCancel(conn);
|
|
|
|
if (cancel)
|
|
|
|
{
|
|
|
|
r = PQcancel(cancel, conn->errorMessage.data,
|
|
|
|
conn->errorMessage.maxlen);
|
|
|
|
PQfreeCancel(cancel);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
strlcpy(conn->errorMessage.data, "out of memory",
|
|
|
|
conn->errorMessage.maxlen);
|
|
|
|
r = false;
|
|
|
|
}
|
2004-10-31 01:11:27 +02:00
|
|
|
|
|
|
|
if (!r)
|
2022-02-20 21:02:41 +01:00
|
|
|
{
|
2004-10-31 01:11:27 +02:00
|
|
|
conn->errorMessage.len = strlen(conn->errorMessage.data);
|
2022-02-20 21:02:41 +01:00
|
|
|
conn->errorReported = 0;
|
|
|
|
}
|
2004-10-31 01:11:27 +02:00
|
|
|
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
1998-07-09 05:29:11 +02:00
|
|
|
|
1996-07-09 08:22:35 +02:00
|
|
|
/*
|
2003-04-19 02:02:30 +02:00
|
|
|
* pqPacketSend() -- convenience routine to send a message to server.
|
2003-04-18 00:26:02 +02:00
|
|
|
*
|
|
|
|
* pack_type: the single-byte message type code. (Pass zero for startup
|
|
|
|
* packets, which have no message type code.)
|
|
|
|
*
|
|
|
|
* buf, buf_len: contents of message. The given length includes only what
|
|
|
|
* is in buf; the message type and message length fields are added here.
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
|
|
|
* RETURNS: STATUS_ERROR if the write fails, STATUS_OK otherwise.
|
|
|
|
* SIDE_EFFECTS: may block.
|
2003-06-08 19:43:00 +02:00
|
|
|
*/
|
1997-03-12 22:23:16 +01:00
|
|
|
int
|
2003-04-18 00:26:02 +02:00
|
|
|
pqPacketSend(PGconn *conn, char pack_type,
|
|
|
|
const void *buf, size_t buf_len)
|
1996-07-09 08:22:35 +02:00
|
|
|
{
|
2003-04-19 02:02:30 +02:00
|
|
|
/* Start the message. */
|
2021-03-04 09:45:55 +01:00
|
|
|
if (pqPutMsgStart(pack_type, conn))
|
1998-01-26 02:42:53 +01:00
|
|
|
return STATUS_ERROR;
|
1996-07-09 08:22:35 +02:00
|
|
|
|
2003-04-18 00:26:02 +02:00
|
|
|
/* Send the message body. */
|
|
|
|
if (pqPutnchar(buf, buf_len, conn))
|
1998-01-26 02:42:53 +01:00
|
|
|
return STATUS_ERROR;
|
1997-12-04 01:28:15 +01:00
|
|
|
|
2003-04-19 02:02:30 +02:00
|
|
|
/* Finish the message. */
|
|
|
|
if (pqPutMsgEnd(conn))
|
|
|
|
return STATUS_ERROR;
|
|
|
|
|
2003-04-18 00:26:02 +02:00
|
|
|
/* Flush to ensure backend gets it. */
|
1998-05-07 01:51:16 +02:00
|
|
|
if (pqFlush(conn))
|
|
|
|
return STATUS_ERROR;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
1998-01-26 02:42:53 +01:00
|
|
|
return STATUS_OK;
|
1996-07-09 08:22:35 +02:00
|
|
|
}
|
|
|
|
|
2006-07-27 15:20:24 +02:00
|
|
|
#ifdef USE_LDAP
|
|
|
|
|
|
|
|
#define LDAP_URL "ldap://"
|
|
|
|
#define LDAP_DEF_PORT 389
|
|
|
|
#define PGLDAP_TIMEOUT 2
|
|
|
|
|
|
|
|
#define ld_is_sp_tab(x) ((x) == ' ' || (x) == '\t')
|
|
|
|
#define ld_is_nl_cr(x) ((x) == '\r' || (x) == '\n')
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* ldapServiceLookup
|
|
|
|
*
|
|
|
|
* Search the LDAP URL passed as first argument, treat the result as a
|
|
|
|
* string of connection options that are parsed and added to the array of
|
|
|
|
* options passed as second argument.
|
|
|
|
*
|
|
|
|
* LDAP URLs must conform to RFC 1959 without escape sequences.
|
|
|
|
* ldap://host:port/dn?attributes?scope?filter?extensions
|
|
|
|
*
|
|
|
|
* Returns
|
|
|
|
* 0 if the lookup was successful,
|
|
|
|
* 1 if the connection to the LDAP server could be established but
|
|
|
|
* the search was unsuccessful,
|
|
|
|
* 2 if a connection could not be established, and
|
|
|
|
* 3 if a fatal error occurred.
|
|
|
|
*
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
* An error message is appended to *errorMessage for return codes 1 and 3.
|
2006-07-27 15:20:24 +02:00
|
|
|
*/
|
|
|
|
static int
|
|
|
|
ldapServiceLookup(const char *purl, PQconninfoOption *options,
|
|
|
|
PQExpBuffer errorMessage)
|
|
|
|
{
|
|
|
|
int port = LDAP_DEF_PORT,
|
|
|
|
scope,
|
|
|
|
rc,
|
|
|
|
size,
|
|
|
|
state,
|
|
|
|
oldstate,
|
|
|
|
i;
|
2014-04-17 22:12:24 +02:00
|
|
|
#ifndef WIN32
|
|
|
|
int msgid;
|
|
|
|
#endif
|
2006-07-27 15:20:24 +02:00
|
|
|
bool found_keyword;
|
|
|
|
char *url,
|
|
|
|
*hostname,
|
|
|
|
*portstr,
|
|
|
|
*endptr,
|
|
|
|
*dn,
|
|
|
|
*scopestr,
|
|
|
|
*filter,
|
|
|
|
*result,
|
|
|
|
*p,
|
|
|
|
*p1 = NULL,
|
|
|
|
*optname = NULL,
|
|
|
|
*optval = NULL;
|
|
|
|
char *attrs[2] = {NULL, NULL};
|
|
|
|
LDAP *ld = NULL;
|
|
|
|
LDAPMessage *res,
|
|
|
|
*entry;
|
|
|
|
struct berval **values;
|
|
|
|
LDAP_TIMEVAL time = {PGLDAP_TIMEOUT, 0};
|
|
|
|
|
|
|
|
if ((url = strdup(purl)) == NULL)
|
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBufferStr(errorMessage, libpq_gettext("out of memory\n"));
|
2006-07-27 15:20:24 +02:00
|
|
|
return 3;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Parse URL components, check for correctness. Basically, url has '\0'
|
|
|
|
* placed at component boundaries and variables are pointed at each
|
|
|
|
* component.
|
|
|
|
*/
|
|
|
|
|
2006-09-15 23:34:23 +02:00
|
|
|
if (pg_strncasecmp(url, LDAP_URL, strlen(LDAP_URL)) != 0)
|
2006-07-27 15:20:24 +02:00
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBuffer(errorMessage,
|
2006-10-06 19:14:01 +02:00
|
|
|
libpq_gettext("invalid LDAP URL \"%s\": scheme must be ldap://\n"), purl);
|
2006-07-27 15:20:24 +02:00
|
|
|
free(url);
|
|
|
|
return 3;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* hostname */
|
|
|
|
hostname = url + strlen(LDAP_URL);
|
|
|
|
if (*hostname == '/') /* no hostname? */
|
2010-12-18 17:25:41 +01:00
|
|
|
hostname = DefaultHost; /* the default */
|
2006-07-27 15:20:24 +02:00
|
|
|
|
|
|
|
/* dn, "distinguished name" */
|
|
|
|
p = strchr(url + strlen(LDAP_URL), '/');
|
|
|
|
if (p == NULL || *(p + 1) == '\0' || *(p + 1) == '?')
|
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBuffer(errorMessage,
|
2006-10-06 19:14:01 +02:00
|
|
|
libpq_gettext("invalid LDAP URL \"%s\": missing distinguished name\n"),
|
|
|
|
purl);
|
2006-07-27 15:20:24 +02:00
|
|
|
free(url);
|
|
|
|
return 3;
|
|
|
|
}
|
|
|
|
*p = '\0'; /* terminate hostname */
|
|
|
|
dn = p + 1;
|
|
|
|
|
|
|
|
/* attribute */
|
|
|
|
if ((p = strchr(dn, '?')) == NULL || *(p + 1) == '\0' || *(p + 1) == '?')
|
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBuffer(errorMessage,
|
2006-10-06 19:14:01 +02:00
|
|
|
libpq_gettext("invalid LDAP URL \"%s\": must have exactly one attribute\n"),
|
|
|
|
purl);
|
2006-07-27 15:20:24 +02:00
|
|
|
free(url);
|
|
|
|
return 3;
|
|
|
|
}
|
|
|
|
*p = '\0';
|
|
|
|
attrs[0] = p + 1;
|
|
|
|
|
|
|
|
/* scope */
|
|
|
|
if ((p = strchr(attrs[0], '?')) == NULL || *(p + 1) == '\0' || *(p + 1) == '?')
|
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBuffer(errorMessage,
|
|
|
|
libpq_gettext("invalid LDAP URL \"%s\": must have search scope (base/one/sub)\n"),
|
|
|
|
purl);
|
2006-07-27 15:20:24 +02:00
|
|
|
free(url);
|
|
|
|
return 3;
|
|
|
|
}
|
|
|
|
*p = '\0';
|
|
|
|
scopestr = p + 1;
|
|
|
|
|
|
|
|
/* filter */
|
|
|
|
if ((p = strchr(scopestr, '?')) == NULL || *(p + 1) == '\0' || *(p + 1) == '?')
|
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBuffer(errorMessage,
|
|
|
|
libpq_gettext("invalid LDAP URL \"%s\": no filter\n"),
|
|
|
|
purl);
|
2006-07-27 15:20:24 +02:00
|
|
|
free(url);
|
|
|
|
return 3;
|
|
|
|
}
|
|
|
|
*p = '\0';
|
|
|
|
filter = p + 1;
|
|
|
|
if ((p = strchr(filter, '?')) != NULL)
|
|
|
|
*p = '\0';
|
2000-10-17 19:43:13 +02:00
|
|
|
|
2006-07-27 15:20:24 +02:00
|
|
|
/* port number? */
|
|
|
|
if ((p1 = strchr(hostname, ':')) != NULL)
|
|
|
|
{
|
|
|
|
long lport;
|
|
|
|
|
|
|
|
*p1 = '\0';
|
|
|
|
portstr = p1 + 1;
|
|
|
|
errno = 0;
|
|
|
|
lport = strtol(portstr, &endptr, 10);
|
|
|
|
if (*portstr == '\0' || *endptr != '\0' || errno || lport < 0 || lport > 65535)
|
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBuffer(errorMessage,
|
2006-10-06 19:14:01 +02:00
|
|
|
libpq_gettext("invalid LDAP URL \"%s\": invalid port number\n"),
|
|
|
|
purl);
|
2006-07-27 15:20:24 +02:00
|
|
|
free(url);
|
|
|
|
return 3;
|
|
|
|
}
|
|
|
|
port = (int) lport;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Allow only one attribute */
|
|
|
|
if (strchr(attrs[0], ',') != NULL)
|
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBuffer(errorMessage,
|
2006-10-06 19:14:01 +02:00
|
|
|
libpq_gettext("invalid LDAP URL \"%s\": must have exactly one attribute\n"),
|
|
|
|
purl);
|
2006-07-27 15:20:24 +02:00
|
|
|
free(url);
|
|
|
|
return 3;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* set scope */
|
2006-09-15 23:34:23 +02:00
|
|
|
if (pg_strcasecmp(scopestr, "base") == 0)
|
2006-07-27 15:20:24 +02:00
|
|
|
scope = LDAP_SCOPE_BASE;
|
2006-09-15 23:34:23 +02:00
|
|
|
else if (pg_strcasecmp(scopestr, "one") == 0)
|
2006-07-27 15:20:24 +02:00
|
|
|
scope = LDAP_SCOPE_ONELEVEL;
|
2006-09-15 23:34:23 +02:00
|
|
|
else if (pg_strcasecmp(scopestr, "sub") == 0)
|
2006-07-27 15:20:24 +02:00
|
|
|
scope = LDAP_SCOPE_SUBTREE;
|
|
|
|
else
|
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBuffer(errorMessage,
|
|
|
|
libpq_gettext("invalid LDAP URL \"%s\": must have search scope (base/one/sub)\n"),
|
|
|
|
purl);
|
2006-07-27 15:20:24 +02:00
|
|
|
free(url);
|
|
|
|
return 3;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* initialize LDAP structure */
|
|
|
|
if ((ld = ldap_init(hostname, port)) == NULL)
|
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBufferStr(errorMessage,
|
|
|
|
libpq_gettext("could not create LDAP structure\n"));
|
2006-07-27 15:20:24 +02:00
|
|
|
free(url);
|
|
|
|
return 3;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2014-04-16 17:18:02 +02:00
|
|
|
* Perform an explicit anonymous bind.
|
2014-04-17 22:12:24 +02:00
|
|
|
*
|
|
|
|
* LDAP does not require that an anonymous bind is performed explicitly,
|
2014-04-16 17:18:02 +02:00
|
|
|
* but we want to distinguish between the case where LDAP bind does not
|
|
|
|
* succeed within PGLDAP_TIMEOUT seconds (return 2 to continue parsing the
|
|
|
|
* service control file) and the case where querying the LDAP server fails
|
|
|
|
* (return 1 to end parsing).
|
2014-04-17 22:12:24 +02:00
|
|
|
*
|
2014-04-16 17:18:02 +02:00
|
|
|
* Unfortunately there is no way of setting a timeout that works for both
|
|
|
|
* Windows and OpenLDAP.
|
2006-07-27 15:20:24 +02:00
|
|
|
*/
|
2014-04-16 17:18:02 +02:00
|
|
|
#ifdef WIN32
|
|
|
|
/* the nonstandard ldap_connect function performs an anonymous bind */
|
|
|
|
if (ldap_connect(ld, &time) != LDAP_SUCCESS)
|
|
|
|
{
|
|
|
|
/* error or timeout in ldap_connect */
|
|
|
|
free(url);
|
|
|
|
ldap_unbind(ld);
|
|
|
|
return 2;
|
|
|
|
}
|
2014-04-17 22:12:24 +02:00
|
|
|
#else /* !WIN32 */
|
2014-04-16 17:18:02 +02:00
|
|
|
/* in OpenLDAP, use the LDAP_OPT_NETWORK_TIMEOUT option */
|
|
|
|
if (ldap_set_option(ld, LDAP_OPT_NETWORK_TIMEOUT, &time) != LDAP_SUCCESS)
|
|
|
|
{
|
|
|
|
free(url);
|
|
|
|
ldap_unbind(ld);
|
|
|
|
return 3;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* anonymous bind */
|
2006-07-27 15:20:24 +02:00
|
|
|
if ((msgid = ldap_simple_bind(ld, NULL, NULL)) == -1)
|
|
|
|
{
|
2014-04-16 17:18:02 +02:00
|
|
|
/* error or network timeout */
|
2006-07-27 15:20:24 +02:00
|
|
|
free(url);
|
|
|
|
ldap_unbind(ld);
|
|
|
|
return 2;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* wait some time for the connection to succeed */
|
|
|
|
res = NULL;
|
|
|
|
if ((rc = ldap_result(ld, msgid, LDAP_MSG_ALL, &time, &res)) == -1 ||
|
|
|
|
res == NULL)
|
|
|
|
{
|
2014-04-16 17:18:02 +02:00
|
|
|
/* error or timeout */
|
2006-07-27 15:20:24 +02:00
|
|
|
if (res != NULL)
|
|
|
|
ldap_msgfree(res);
|
|
|
|
free(url);
|
|
|
|
ldap_unbind(ld);
|
|
|
|
return 2;
|
|
|
|
}
|
|
|
|
ldap_msgfree(res);
|
|
|
|
|
2014-04-16 17:18:02 +02:00
|
|
|
/* reset timeout */
|
|
|
|
time.tv_sec = -1;
|
|
|
|
if (ldap_set_option(ld, LDAP_OPT_NETWORK_TIMEOUT, &time) != LDAP_SUCCESS)
|
|
|
|
{
|
|
|
|
free(url);
|
|
|
|
ldap_unbind(ld);
|
|
|
|
return 3;
|
|
|
|
}
|
|
|
|
#endif /* WIN32 */
|
|
|
|
|
2006-07-27 15:20:24 +02:00
|
|
|
/* search */
|
|
|
|
res = NULL;
|
|
|
|
if ((rc = ldap_search_st(ld, dn, scope, filter, attrs, 0, &time, &res))
|
|
|
|
!= LDAP_SUCCESS)
|
|
|
|
{
|
|
|
|
if (res != NULL)
|
|
|
|
ldap_msgfree(res);
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBuffer(errorMessage,
|
2006-07-27 15:20:24 +02:00
|
|
|
libpq_gettext("lookup on LDAP server failed: %s\n"),
|
|
|
|
ldap_err2string(rc));
|
|
|
|
ldap_unbind(ld);
|
|
|
|
free(url);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* complain if there was not exactly one result */
|
|
|
|
if ((rc = ldap_count_entries(ld, res)) != 1)
|
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBufferStr(errorMessage,
|
|
|
|
rc ? libpq_gettext("more than one entry found on LDAP lookup\n")
|
|
|
|
: libpq_gettext("no entry found on LDAP lookup\n"));
|
2006-07-27 15:20:24 +02:00
|
|
|
ldap_msgfree(res);
|
|
|
|
ldap_unbind(ld);
|
|
|
|
free(url);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* get entry */
|
|
|
|
if ((entry = ldap_first_entry(ld, res)) == NULL)
|
|
|
|
{
|
|
|
|
/* should never happen */
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBufferStr(errorMessage,
|
|
|
|
libpq_gettext("no entry found on LDAP lookup\n"));
|
2006-07-27 15:20:24 +02:00
|
|
|
ldap_msgfree(res);
|
|
|
|
ldap_unbind(ld);
|
|
|
|
free(url);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* get values */
|
|
|
|
if ((values = ldap_get_values_len(ld, entry, attrs[0])) == NULL)
|
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBufferStr(errorMessage,
|
|
|
|
libpq_gettext("attribute has no values on LDAP lookup\n"));
|
2006-07-27 15:20:24 +02:00
|
|
|
ldap_msgfree(res);
|
|
|
|
ldap_unbind(ld);
|
|
|
|
free(url);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
ldap_msgfree(res);
|
|
|
|
free(url);
|
|
|
|
|
|
|
|
if (values[0] == NULL)
|
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBufferStr(errorMessage,
|
|
|
|
libpq_gettext("attribute has no values on LDAP lookup\n"));
|
2006-07-27 15:20:24 +02:00
|
|
|
ldap_value_free_len(values);
|
|
|
|
ldap_unbind(ld);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2011-05-12 17:56:38 +02:00
|
|
|
/* concatenate values into a single string with newline terminators */
|
|
|
|
size = 1; /* for the trailing null */
|
|
|
|
for (i = 0; values[i] != NULL; i++)
|
2006-07-27 15:20:24 +02:00
|
|
|
size += values[i]->bv_len + 1;
|
2011-05-12 17:56:38 +02:00
|
|
|
if ((result = malloc(size)) == NULL)
|
2006-07-27 15:20:24 +02:00
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBufferStr(errorMessage,
|
|
|
|
libpq_gettext("out of memory\n"));
|
2006-07-27 15:20:24 +02:00
|
|
|
ldap_value_free_len(values);
|
|
|
|
ldap_unbind(ld);
|
|
|
|
return 3;
|
|
|
|
}
|
2011-05-12 17:56:38 +02:00
|
|
|
p = result;
|
|
|
|
for (i = 0; values[i] != NULL; i++)
|
2006-07-27 15:20:24 +02:00
|
|
|
{
|
2011-05-12 17:56:38 +02:00
|
|
|
memcpy(p, values[i]->bv_val, values[i]->bv_len);
|
2006-07-27 15:20:24 +02:00
|
|
|
p += values[i]->bv_len;
|
|
|
|
*(p++) = '\n';
|
|
|
|
}
|
2011-05-12 17:56:38 +02:00
|
|
|
*p = '\0';
|
2006-07-27 15:20:24 +02:00
|
|
|
|
|
|
|
ldap_value_free_len(values);
|
|
|
|
ldap_unbind(ld);
|
|
|
|
|
|
|
|
/* parse result string */
|
|
|
|
oldstate = state = 0;
|
|
|
|
for (p = result; *p != '\0'; ++p)
|
|
|
|
{
|
|
|
|
switch (state)
|
|
|
|
{
|
|
|
|
case 0: /* between entries */
|
|
|
|
if (!ld_is_sp_tab(*p) && !ld_is_nl_cr(*p))
|
|
|
|
{
|
|
|
|
optname = p;
|
|
|
|
state = 1;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 1: /* in option name */
|
|
|
|
if (ld_is_sp_tab(*p))
|
|
|
|
{
|
|
|
|
*p = '\0';
|
|
|
|
state = 2;
|
|
|
|
}
|
|
|
|
else if (ld_is_nl_cr(*p))
|
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBuffer(errorMessage,
|
2006-07-27 15:20:24 +02:00
|
|
|
libpq_gettext("missing \"=\" after \"%s\" in connection info string\n"),
|
|
|
|
optname);
|
2011-05-12 17:56:38 +02:00
|
|
|
free(result);
|
2006-07-27 15:20:24 +02:00
|
|
|
return 3;
|
|
|
|
}
|
|
|
|
else if (*p == '=')
|
|
|
|
{
|
|
|
|
*p = '\0';
|
|
|
|
state = 3;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 2: /* after option name */
|
|
|
|
if (*p == '=')
|
|
|
|
{
|
|
|
|
state = 3;
|
|
|
|
}
|
|
|
|
else if (!ld_is_sp_tab(*p))
|
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBuffer(errorMessage,
|
2006-07-27 15:20:24 +02:00
|
|
|
libpq_gettext("missing \"=\" after \"%s\" in connection info string\n"),
|
|
|
|
optname);
|
2011-05-12 17:56:38 +02:00
|
|
|
free(result);
|
2006-07-27 15:20:24 +02:00
|
|
|
return 3;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 3: /* before option value */
|
|
|
|
if (*p == '\'')
|
|
|
|
{
|
|
|
|
optval = p + 1;
|
|
|
|
p1 = p + 1;
|
|
|
|
state = 5;
|
|
|
|
}
|
|
|
|
else if (ld_is_nl_cr(*p))
|
|
|
|
{
|
|
|
|
optval = optname + strlen(optname); /* empty */
|
|
|
|
state = 0;
|
|
|
|
}
|
|
|
|
else if (!ld_is_sp_tab(*p))
|
|
|
|
{
|
|
|
|
optval = p;
|
|
|
|
state = 4;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 4: /* in unquoted option value */
|
|
|
|
if (ld_is_sp_tab(*p) || ld_is_nl_cr(*p))
|
|
|
|
{
|
|
|
|
*p = '\0';
|
|
|
|
state = 0;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 5: /* in quoted option value */
|
|
|
|
if (*p == '\'')
|
|
|
|
{
|
|
|
|
*p1 = '\0';
|
|
|
|
state = 0;
|
|
|
|
}
|
|
|
|
else if (*p == '\\')
|
|
|
|
state = 6;
|
|
|
|
else
|
|
|
|
*(p1++) = *p;
|
|
|
|
break;
|
|
|
|
case 6: /* in quoted option value after escape */
|
|
|
|
*(p1++) = *p;
|
|
|
|
state = 5;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (state == 0 && oldstate != 0)
|
|
|
|
{
|
|
|
|
found_keyword = false;
|
|
|
|
for (i = 0; options[i].keyword; i++)
|
|
|
|
{
|
|
|
|
if (strcmp(options[i].keyword, optname) == 0)
|
|
|
|
{
|
|
|
|
if (options[i].val == NULL)
|
2014-11-25 11:55:00 +01:00
|
|
|
{
|
2006-07-27 15:20:24 +02:00
|
|
|
options[i].val = strdup(optval);
|
2014-11-25 11:55:00 +01:00
|
|
|
if (!options[i].val)
|
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBufferStr(errorMessage,
|
|
|
|
libpq_gettext("out of memory\n"));
|
2014-11-25 11:55:00 +01:00
|
|
|
free(result);
|
|
|
|
return 3;
|
|
|
|
}
|
|
|
|
}
|
2006-07-27 15:20:24 +02:00
|
|
|
found_keyword = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (!found_keyword)
|
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBuffer(errorMessage,
|
2006-07-27 15:20:24 +02:00
|
|
|
libpq_gettext("invalid connection option \"%s\"\n"),
|
|
|
|
optname);
|
2011-05-12 17:56:38 +02:00
|
|
|
free(result);
|
2006-07-27 15:20:24 +02:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
optname = NULL;
|
|
|
|
optval = NULL;
|
|
|
|
}
|
|
|
|
oldstate = state;
|
|
|
|
}
|
|
|
|
|
2011-05-12 17:56:38 +02:00
|
|
|
free(result);
|
|
|
|
|
2006-07-27 15:20:24 +02:00
|
|
|
if (state == 5 || state == 6)
|
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBufferStr(errorMessage,
|
|
|
|
libpq_gettext("unterminated quoted string in connection info string\n"));
|
2006-07-27 15:20:24 +02:00
|
|
|
return 3;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
2014-04-17 22:12:24 +02:00
|
|
|
|
|
|
|
#endif /* USE_LDAP */
|
2000-10-17 19:43:13 +02:00
|
|
|
|
Fix libpq to not require user's home directory to exist.
Some people like to run libpq-using applications in environments where
there's no home directory. We've broken that scenario before (cf commits
5b4067798 and bd58d9d88), and commit ba005f193 broke it again, by making
it a hard error if we fail to get the home directory name while looking
for ~/.pgpass. The previous precedent is that if we can't get the home
directory name, we should just silently act as though the file we hoped
to find there doesn't exist. Rearrange the new code to honor that.
Looking around, the service-file code added by commit 41a4e4595 had the
same disease. Apparently, that escaped notice because it only runs when
a service name has been specified, which I guess the people who use this
scenario don't do. Nonetheless, it's wrong too, so fix that case as well.
Add a comment about this policy to pqGetHomeDirectory, in the probably
vain hope of forestalling the same error in future. And upgrade the
rather miserable commenting in parseServiceInfo, too.
In passing, also back off parseServiceInfo's assumption that only ENOENT
is an ignorable error from stat() when checking a service file. We would
need to ignore at least ENOTDIR as well (cf 5b4067798), and seeing that
the far-better-tested code for ~/.pgpass treats all stat() failures alike,
I think this code ought to as well.
Per bug #14872 from Dan Watson. Back-patch the .pgpass change to v10
where ba005f193 came in. The service-file bugs are far older, so
back-patch the other changes to all supported branches.
Discussion: https://postgr.es/m/20171025200457.1471.34504@wrigleys.postgresql.org
2017-10-26 01:32:24 +02:00
|
|
|
/*
|
|
|
|
* parseServiceInfo: if a service name has been given, look it up and absorb
|
|
|
|
* connection options from it into *options.
|
|
|
|
*
|
|
|
|
* Returns 0 on success, nonzero on failure. On failure, if errorMessage
|
|
|
|
* isn't null, also store an error message there. (Note: the only reason
|
|
|
|
* this function and related ones don't dump core on errorMessage == NULL
|
Rearrange libpq's error reporting to avoid duplicated error text.
Since commit ffa2e4670, libpq accumulates text in conn->errorMessage
across a whole query cycle. In some situations, we may report more
than one error event within a cycle: the easiest case to reach is
where we report a FATAL error message from the server, and then a
bit later we detect loss of connection. Since, historically, each
error PGresult bears the entire content of conn->errorMessage,
this results in duplication of the FATAL message in any output that
concatenates the contents of the PGresults.
Accumulation in errorMessage still seems like a good idea, especially
in view of the number of places that did ad-hoc error concatenation
before ffa2e4670. So to fix this, let's track how much of
conn->errorMessage has been read out into error PGresults, and only
include new text in later PGresults. The tricky part of that is
to be sure that we never discard an error PGresult once made (else
we'd risk dropping some text, a problem much worse than duplication).
While libpq formerly did that in some code paths, a little bit of
rearrangement lets us postpone making an error PGresult at all until
we are about to return it.
A side benefit of that postponement is that it now becomes practical
to return a dummy static PGresult in cases where we hit out-of-memory
while trying to manufacture an error PGresult. This eliminates the
admittedly-very-rare case where we'd return NULL from PQgetResult,
indicating successful query completion, even though what actually
happened was an OOM failure.
Discussion: https://postgr.es/m/ab4288f8-be5c-57fb-2400-e3e857f53e46@enterprisedb.com
2022-02-18 21:35:15 +01:00
|
|
|
* is the undocumented fact that appendPQExpBuffer does nothing when passed
|
Fix libpq to not require user's home directory to exist.
Some people like to run libpq-using applications in environments where
there's no home directory. We've broken that scenario before (cf commits
5b4067798 and bd58d9d88), and commit ba005f193 broke it again, by making
it a hard error if we fail to get the home directory name while looking
for ~/.pgpass. The previous precedent is that if we can't get the home
directory name, we should just silently act as though the file we hoped
to find there doesn't exist. Rearrange the new code to honor that.
Looking around, the service-file code added by commit 41a4e4595 had the
same disease. Apparently, that escaped notice because it only runs when
a service name has been specified, which I guess the people who use this
scenario don't do. Nonetheless, it's wrong too, so fix that case as well.
Add a comment about this policy to pqGetHomeDirectory, in the probably
vain hope of forestalling the same error in future. And upgrade the
rather miserable commenting in parseServiceInfo, too.
In passing, also back off parseServiceInfo's assumption that only ENOENT
is an ignorable error from stat() when checking a service file. We would
need to ignore at least ENOTDIR as well (cf 5b4067798), and seeing that
the far-better-tested code for ~/.pgpass treats all stat() failures alike,
I think this code ought to as well.
Per bug #14872 from Dan Watson. Back-patch the .pgpass change to v10
where ba005f193 came in. The service-file bugs are far older, so
back-patch the other changes to all supported branches.
Discussion: https://postgr.es/m/20171025200457.1471.34504@wrigleys.postgresql.org
2017-10-26 01:32:24 +02:00
|
|
|
* a null PQExpBuffer pointer.)
|
|
|
|
*/
|
2000-12-07 03:04:30 +01:00
|
|
|
static int
|
|
|
|
parseServiceInfo(PQconninfoOption *options, PQExpBuffer errorMessage)
|
|
|
|
{
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
const char *service = conninfo_getval(options, "service");
|
2004-06-03 02:07:38 +02:00
|
|
|
char serviceFile[MAXPGPATH];
|
2010-01-20 22:15:21 +01:00
|
|
|
char *env;
|
2003-12-19 22:50:54 +01:00
|
|
|
bool group_found = false;
|
2010-01-20 22:15:21 +01:00
|
|
|
int status;
|
|
|
|
struct stat stat_buf;
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2003-04-28 06:52:13 +02:00
|
|
|
/*
|
|
|
|
* We have to special-case the environment variable PGSERVICE here, since
|
|
|
|
* this is and should be called before inserting environment defaults for
|
|
|
|
* other connection options.
|
|
|
|
*/
|
|
|
|
if (service == NULL)
|
|
|
|
service = getenv("PGSERVICE");
|
|
|
|
|
Fix libpq to not require user's home directory to exist.
Some people like to run libpq-using applications in environments where
there's no home directory. We've broken that scenario before (cf commits
5b4067798 and bd58d9d88), and commit ba005f193 broke it again, by making
it a hard error if we fail to get the home directory name while looking
for ~/.pgpass. The previous precedent is that if we can't get the home
directory name, we should just silently act as though the file we hoped
to find there doesn't exist. Rearrange the new code to honor that.
Looking around, the service-file code added by commit 41a4e4595 had the
same disease. Apparently, that escaped notice because it only runs when
a service name has been specified, which I guess the people who use this
scenario don't do. Nonetheless, it's wrong too, so fix that case as well.
Add a comment about this policy to pqGetHomeDirectory, in the probably
vain hope of forestalling the same error in future. And upgrade the
rather miserable commenting in parseServiceInfo, too.
In passing, also back off parseServiceInfo's assumption that only ENOENT
is an ignorable error from stat() when checking a service file. We would
need to ignore at least ENOTDIR as well (cf 5b4067798), and seeing that
the far-better-tested code for ~/.pgpass treats all stat() failures alike,
I think this code ought to as well.
Per bug #14872 from Dan Watson. Back-patch the .pgpass change to v10
where ba005f193 came in. The service-file bugs are far older, so
back-patch the other changes to all supported branches.
Discussion: https://postgr.es/m/20171025200457.1471.34504@wrigleys.postgresql.org
2017-10-26 01:32:24 +02:00
|
|
|
/* If no service name given, nothing to do */
|
2010-01-20 22:15:21 +01:00
|
|
|
if (service == NULL)
|
|
|
|
return 0;
|
|
|
|
|
Fix libpq to not require user's home directory to exist.
Some people like to run libpq-using applications in environments where
there's no home directory. We've broken that scenario before (cf commits
5b4067798 and bd58d9d88), and commit ba005f193 broke it again, by making
it a hard error if we fail to get the home directory name while looking
for ~/.pgpass. The previous precedent is that if we can't get the home
directory name, we should just silently act as though the file we hoped
to find there doesn't exist. Rearrange the new code to honor that.
Looking around, the service-file code added by commit 41a4e4595 had the
same disease. Apparently, that escaped notice because it only runs when
a service name has been specified, which I guess the people who use this
scenario don't do. Nonetheless, it's wrong too, so fix that case as well.
Add a comment about this policy to pqGetHomeDirectory, in the probably
vain hope of forestalling the same error in future. And upgrade the
rather miserable commenting in parseServiceInfo, too.
In passing, also back off parseServiceInfo's assumption that only ENOENT
is an ignorable error from stat() when checking a service file. We would
need to ignore at least ENOTDIR as well (cf 5b4067798), and seeing that
the far-better-tested code for ~/.pgpass treats all stat() failures alike,
I think this code ought to as well.
Per bug #14872 from Dan Watson. Back-patch the .pgpass change to v10
where ba005f193 came in. The service-file bugs are far older, so
back-patch the other changes to all supported branches.
Discussion: https://postgr.es/m/20171025200457.1471.34504@wrigleys.postgresql.org
2017-10-26 01:32:24 +02:00
|
|
|
/*
|
|
|
|
* Try PGSERVICEFILE if specified, else try ~/.pg_service.conf (if that
|
|
|
|
* exists).
|
|
|
|
*/
|
2010-01-20 22:15:21 +01:00
|
|
|
if ((env = getenv("PGSERVICEFILE")) != NULL)
|
|
|
|
strlcpy(serviceFile, env, sizeof(serviceFile));
|
|
|
|
else
|
|
|
|
{
|
|
|
|
char homedir[MAXPGPATH];
|
|
|
|
|
|
|
|
if (!pqGetHomeDirectory(homedir, sizeof(homedir)))
|
Fix libpq to not require user's home directory to exist.
Some people like to run libpq-using applications in environments where
there's no home directory. We've broken that scenario before (cf commits
5b4067798 and bd58d9d88), and commit ba005f193 broke it again, by making
it a hard error if we fail to get the home directory name while looking
for ~/.pgpass. The previous precedent is that if we can't get the home
directory name, we should just silently act as though the file we hoped
to find there doesn't exist. Rearrange the new code to honor that.
Looking around, the service-file code added by commit 41a4e4595 had the
same disease. Apparently, that escaped notice because it only runs when
a service name has been specified, which I guess the people who use this
scenario don't do. Nonetheless, it's wrong too, so fix that case as well.
Add a comment about this policy to pqGetHomeDirectory, in the probably
vain hope of forestalling the same error in future. And upgrade the
rather miserable commenting in parseServiceInfo, too.
In passing, also back off parseServiceInfo's assumption that only ENOENT
is an ignorable error from stat() when checking a service file. We would
need to ignore at least ENOTDIR as well (cf 5b4067798), and seeing that
the far-better-tested code for ~/.pgpass treats all stat() failures alike,
I think this code ought to as well.
Per bug #14872 from Dan Watson. Back-patch the .pgpass change to v10
where ba005f193 came in. The service-file bugs are far older, so
back-patch the other changes to all supported branches.
Discussion: https://postgr.es/m/20171025200457.1471.34504@wrigleys.postgresql.org
2017-10-26 01:32:24 +02:00
|
|
|
goto next_file;
|
2010-01-20 22:15:21 +01:00
|
|
|
snprintf(serviceFile, MAXPGPATH, "%s/%s", homedir, ".pg_service.conf");
|
Fix libpq to not require user's home directory to exist.
Some people like to run libpq-using applications in environments where
there's no home directory. We've broken that scenario before (cf commits
5b4067798 and bd58d9d88), and commit ba005f193 broke it again, by making
it a hard error if we fail to get the home directory name while looking
for ~/.pgpass. The previous precedent is that if we can't get the home
directory name, we should just silently act as though the file we hoped
to find there doesn't exist. Rearrange the new code to honor that.
Looking around, the service-file code added by commit 41a4e4595 had the
same disease. Apparently, that escaped notice because it only runs when
a service name has been specified, which I guess the people who use this
scenario don't do. Nonetheless, it's wrong too, so fix that case as well.
Add a comment about this policy to pqGetHomeDirectory, in the probably
vain hope of forestalling the same error in future. And upgrade the
rather miserable commenting in parseServiceInfo, too.
In passing, also back off parseServiceInfo's assumption that only ENOENT
is an ignorable error from stat() when checking a service file. We would
need to ignore at least ENOTDIR as well (cf 5b4067798), and seeing that
the far-better-tested code for ~/.pgpass treats all stat() failures alike,
I think this code ought to as well.
Per bug #14872 from Dan Watson. Back-patch the .pgpass change to v10
where ba005f193 came in. The service-file bugs are far older, so
back-patch the other changes to all supported branches.
Discussion: https://postgr.es/m/20171025200457.1471.34504@wrigleys.postgresql.org
2017-10-26 01:32:24 +02:00
|
|
|
if (stat(serviceFile, &stat_buf) != 0)
|
2010-01-20 22:15:21 +01:00
|
|
|
goto next_file;
|
|
|
|
}
|
|
|
|
|
|
|
|
status = parseServiceFile(serviceFile, service, options, errorMessage, &group_found);
|
|
|
|
if (group_found || status != 0)
|
|
|
|
return status;
|
|
|
|
|
|
|
|
next_file:
|
2010-02-26 03:01:40 +01:00
|
|
|
|
2004-06-03 02:07:38 +02:00
|
|
|
/*
|
|
|
|
* This could be used by any application so we can't use the binary
|
|
|
|
* location to find our config files.
|
|
|
|
*/
|
|
|
|
snprintf(serviceFile, MAXPGPATH, "%s/pg_service.conf",
|
2005-06-12 02:00:21 +02:00
|
|
|
getenv("PGSYSCONFDIR") ? getenv("PGSYSCONFDIR") : SYSCONFDIR);
|
Fix libpq to not require user's home directory to exist.
Some people like to run libpq-using applications in environments where
there's no home directory. We've broken that scenario before (cf commits
5b4067798 and bd58d9d88), and commit ba005f193 broke it again, by making
it a hard error if we fail to get the home directory name while looking
for ~/.pgpass. The previous precedent is that if we can't get the home
directory name, we should just silently act as though the file we hoped
to find there doesn't exist. Rearrange the new code to honor that.
Looking around, the service-file code added by commit 41a4e4595 had the
same disease. Apparently, that escaped notice because it only runs when
a service name has been specified, which I guess the people who use this
scenario don't do. Nonetheless, it's wrong too, so fix that case as well.
Add a comment about this policy to pqGetHomeDirectory, in the probably
vain hope of forestalling the same error in future. And upgrade the
rather miserable commenting in parseServiceInfo, too.
In passing, also back off parseServiceInfo's assumption that only ENOENT
is an ignorable error from stat() when checking a service file. We would
need to ignore at least ENOTDIR as well (cf 5b4067798), and seeing that
the far-better-tested code for ~/.pgpass treats all stat() failures alike,
I think this code ought to as well.
Per bug #14872 from Dan Watson. Back-patch the .pgpass change to v10
where ba005f193 came in. The service-file bugs are far older, so
back-patch the other changes to all supported branches.
Discussion: https://postgr.es/m/20171025200457.1471.34504@wrigleys.postgresql.org
2017-10-26 01:32:24 +02:00
|
|
|
if (stat(serviceFile, &stat_buf) != 0)
|
2010-01-20 22:15:21 +01:00
|
|
|
goto last_file;
|
|
|
|
|
|
|
|
status = parseServiceFile(serviceFile, service, options, errorMessage, &group_found);
|
|
|
|
if (status != 0)
|
|
|
|
return status;
|
|
|
|
|
|
|
|
last_file:
|
|
|
|
if (!group_found)
|
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBuffer(errorMessage,
|
2010-01-20 22:15:21 +01:00
|
|
|
libpq_gettext("definition of service \"%s\" not found\n"), service);
|
|
|
|
return 3;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
parseServiceFile(const char *serviceFile,
|
|
|
|
const char *service,
|
|
|
|
PQconninfoOption *options,
|
|
|
|
PQExpBuffer errorMessage,
|
|
|
|
bool *group_found)
|
|
|
|
{
|
2020-09-22 21:59:23 +02:00
|
|
|
int result = 0,
|
|
|
|
linenr = 0,
|
2010-01-20 22:15:21 +01:00
|
|
|
i;
|
|
|
|
FILE *f;
|
2020-09-22 21:59:23 +02:00
|
|
|
char *line;
|
2021-06-26 20:20:17 +02:00
|
|
|
char buf[1024];
|
2020-09-22 21:59:23 +02:00
|
|
|
|
|
|
|
*group_found = false;
|
2004-06-03 02:07:38 +02:00
|
|
|
|
2010-01-20 22:15:21 +01:00
|
|
|
f = fopen(serviceFile, "r");
|
|
|
|
if (f == NULL)
|
2000-10-17 03:00:58 +02:00
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBuffer(errorMessage, libpq_gettext("service file \"%s\" not found\n"),
|
2010-01-20 22:15:21 +01:00
|
|
|
serviceFile);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2021-06-26 20:20:17 +02:00
|
|
|
while ((line = fgets(buf, sizeof(buf), f)) != NULL)
|
2020-09-22 21:59:23 +02:00
|
|
|
{
|
2021-06-26 20:20:17 +02:00
|
|
|
int len;
|
|
|
|
|
2010-01-20 22:15:21 +01:00
|
|
|
linenr++;
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2021-06-26 20:20:17 +02:00
|
|
|
if (strlen(line) >= sizeof(buf) - 1)
|
|
|
|
{
|
|
|
|
appendPQExpBuffer(errorMessage,
|
|
|
|
libpq_gettext("line %d too long in service file \"%s\"\n"),
|
|
|
|
linenr,
|
|
|
|
serviceFile);
|
|
|
|
result = 2;
|
|
|
|
goto exit;
|
|
|
|
}
|
2020-09-22 21:59:23 +02:00
|
|
|
|
2021-06-26 20:20:17 +02:00
|
|
|
/* ignore whitespace at end of line, especially the newline */
|
|
|
|
len = strlen(line);
|
|
|
|
while (len > 0 && isspace((unsigned char) line[len - 1]))
|
|
|
|
line[--len] = '\0';
|
2001-03-22 05:01:46 +01:00
|
|
|
|
Fix failures to ignore \r when reading Windows-style newlines.
libpq failed to ignore Windows-style newlines in connection service files.
This normally wasn't a problem on Windows itself, because fgets() would
convert \r\n to just \n. But if libpq were running inside a program that
changes the default fopen mode to binary, it would see the \r's and think
they were data. In any case, it's project policy to ignore \r in text
files unconditionally, because people sometimes try to use files with
DOS-style newlines on Unix machines, where the C library won't hide that
from us.
Hence, adjust parseServiceFile() to ignore \r as well as \n at the end of
the line. In HEAD, go a little further and make it ignore all trailing
whitespace, to match what it's always done with leading whitespace.
In HEAD, also run around and fix up everyplace where we have
newline-chomping code to make all those places look consistent and
uniformly drop \r. It is not clear whether any of those changes are
fixing live bugs. Most of the non-cosmetic changes are in places that
are reading popen output, and the jury is still out as to whether popen
on Windows can return \r\n. (The Windows-specific code in pipe_read_line
seems to think so, but our lack of support for this elsewhere suggests
maybe it's not a problem in practice.) Hence, I desisted from applying
those changes to back branches, except in run_ssl_passphrase_command()
which is new enough and little-tested enough that we'd probably not have
heard about any problems there.
Tom Lane and Michael Paquier, per bug #15827 from Jorge Gustavo Rocha.
Back-patch the parseServiceFile() change to all supported branches,
and the run_ssl_passphrase_command() change to v11 where that was added.
Discussion: https://postgr.es/m/15827-e6ba53a3a7ed543c@postgresql.org
2019-07-25 18:10:54 +02:00
|
|
|
/* ignore leading whitespace too */
|
2010-01-20 22:15:21 +01:00
|
|
|
while (*line && isspace((unsigned char) line[0]))
|
|
|
|
line++;
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2010-01-20 22:15:21 +01:00
|
|
|
/* ignore comments and empty lines */
|
Fix failures to ignore \r when reading Windows-style newlines.
libpq failed to ignore Windows-style newlines in connection service files.
This normally wasn't a problem on Windows itself, because fgets() would
convert \r\n to just \n. But if libpq were running inside a program that
changes the default fopen mode to binary, it would see the \r's and think
they were data. In any case, it's project policy to ignore \r in text
files unconditionally, because people sometimes try to use files with
DOS-style newlines on Unix machines, where the C library won't hide that
from us.
Hence, adjust parseServiceFile() to ignore \r as well as \n at the end of
the line. In HEAD, go a little further and make it ignore all trailing
whitespace, to match what it's always done with leading whitespace.
In HEAD, also run around and fix up everyplace where we have
newline-chomping code to make all those places look consistent and
uniformly drop \r. It is not clear whether any of those changes are
fixing live bugs. Most of the non-cosmetic changes are in places that
are reading popen output, and the jury is still out as to whether popen
on Windows can return \r\n. (The Windows-specific code in pipe_read_line
seems to think so, but our lack of support for this elsewhere suggests
maybe it's not a problem in practice.) Hence, I desisted from applying
those changes to back branches, except in run_ssl_passphrase_command()
which is new enough and little-tested enough that we'd probably not have
heard about any problems there.
Tom Lane and Michael Paquier, per bug #15827 from Jorge Gustavo Rocha.
Back-patch the parseServiceFile() change to all supported branches,
and the run_ssl_passphrase_command() change to v11 where that was added.
Discussion: https://postgr.es/m/15827-e6ba53a3a7ed543c@postgresql.org
2019-07-25 18:10:54 +02:00
|
|
|
if (line[0] == '\0' || line[0] == '#')
|
2010-01-20 22:15:21 +01:00
|
|
|
continue;
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2010-01-20 22:15:21 +01:00
|
|
|
/* Check for right groupname */
|
|
|
|
if (line[0] == '[')
|
|
|
|
{
|
|
|
|
if (*group_found)
|
2000-10-17 03:00:58 +02:00
|
|
|
{
|
2020-09-22 21:59:23 +02:00
|
|
|
/* end of desired group reached; return success */
|
|
|
|
goto exit;
|
2001-03-22 05:01:46 +01:00
|
|
|
}
|
2010-01-20 22:15:21 +01:00
|
|
|
|
|
|
|
if (strncmp(line + 1, service, strlen(service)) == 0 &&
|
|
|
|
line[strlen(service) + 1] == ']')
|
|
|
|
*group_found = true;
|
2000-10-17 03:00:58 +02:00
|
|
|
else
|
2010-01-20 22:15:21 +01:00
|
|
|
*group_found = false;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
if (*group_found)
|
2001-03-22 05:01:46 +01:00
|
|
|
{
|
2010-01-20 22:15:21 +01:00
|
|
|
/*
|
|
|
|
* Finally, we are in the right group and can parse the line
|
|
|
|
*/
|
|
|
|
char *key,
|
|
|
|
*val;
|
|
|
|
bool found_keyword;
|
2000-10-17 03:00:58 +02:00
|
|
|
|
2006-07-27 15:20:24 +02:00
|
|
|
#ifdef USE_LDAP
|
2010-01-20 22:15:21 +01:00
|
|
|
if (strncmp(line, "ldap", 4) == 0)
|
|
|
|
{
|
|
|
|
int rc = ldapServiceLookup(line, options, errorMessage);
|
2006-10-04 02:30:14 +02:00
|
|
|
|
2010-01-20 22:15:21 +01:00
|
|
|
/* if rc = 2, go on reading for fallback */
|
|
|
|
switch (rc)
|
|
|
|
{
|
|
|
|
case 0:
|
2020-09-22 21:59:23 +02:00
|
|
|
goto exit;
|
2010-01-20 22:15:21 +01:00
|
|
|
case 1:
|
|
|
|
case 3:
|
2020-09-22 21:59:23 +02:00
|
|
|
result = 3;
|
|
|
|
goto exit;
|
2010-01-20 22:15:21 +01:00
|
|
|
case 2:
|
|
|
|
continue;
|
2006-07-27 15:20:24 +02:00
|
|
|
}
|
2010-01-20 22:15:21 +01:00
|
|
|
}
|
2006-07-27 15:20:24 +02:00
|
|
|
#endif
|
|
|
|
|
2010-01-20 22:15:21 +01:00
|
|
|
key = line;
|
|
|
|
val = strchr(line, '=');
|
|
|
|
if (val == NULL)
|
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBuffer(errorMessage,
|
2010-01-20 22:15:21 +01:00
|
|
|
libpq_gettext("syntax error in service file \"%s\", line %d\n"),
|
|
|
|
serviceFile,
|
|
|
|
linenr);
|
2020-09-22 21:59:23 +02:00
|
|
|
result = 3;
|
|
|
|
goto exit;
|
2010-01-20 22:15:21 +01:00
|
|
|
}
|
|
|
|
*val++ = '\0';
|
2003-01-08 17:21:53 +01:00
|
|
|
|
2015-04-08 16:26:21 +02:00
|
|
|
if (strcmp(key, "service") == 0)
|
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBuffer(errorMessage,
|
2015-04-08 16:26:21 +02:00
|
|
|
libpq_gettext("nested service specifications not supported in service file \"%s\", line %d\n"),
|
|
|
|
serviceFile,
|
|
|
|
linenr);
|
2020-09-22 21:59:23 +02:00
|
|
|
result = 3;
|
|
|
|
goto exit;
|
2015-04-08 16:26:21 +02:00
|
|
|
}
|
|
|
|
|
2010-01-20 22:15:21 +01:00
|
|
|
/*
|
|
|
|
* Set the parameter --- but don't override any previous
|
|
|
|
* explicit setting.
|
|
|
|
*/
|
|
|
|
found_keyword = false;
|
|
|
|
for (i = 0; options[i].keyword; i++)
|
|
|
|
{
|
|
|
|
if (strcmp(options[i].keyword, key) == 0)
|
2001-03-22 05:01:46 +01:00
|
|
|
{
|
2010-01-20 22:15:21 +01:00
|
|
|
if (options[i].val == NULL)
|
|
|
|
options[i].val = strdup(val);
|
2014-11-25 11:55:00 +01:00
|
|
|
if (!options[i].val)
|
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBufferStr(errorMessage,
|
|
|
|
libpq_gettext("out of memory\n"));
|
2020-09-22 21:59:23 +02:00
|
|
|
result = 3;
|
|
|
|
goto exit;
|
2014-11-25 11:55:00 +01:00
|
|
|
}
|
2010-01-20 22:15:21 +01:00
|
|
|
found_keyword = true;
|
|
|
|
break;
|
2001-03-22 05:01:46 +01:00
|
|
|
}
|
2010-01-20 22:15:21 +01:00
|
|
|
}
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2010-01-20 22:15:21 +01:00
|
|
|
if (!found_keyword)
|
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBuffer(errorMessage,
|
2010-01-20 22:15:21 +01:00
|
|
|
libpq_gettext("syntax error in service file \"%s\", line %d\n"),
|
|
|
|
serviceFile,
|
|
|
|
linenr);
|
2020-09-22 21:59:23 +02:00
|
|
|
result = 3;
|
|
|
|
goto exit;
|
2001-03-22 05:01:46 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-09-22 21:59:23 +02:00
|
|
|
exit:
|
2010-01-20 22:15:21 +01:00
|
|
|
fclose(f);
|
|
|
|
|
2020-09-22 21:59:23 +02:00
|
|
|
return result;
|
2000-10-17 03:00:58 +02:00
|
|
|
}
|
|
|
|
|
1998-01-26 02:42:53 +01:00
|
|
|
|
2008-09-22 15:55:14 +02:00
|
|
|
/*
|
|
|
|
* PQconninfoParse
|
|
|
|
*
|
|
|
|
* Parse a string like PQconnectdb() would do and return the
|
|
|
|
* resulting connection options array. NULL is returned on failure.
|
|
|
|
* The result contains only options specified directly in the string,
|
|
|
|
* not any possible default values.
|
|
|
|
*
|
|
|
|
* If errmsg isn't NULL, *errmsg is set to NULL on success, or a malloc'd
|
|
|
|
* string on failure (use PQfreemem to free it). In out-of-memory conditions
|
|
|
|
* both *errmsg and the result could be NULL.
|
|
|
|
*
|
|
|
|
* NOTE: the returned array is dynamically allocated and should
|
|
|
|
* be freed when no longer needed via PQconninfoFree().
|
|
|
|
*/
|
|
|
|
PQconninfoOption *
|
|
|
|
PQconninfoParse(const char *conninfo, char **errmsg)
|
|
|
|
{
|
|
|
|
PQExpBufferData errorBuf;
|
|
|
|
PQconninfoOption *connOptions;
|
|
|
|
|
|
|
|
if (errmsg)
|
|
|
|
*errmsg = NULL; /* default */
|
|
|
|
initPQExpBuffer(&errorBuf);
|
2011-10-19 03:44:23 +02:00
|
|
|
if (PQExpBufferDataBroken(errorBuf))
|
2008-09-22 15:55:14 +02:00
|
|
|
return NULL; /* out of memory already :-( */
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
connOptions = parse_connection_string(conninfo, &errorBuf, false);
|
2008-09-22 15:55:14 +02:00
|
|
|
if (connOptions == NULL && errmsg)
|
|
|
|
*errmsg = errorBuf.data;
|
|
|
|
else
|
|
|
|
termPQExpBuffer(&errorBuf);
|
|
|
|
return connOptions;
|
|
|
|
}
|
|
|
|
|
2012-03-22 17:08:34 +01:00
|
|
|
/*
|
|
|
|
* Build a working copy of the constant PQconninfoOptions array.
|
|
|
|
*/
|
|
|
|
static PQconninfoOption *
|
|
|
|
conninfo_init(PQExpBuffer errorMessage)
|
|
|
|
{
|
|
|
|
PQconninfoOption *options;
|
2012-11-30 07:09:18 +01:00
|
|
|
PQconninfoOption *opt_dest;
|
|
|
|
const internalPQconninfoOption *cur_opt;
|
2012-03-22 17:08:34 +01:00
|
|
|
|
2012-11-30 07:09:18 +01:00
|
|
|
/*
|
|
|
|
* Get enough memory for all options in PQconninfoOptions, even if some
|
|
|
|
* end up being filtered out.
|
|
|
|
*/
|
|
|
|
options = (PQconninfoOption *) malloc(sizeof(PQconninfoOption) * sizeof(PQconninfoOptions) / sizeof(PQconninfoOptions[0]));
|
2012-03-22 17:08:34 +01:00
|
|
|
if (options == NULL)
|
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBufferStr(errorMessage,
|
|
|
|
libpq_gettext("out of memory\n"));
|
2012-03-22 17:08:34 +01:00
|
|
|
return NULL;
|
|
|
|
}
|
2012-11-30 07:09:18 +01:00
|
|
|
opt_dest = options;
|
|
|
|
|
|
|
|
for (cur_opt = PQconninfoOptions; cur_opt->keyword; cur_opt++)
|
|
|
|
{
|
|
|
|
/* Only copy the public part of the struct, not the full internal */
|
|
|
|
memcpy(opt_dest, cur_opt, sizeof(PQconninfoOption));
|
|
|
|
opt_dest++;
|
|
|
|
}
|
|
|
|
MemSet(opt_dest, 0, sizeof(PQconninfoOption));
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
|
2012-03-22 17:08:34 +01:00
|
|
|
return options;
|
|
|
|
}
|
|
|
|
|
2001-08-17 17:11:15 +02:00
|
|
|
/*
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
* Connection string parser
|
2000-03-11 04:08:37 +01:00
|
|
|
*
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
* Returns a malloc'd PQconninfoOption array, if parsing is successful.
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
* Otherwise, NULL is returned and an error message is added to errorMessage.
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
*
|
2017-08-16 06:22:32 +02:00
|
|
|
* If use_defaults is true, default values are filled in (from a service file,
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
* environment variables, etc).
|
|
|
|
*/
|
|
|
|
static PQconninfoOption *
|
|
|
|
parse_connection_string(const char *connstr, PQExpBuffer errorMessage,
|
|
|
|
bool use_defaults)
|
|
|
|
{
|
|
|
|
/* Parse as URI if connection string matches URI prefix */
|
2015-04-02 16:10:22 +02:00
|
|
|
if (uri_prefix_length(connstr) != 0)
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
return conninfo_uri_parse(connstr, errorMessage, use_defaults);
|
|
|
|
|
|
|
|
/* Parse as default otherwise */
|
|
|
|
return conninfo_parse(connstr, errorMessage, use_defaults);
|
|
|
|
}
|
|
|
|
|
2015-04-02 16:10:22 +02:00
|
|
|
/*
|
|
|
|
* Checks if connection string starts with either of the valid URI prefix
|
|
|
|
* designators.
|
|
|
|
*
|
|
|
|
* Returns the URI prefix length, 0 if the string doesn't contain a URI prefix.
|
psql: fix \connect with URIs and conninfo strings
This is the second try at this, after fcef1617295 failed miserably and
had to be reverted: as it turns out, libpq cannot depend on libpgcommon
after all. Instead of shuffling code in the master branch, make that one
just like 9.4 and accept the duplication. (This was all my own mistake,
not the patch submitter's).
psql was already accepting conninfo strings as the first parameter in
\connect, but the way it worked wasn't sane; some of the other
parameters would get the previous connection's values, causing it to
connect to a completely unexpected server or, more likely, not finding
any server at all because of completely wrong combinations of
parameters.
Fix by explicitely checking for a conninfo-looking parameter in the
dbname position; if one is found, use its complete specification rather
than mix with the other arguments. Also, change tab-completion to not
try to complete conninfo/URI-looking "dbnames" and document that
conninfos are accepted as first argument.
There was a weak consensus to backpatch this, because while the behavior
of using the dbname as a conninfo is nowhere documented for \connect, it
is reasonable to expect that it works because it does work in many other
contexts. Therefore this is backpatched all the way back to 9.0.
Author: David Fetter, Andrew Dunstan. Some editorialization by me
(probably earning a Gierth's "Sloppy" badge in the process.)
Reviewers: Andrew Gierth, Erik Rijkers, Pavel Stěhule, Stephen Frost,
Robert Haas, Andrew Dunstan.
2015-04-02 17:30:57 +02:00
|
|
|
*
|
|
|
|
* XXX this is duplicated in psql/common.c.
|
2015-04-02 16:10:22 +02:00
|
|
|
*/
|
|
|
|
static int
|
|
|
|
uri_prefix_length(const char *connstr)
|
|
|
|
{
|
|
|
|
if (strncmp(connstr, uri_designator,
|
|
|
|
sizeof(uri_designator) - 1) == 0)
|
|
|
|
return sizeof(uri_designator) - 1;
|
|
|
|
|
|
|
|
if (strncmp(connstr, short_uri_designator,
|
|
|
|
sizeof(short_uri_designator) - 1) == 0)
|
|
|
|
return sizeof(short_uri_designator) - 1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Recognized connection string either starts with a valid URI prefix or
|
|
|
|
* contains a "=" in it.
|
|
|
|
*
|
|
|
|
* Must be consistent with parse_connection_string: anything for which this
|
|
|
|
* returns true should at least look like it's parseable by that routine.
|
psql: fix \connect with URIs and conninfo strings
This is the second try at this, after fcef1617295 failed miserably and
had to be reverted: as it turns out, libpq cannot depend on libpgcommon
after all. Instead of shuffling code in the master branch, make that one
just like 9.4 and accept the duplication. (This was all my own mistake,
not the patch submitter's).
psql was already accepting conninfo strings as the first parameter in
\connect, but the way it worked wasn't sane; some of the other
parameters would get the previous connection's values, causing it to
connect to a completely unexpected server or, more likely, not finding
any server at all because of completely wrong combinations of
parameters.
Fix by explicitely checking for a conninfo-looking parameter in the
dbname position; if one is found, use its complete specification rather
than mix with the other arguments. Also, change tab-completion to not
try to complete conninfo/URI-looking "dbnames" and document that
conninfos are accepted as first argument.
There was a weak consensus to backpatch this, because while the behavior
of using the dbname as a conninfo is nowhere documented for \connect, it
is reasonable to expect that it works because it does work in many other
contexts. Therefore this is backpatched all the way back to 9.0.
Author: David Fetter, Andrew Dunstan. Some editorialization by me
(probably earning a Gierth's "Sloppy" badge in the process.)
Reviewers: Andrew Gierth, Erik Rijkers, Pavel Stěhule, Stephen Frost,
Robert Haas, Andrew Dunstan.
2015-04-02 17:30:57 +02:00
|
|
|
*
|
|
|
|
* XXX this is duplicated in psql/common.c
|
2015-04-02 16:10:22 +02:00
|
|
|
*/
|
|
|
|
static bool
|
|
|
|
recognized_connection_string(const char *connstr)
|
|
|
|
{
|
|
|
|
return uri_prefix_length(connstr) != 0 || strchr(connstr, '=') != NULL;
|
|
|
|
}
|
|
|
|
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
/*
|
|
|
|
* Subroutine for parse_connection_string
|
|
|
|
*
|
|
|
|
* Deal with a string containing key=value pairs.
|
1996-11-09 11:39:54 +01:00
|
|
|
*/
|
2000-03-11 04:08:37 +01:00
|
|
|
static PQconninfoOption *
|
2007-12-09 20:01:40 +01:00
|
|
|
conninfo_parse(const char *conninfo, PQExpBuffer errorMessage,
|
2008-09-22 16:21:44 +02:00
|
|
|
bool use_defaults)
|
1996-11-09 11:39:54 +01:00
|
|
|
{
|
|
|
|
char *pname;
|
|
|
|
char *pval;
|
|
|
|
char *buf;
|
|
|
|
char *cp;
|
|
|
|
char *cp2;
|
2000-03-11 04:08:37 +01:00
|
|
|
PQconninfoOption *options;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2000-03-11 04:08:37 +01:00
|
|
|
/* Make a working copy of PQconninfoOptions */
|
2012-03-22 17:08:34 +01:00
|
|
|
options = conninfo_init(errorMessage);
|
2000-03-11 04:08:37 +01:00
|
|
|
if (options == NULL)
|
|
|
|
return NULL;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2000-03-11 04:08:37 +01:00
|
|
|
/* Need a modifiable copy of the input string */
|
1996-11-09 11:39:54 +01:00
|
|
|
if ((buf = strdup(conninfo)) == NULL)
|
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBufferStr(errorMessage,
|
|
|
|
libpq_gettext("out of memory\n"));
|
2000-03-11 04:08:37 +01:00
|
|
|
PQconninfoFree(options);
|
|
|
|
return NULL;
|
1996-11-09 11:39:54 +01:00
|
|
|
}
|
|
|
|
cp = buf;
|
|
|
|
|
|
|
|
while (*cp)
|
1997-09-07 07:04:48 +02:00
|
|
|
{
|
1996-11-09 11:39:54 +01:00
|
|
|
/* Skip blanks before the parameter name */
|
2000-12-03 21:45:40 +01:00
|
|
|
if (isspace((unsigned char) *cp))
|
1996-11-09 11:39:54 +01:00
|
|
|
{
|
|
|
|
cp++;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Get the parameter name */
|
|
|
|
pname = cp;
|
|
|
|
while (*cp)
|
|
|
|
{
|
|
|
|
if (*cp == '=')
|
1997-09-07 07:04:48 +02:00
|
|
|
break;
|
2000-12-03 21:45:40 +01:00
|
|
|
if (isspace((unsigned char) *cp))
|
1997-09-07 07:04:48 +02:00
|
|
|
{
|
1996-11-09 11:39:54 +01:00
|
|
|
*cp++ = '\0';
|
|
|
|
while (*cp)
|
|
|
|
{
|
2000-12-03 21:45:40 +01:00
|
|
|
if (!isspace((unsigned char) *cp))
|
1997-09-07 07:04:48 +02:00
|
|
|
break;
|
|
|
|
cp++;
|
|
|
|
}
|
1996-11-09 11:39:54 +01:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
cp++;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Check that there is a following '=' */
|
|
|
|
if (*cp != '=')
|
1997-09-07 07:04:48 +02:00
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBuffer(errorMessage,
|
2001-07-15 15:45:04 +02:00
|
|
|
libpq_gettext("missing \"=\" after \"%s\" in connection info string\n"),
|
1999-08-31 03:37:37 +02:00
|
|
|
pname);
|
2000-03-11 04:08:37 +01:00
|
|
|
PQconninfoFree(options);
|
1996-11-09 11:39:54 +01:00
|
|
|
free(buf);
|
2000-03-11 04:08:37 +01:00
|
|
|
return NULL;
|
1997-09-07 07:04:48 +02:00
|
|
|
}
|
1996-11-09 11:39:54 +01:00
|
|
|
*cp++ = '\0';
|
|
|
|
|
|
|
|
/* Skip blanks after the '=' */
|
|
|
|
while (*cp)
|
|
|
|
{
|
2000-12-03 21:45:40 +01:00
|
|
|
if (!isspace((unsigned char) *cp))
|
1996-11-09 11:39:54 +01:00
|
|
|
break;
|
1997-09-07 07:04:48 +02:00
|
|
|
cp++;
|
1996-11-09 11:39:54 +01:00
|
|
|
}
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2000-03-11 04:08:37 +01:00
|
|
|
/* Get the parameter value */
|
1997-09-07 07:04:48 +02:00
|
|
|
pval = cp;
|
|
|
|
|
1996-11-09 11:39:54 +01:00
|
|
|
if (*cp != '\'')
|
|
|
|
{
|
|
|
|
cp2 = pval;
|
|
|
|
while (*cp)
|
1997-09-07 07:04:48 +02:00
|
|
|
{
|
2000-12-03 21:45:40 +01:00
|
|
|
if (isspace((unsigned char) *cp))
|
1997-09-07 07:04:48 +02:00
|
|
|
{
|
1996-11-09 11:39:54 +01:00
|
|
|
*cp++ = '\0';
|
1997-09-07 07:04:48 +02:00
|
|
|
break;
|
|
|
|
}
|
1996-11-09 11:39:54 +01:00
|
|
|
if (*cp == '\\')
|
1997-09-07 07:04:48 +02:00
|
|
|
{
|
1996-11-09 11:39:54 +01:00
|
|
|
cp++;
|
|
|
|
if (*cp != '\0')
|
|
|
|
*cp2++ = *cp++;
|
1997-09-07 07:04:48 +02:00
|
|
|
}
|
1996-11-09 11:39:54 +01:00
|
|
|
else
|
|
|
|
*cp2++ = *cp++;
|
1997-09-07 07:04:48 +02:00
|
|
|
}
|
1996-11-09 11:39:54 +01:00
|
|
|
*cp2 = '\0';
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
cp2 = pval;
|
|
|
|
cp++;
|
|
|
|
for (;;)
|
|
|
|
{
|
|
|
|
if (*cp == '\0')
|
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBufferStr(errorMessage,
|
|
|
|
libpq_gettext("unterminated quoted string in connection info string\n"));
|
2000-03-11 04:08:37 +01:00
|
|
|
PQconninfoFree(options);
|
1996-11-09 11:39:54 +01:00
|
|
|
free(buf);
|
2000-03-11 04:08:37 +01:00
|
|
|
return NULL;
|
1997-09-07 07:04:48 +02:00
|
|
|
}
|
1996-11-09 11:39:54 +01:00
|
|
|
if (*cp == '\\')
|
1997-09-07 07:04:48 +02:00
|
|
|
{
|
|
|
|
cp++;
|
1996-11-09 11:39:54 +01:00
|
|
|
if (*cp != '\0')
|
|
|
|
*cp2++ = *cp++;
|
|
|
|
continue;
|
1997-09-07 07:04:48 +02:00
|
|
|
}
|
1996-11-09 11:39:54 +01:00
|
|
|
if (*cp == '\'')
|
1997-09-07 07:04:48 +02:00
|
|
|
{
|
1996-11-09 11:39:54 +01:00
|
|
|
*cp2 = '\0';
|
1997-09-07 07:04:48 +02:00
|
|
|
cp++;
|
|
|
|
break;
|
|
|
|
}
|
1996-11-09 11:39:54 +01:00
|
|
|
*cp2++ = *cp++;
|
1997-09-07 07:04:48 +02:00
|
|
|
}
|
1996-11-09 11:39:54 +01:00
|
|
|
}
|
1997-09-07 07:04:48 +02:00
|
|
|
|
1996-11-09 11:39:54 +01:00
|
|
|
/*
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
* Now that we have the name and the value, store the record.
|
1996-11-09 11:39:54 +01:00
|
|
|
*/
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
if (!conninfo_storeval(options, pname, pval, errorMessage, false, false))
|
2005-06-12 02:00:21 +02:00
|
|
|
{
|
|
|
|
PQconninfoFree(options);
|
|
|
|
free(buf);
|
|
|
|
return NULL;
|
|
|
|
}
|
2000-10-17 03:00:58 +02:00
|
|
|
}
|
|
|
|
|
2003-04-28 06:29:12 +02:00
|
|
|
/* Done with the modifiable input string */
|
|
|
|
free(buf);
|
|
|
|
|
2008-09-22 15:55:14 +02:00
|
|
|
/*
|
2012-03-22 17:08:34 +01:00
|
|
|
* Add in defaults if the caller wants that.
|
1996-11-09 11:39:54 +01:00
|
|
|
*/
|
2012-03-22 17:08:34 +01:00
|
|
|
if (use_defaults)
|
1996-11-09 11:39:54 +01:00
|
|
|
{
|
2012-03-22 17:08:34 +01:00
|
|
|
if (!conninfo_add_defaults(options, errorMessage))
|
1996-11-09 11:39:54 +01:00
|
|
|
{
|
2012-03-22 17:08:34 +01:00
|
|
|
PQconninfoFree(options);
|
|
|
|
return NULL;
|
1996-11-09 11:39:54 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2000-03-11 04:08:37 +01:00
|
|
|
return options;
|
1996-11-09 11:39:54 +01:00
|
|
|
}
|
|
|
|
|
2010-01-28 07:28:26 +01:00
|
|
|
/*
|
|
|
|
* Conninfo array parser routine
|
|
|
|
*
|
|
|
|
* If successful, a malloc'd PQconninfoOption array is returned.
|
|
|
|
* If not successful, NULL is returned and an error message is
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
* appended to errorMessage.
|
2010-01-28 07:28:26 +01:00
|
|
|
* Defaults are supplied (from a service file, environment variables, etc)
|
2017-08-16 06:22:32 +02:00
|
|
|
* for unspecified options, but only if use_defaults is true.
|
2010-02-05 04:09:05 +01:00
|
|
|
*
|
2014-11-25 16:12:07 +01:00
|
|
|
* If expand_dbname is non-zero, and the value passed for the first occurrence
|
|
|
|
* of "dbname" keyword is a connection string (as indicated by
|
2015-04-02 16:10:22 +02:00
|
|
|
* recognized_connection_string) then parse and process it, overriding any
|
2014-11-25 16:12:07 +01:00
|
|
|
* previously processed conflicting keywords. Subsequent keywords will take
|
2016-08-08 16:07:46 +02:00
|
|
|
* precedence, however. In-tree programs generally specify expand_dbname=true,
|
|
|
|
* so command-line arguments naming a database can use a connection string.
|
|
|
|
* Some code acquires arbitrary database names from known-literal sources like
|
|
|
|
* PQdb(), PQconninfoParse() and pg_database.datname. When connecting to such
|
|
|
|
* a database, in-tree code first wraps the name in a connection string.
|
2010-01-28 07:28:26 +01:00
|
|
|
*/
|
|
|
|
static PQconninfoOption *
|
2011-09-26 00:52:48 +02:00
|
|
|
conninfo_array_parse(const char *const *keywords, const char *const *values,
|
2010-02-05 04:09:05 +01:00
|
|
|
PQExpBuffer errorMessage, bool use_defaults,
|
|
|
|
int expand_dbname)
|
2010-01-28 07:28:26 +01:00
|
|
|
{
|
|
|
|
PQconninfoOption *options;
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
PQconninfoOption *dbname_options = NULL;
|
2010-01-28 07:28:26 +01:00
|
|
|
PQconninfoOption *option;
|
|
|
|
int i = 0;
|
|
|
|
|
2010-02-05 04:09:05 +01:00
|
|
|
/*
|
|
|
|
* If expand_dbname is non-zero, check keyword "dbname" to see if val is
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
* actually a recognized connection string.
|
2010-02-05 04:09:05 +01:00
|
|
|
*/
|
|
|
|
while (expand_dbname && keywords[i])
|
|
|
|
{
|
|
|
|
const char *pname = keywords[i];
|
|
|
|
const char *pvalue = values[i];
|
|
|
|
|
|
|
|
/* first find "dbname" if any */
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
if (strcmp(pname, "dbname") == 0 && pvalue)
|
2010-02-05 04:09:05 +01:00
|
|
|
{
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
/*
|
|
|
|
* If value is a connection string, parse it, but do not use
|
|
|
|
* defaults here -- those get picked up later. We only want to
|
|
|
|
* override for those parameters actually passed.
|
|
|
|
*/
|
2015-04-02 16:10:22 +02:00
|
|
|
if (recognized_connection_string(pvalue))
|
2010-02-05 04:09:05 +01:00
|
|
|
{
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
dbname_options = parse_connection_string(pvalue, errorMessage, false);
|
|
|
|
if (dbname_options == NULL)
|
2010-02-05 04:09:05 +01:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
++i;
|
|
|
|
}
|
|
|
|
|
2010-01-28 07:28:26 +01:00
|
|
|
/* Make a working copy of PQconninfoOptions */
|
2012-03-22 17:08:34 +01:00
|
|
|
options = conninfo_init(errorMessage);
|
2010-01-28 07:28:26 +01:00
|
|
|
if (options == NULL)
|
|
|
|
{
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
PQconninfoFree(dbname_options);
|
2010-01-28 07:28:26 +01:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Parse the keywords/values arrays */
|
2012-03-22 17:08:34 +01:00
|
|
|
i = 0;
|
2010-01-28 07:28:26 +01:00
|
|
|
while (keywords[i])
|
|
|
|
{
|
|
|
|
const char *pname = keywords[i];
|
|
|
|
const char *pvalue = values[i];
|
|
|
|
|
2014-04-19 14:41:51 +02:00
|
|
|
if (pvalue != NULL && pvalue[0] != '\0')
|
2010-01-28 07:28:26 +01:00
|
|
|
{
|
|
|
|
/* Search for the param record */
|
|
|
|
for (option = options; option->keyword != NULL; option++)
|
|
|
|
{
|
|
|
|
if (strcmp(option->keyword, pname) == 0)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Check for invalid connection option */
|
|
|
|
if (option->keyword == NULL)
|
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBuffer(errorMessage,
|
2010-01-28 07:28:26 +01:00
|
|
|
libpq_gettext("invalid connection option \"%s\"\n"),
|
|
|
|
pname);
|
|
|
|
PQconninfoFree(options);
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
PQconninfoFree(dbname_options);
|
2010-01-28 07:28:26 +01:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2010-02-05 04:09:05 +01:00
|
|
|
/*
|
2014-11-25 16:12:07 +01:00
|
|
|
* If we are on the first dbname parameter, and we have a parsed
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
* connection string, copy those parameters across, overriding any
|
|
|
|
* existing previous settings.
|
2010-02-05 04:09:05 +01:00
|
|
|
*/
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
if (strcmp(pname, "dbname") == 0 && dbname_options)
|
2010-02-05 04:09:05 +01:00
|
|
|
{
|
|
|
|
PQconninfoOption *str_option;
|
|
|
|
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
for (str_option = dbname_options; str_option->keyword != NULL; str_option++)
|
2010-02-05 04:09:05 +01:00
|
|
|
{
|
|
|
|
if (str_option->val != NULL)
|
|
|
|
{
|
|
|
|
int k;
|
|
|
|
|
|
|
|
for (k = 0; options[k].keyword; k++)
|
|
|
|
{
|
|
|
|
if (strcmp(options[k].keyword, str_option->keyword) == 0)
|
|
|
|
{
|
2022-06-16 21:50:56 +02:00
|
|
|
free(options[k].val);
|
2010-02-05 04:09:05 +01:00
|
|
|
options[k].val = strdup(str_option->val);
|
2014-11-25 11:55:00 +01:00
|
|
|
if (!options[k].val)
|
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBufferStr(errorMessage,
|
|
|
|
libpq_gettext("out of memory\n"));
|
2014-11-25 11:55:00 +01:00
|
|
|
PQconninfoFree(options);
|
|
|
|
PQconninfoFree(dbname_options);
|
|
|
|
return NULL;
|
|
|
|
}
|
2010-02-05 04:09:05 +01:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2015-05-24 03:35:49 +02:00
|
|
|
|
2014-11-25 16:12:07 +01:00
|
|
|
/*
|
|
|
|
* Forget the parsed connection string, so that any subsequent
|
|
|
|
* dbname parameters will not be expanded.
|
|
|
|
*/
|
|
|
|
PQconninfoFree(dbname_options);
|
|
|
|
dbname_options = NULL;
|
2010-02-05 04:09:05 +01:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Store the value, overriding previous settings
|
|
|
|
*/
|
2022-06-16 21:50:56 +02:00
|
|
|
free(option->val);
|
2010-02-05 04:09:05 +01:00
|
|
|
option->val = strdup(pvalue);
|
|
|
|
if (!option->val)
|
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBufferStr(errorMessage,
|
|
|
|
libpq_gettext("out of memory\n"));
|
2010-02-05 04:09:05 +01:00
|
|
|
PQconninfoFree(options);
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
PQconninfoFree(dbname_options);
|
2010-02-05 04:09:05 +01:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
}
|
2010-01-28 07:28:26 +01:00
|
|
|
}
|
|
|
|
++i;
|
|
|
|
}
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
PQconninfoFree(dbname_options);
|
2010-01-28 07:28:26 +01:00
|
|
|
|
|
|
|
/*
|
2012-03-22 17:08:34 +01:00
|
|
|
* Add in defaults if the caller wants that.
|
2010-01-28 07:28:26 +01:00
|
|
|
*/
|
2012-03-22 17:08:34 +01:00
|
|
|
if (use_defaults)
|
|
|
|
{
|
|
|
|
if (!conninfo_add_defaults(options, errorMessage))
|
|
|
|
{
|
|
|
|
PQconninfoFree(options);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return options;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Add the default values for any unspecified options to the connection
|
|
|
|
* options array.
|
|
|
|
*
|
|
|
|
* Defaults are obtained from a service file, environment variables, etc.
|
|
|
|
*
|
2017-08-16 06:22:32 +02:00
|
|
|
* Returns true if successful, otherwise false; errorMessage, if supplied,
|
2013-12-03 17:11:56 +01:00
|
|
|
* is filled in upon failure. Note that failure to locate a default value
|
|
|
|
* is not an error condition here --- we just leave the option's value as
|
|
|
|
* NULL.
|
2012-03-22 17:08:34 +01:00
|
|
|
*/
|
|
|
|
static bool
|
|
|
|
conninfo_add_defaults(PQconninfoOption *options, PQExpBuffer errorMessage)
|
|
|
|
{
|
|
|
|
PQconninfoOption *option;
|
|
|
|
char *tmp;
|
2010-01-28 07:28:26 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If there's a service spec, use it to obtain any not-explicitly-given
|
2013-12-03 17:11:56 +01:00
|
|
|
* parameters. Ignore error if no error message buffer is passed because
|
|
|
|
* there is no way to pass back the failure message.
|
2010-01-28 07:28:26 +01:00
|
|
|
*/
|
2013-12-03 17:11:56 +01:00
|
|
|
if (parseServiceInfo(options, errorMessage) != 0 && errorMessage)
|
2012-03-22 17:08:34 +01:00
|
|
|
return false;
|
2010-01-28 07:28:26 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Get the fallback resources for parameters not specified in the conninfo
|
|
|
|
* string nor the service.
|
|
|
|
*/
|
|
|
|
for (option = options; option->keyword != NULL; option++)
|
|
|
|
{
|
|
|
|
if (option->val != NULL)
|
|
|
|
continue; /* Value was in conninfo or service */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Try to get the environment variable fallback
|
|
|
|
*/
|
|
|
|
if (option->envvar != NULL)
|
|
|
|
{
|
|
|
|
if ((tmp = getenv(option->envvar)) != NULL)
|
|
|
|
{
|
|
|
|
option->val = strdup(tmp);
|
|
|
|
if (!option->val)
|
|
|
|
{
|
2013-12-03 17:11:56 +01:00
|
|
|
if (errorMessage)
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBufferStr(errorMessage,
|
|
|
|
libpq_gettext("out of memory\n"));
|
2012-03-22 17:08:34 +01:00
|
|
|
return false;
|
2010-01-28 07:28:26 +01:00
|
|
|
}
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-05-08 16:24:24 +02:00
|
|
|
/*
|
|
|
|
* Interpret the deprecated PGREQUIRESSL environment variable. Per
|
|
|
|
* tradition, translate values starting with "1" to sslmode=require,
|
|
|
|
* and ignore other values. Given both PGREQUIRESSL=1 and PGSSLMODE,
|
|
|
|
* PGSSLMODE takes precedence; the opposite was true before v9.3.
|
|
|
|
*/
|
|
|
|
if (strcmp(option->keyword, "sslmode") == 0)
|
|
|
|
{
|
|
|
|
const char *requiresslenv = getenv("PGREQUIRESSL");
|
|
|
|
|
|
|
|
if (requiresslenv != NULL && requiresslenv[0] == '1')
|
|
|
|
{
|
|
|
|
option->val = strdup("require");
|
|
|
|
if (!option->val)
|
|
|
|
{
|
|
|
|
if (errorMessage)
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBufferStr(errorMessage,
|
|
|
|
libpq_gettext("out of memory\n"));
|
2017-05-08 16:24:24 +02:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-01-28 07:28:26 +01:00
|
|
|
/*
|
2012-03-22 17:08:34 +01:00
|
|
|
* No environment variable specified or the variable isn't set - try
|
|
|
|
* compiled-in default
|
2010-01-28 07:28:26 +01:00
|
|
|
*/
|
|
|
|
if (option->compiled != NULL)
|
|
|
|
{
|
|
|
|
option->val = strdup(option->compiled);
|
|
|
|
if (!option->val)
|
|
|
|
{
|
2013-12-03 17:11:56 +01:00
|
|
|
if (errorMessage)
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBufferStr(errorMessage,
|
|
|
|
libpq_gettext("out of memory\n"));
|
2012-03-22 17:08:34 +01:00
|
|
|
return false;
|
2010-01-28 07:28:26 +01:00
|
|
|
}
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
Fix libpq's behavior when /etc/passwd isn't readable.
Some users run their applications in chroot environments that lack an
/etc/passwd file. This means that the current UID's user name and home
directory are not obtainable. libpq used to be all right with that,
so long as the database role name to use was specified explicitly.
But commit a4c8f14364c27508233f8a31ac4b10a4c90235a9 broke such cases by
causing any failure of pg_fe_getauthname() to be treated as a hard error.
In any case it did little to advance its nominal goal of causing errors
in pg_fe_getauthname() to be reported better. So revert that and instead
put some real error-reporting code in place. This requires changes to the
APIs of pg_fe_getauthname() and pqGetpwuid(), since the latter had
departed from the POSIX-specified API of getpwuid_r() in a way that made
it impossible to distinguish actual lookup errors from "no such user".
To allow such failures to be reported, while not failing if the caller
supplies a role name, add a second call of pg_fe_getauthname() in
connectOptions2(). This is a tad ugly, and could perhaps be avoided with
some refactoring of PQsetdbLogin(), but I'll leave that idea for later.
(Note that the complained-of misbehavior only occurs in PQsetdbLogin,
not when using the PQconnect functions, because in the latter we will
never bother to call pg_fe_getauthname() if the user gives a role name.)
In passing also clean up the Windows-side usage of GetUserName(): the
recommended buffer size is 257 bytes, the passed buffer length should
be the buffer size not buffer size less 1, and any error is reported
by GetLastError() not errno.
Per report from Christoph Berg. Back-patch to 9.4 where the chroot
failure case was introduced. The generally poor reporting of errors
here is of very long standing, of course, but given the lack of field
complaints about it we won't risk changing these APIs further back
(even though they're theoretically internal to libpq).
2015-01-11 18:35:44 +01:00
|
|
|
* Special handling for "user" option. Note that if pg_fe_getauthname
|
|
|
|
* fails, we just leave the value as NULL; there's no need for this to
|
|
|
|
* be an error condition if the caller provides a user name. The only
|
|
|
|
* reason we do this now at all is so that callers of PQconndefaults
|
|
|
|
* will see a correct default (barring error, of course).
|
2010-01-28 07:28:26 +01:00
|
|
|
*/
|
|
|
|
if (strcmp(option->keyword, "user") == 0)
|
|
|
|
{
|
Fix libpq's behavior when /etc/passwd isn't readable.
Some users run their applications in chroot environments that lack an
/etc/passwd file. This means that the current UID's user name and home
directory are not obtainable. libpq used to be all right with that,
so long as the database role name to use was specified explicitly.
But commit a4c8f14364c27508233f8a31ac4b10a4c90235a9 broke such cases by
causing any failure of pg_fe_getauthname() to be treated as a hard error.
In any case it did little to advance its nominal goal of causing errors
in pg_fe_getauthname() to be reported better. So revert that and instead
put some real error-reporting code in place. This requires changes to the
APIs of pg_fe_getauthname() and pqGetpwuid(), since the latter had
departed from the POSIX-specified API of getpwuid_r() in a way that made
it impossible to distinguish actual lookup errors from "no such user".
To allow such failures to be reported, while not failing if the caller
supplies a role name, add a second call of pg_fe_getauthname() in
connectOptions2(). This is a tad ugly, and could perhaps be avoided with
some refactoring of PQsetdbLogin(), but I'll leave that idea for later.
(Note that the complained-of misbehavior only occurs in PQsetdbLogin,
not when using the PQconnect functions, because in the latter we will
never bother to call pg_fe_getauthname() if the user gives a role name.)
In passing also clean up the Windows-side usage of GetUserName(): the
recommended buffer size is 257 bytes, the passed buffer length should
be the buffer size not buffer size less 1, and any error is reported
by GetLastError() not errno.
Per report from Christoph Berg. Back-patch to 9.4 where the chroot
failure case was introduced. The generally poor reporting of errors
here is of very long standing, of course, but given the lack of field
complaints about it we won't risk changing these APIs further back
(even though they're theoretically internal to libpq).
2015-01-11 18:35:44 +01:00
|
|
|
option->val = pg_fe_getauthname(NULL);
|
2010-01-28 07:28:26 +01:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-03-22 17:08:34 +01:00
|
|
|
return true;
|
2010-01-28 07:28:26 +01:00
|
|
|
}
|
1996-11-09 11:39:54 +01:00
|
|
|
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
/*
|
|
|
|
* Subroutine for parse_connection_string
|
|
|
|
*
|
|
|
|
* Deal with a URI connection string.
|
|
|
|
*/
|
|
|
|
static PQconninfoOption *
|
|
|
|
conninfo_uri_parse(const char *uri, PQExpBuffer errorMessage,
|
|
|
|
bool use_defaults)
|
|
|
|
{
|
|
|
|
PQconninfoOption *options;
|
|
|
|
|
|
|
|
/* Make a working copy of PQconninfoOptions */
|
|
|
|
options = conninfo_init(errorMessage);
|
|
|
|
if (options == NULL)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
if (!conninfo_uri_parse_options(options, uri, errorMessage))
|
|
|
|
{
|
|
|
|
PQconninfoFree(options);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Add in defaults if the caller wants that.
|
|
|
|
*/
|
|
|
|
if (use_defaults)
|
|
|
|
{
|
|
|
|
if (!conninfo_add_defaults(options, errorMessage))
|
|
|
|
{
|
|
|
|
PQconninfoFree(options);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return options;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* conninfo_uri_parse_options
|
|
|
|
* Actual URI parser.
|
|
|
|
*
|
|
|
|
* If successful, returns true while the options array is filled with parsed
|
|
|
|
* options from the URI.
|
|
|
|
* If not successful, returns false and fills errorMessage accordingly.
|
|
|
|
*
|
2012-05-28 21:44:34 +02:00
|
|
|
* Parses the connection URI string in 'uri' according to the URI syntax (RFC
|
|
|
|
* 3986):
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
*
|
2012-05-28 21:44:34 +02:00
|
|
|
* postgresql://[user[:password]@][netloc][:port][/dbname][?param1=value1&...]
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
*
|
2012-05-28 21:44:34 +02:00
|
|
|
* where "netloc" is a hostname, an IPv4 address, or an IPv6 address surrounded
|
2016-11-03 14:25:20 +01:00
|
|
|
* by literal square brackets. As an extension, we also allow multiple
|
|
|
|
* netloc[:port] specifications, separated by commas:
|
|
|
|
*
|
|
|
|
* postgresql://[user[:password]@][netloc][:port][,...][/dbname][?param1=value1&...]
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
*
|
2012-05-28 21:44:34 +02:00
|
|
|
* Any of the URI parts might use percent-encoding (%xy).
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
*/
|
|
|
|
static bool
|
|
|
|
conninfo_uri_parse_options(PQconninfoOption *options, const char *uri,
|
|
|
|
PQExpBuffer errorMessage)
|
|
|
|
{
|
|
|
|
int prefix_len;
|
|
|
|
char *p;
|
2016-11-22 21:32:13 +01:00
|
|
|
char *buf = NULL;
|
2014-11-25 11:55:00 +01:00
|
|
|
char *start;
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
char prevchar = '\0';
|
2012-05-28 21:44:34 +02:00
|
|
|
char *user = NULL;
|
|
|
|
char *host = NULL;
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
bool retval = false;
|
2016-11-03 14:25:20 +01:00
|
|
|
PQExpBufferData hostbuf;
|
|
|
|
PQExpBufferData portbuf;
|
|
|
|
|
|
|
|
initPQExpBuffer(&hostbuf);
|
|
|
|
initPQExpBuffer(&portbuf);
|
|
|
|
if (PQExpBufferDataBroken(hostbuf) || PQExpBufferDataBroken(portbuf))
|
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBufferStr(errorMessage,
|
|
|
|
libpq_gettext("out of memory\n"));
|
2016-11-22 21:32:13 +01:00
|
|
|
goto cleanup;
|
2016-11-03 14:25:20 +01:00
|
|
|
}
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
|
2014-11-25 11:55:00 +01:00
|
|
|
/* need a modifiable copy of the input URI */
|
|
|
|
buf = strdup(uri);
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
if (buf == NULL)
|
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBufferStr(errorMessage,
|
|
|
|
libpq_gettext("out of memory\n"));
|
2016-11-22 21:32:13 +01:00
|
|
|
goto cleanup;
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
}
|
2014-11-25 11:55:00 +01:00
|
|
|
start = buf;
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
|
|
|
|
/* Skip the URI prefix */
|
2015-04-02 16:10:22 +02:00
|
|
|
prefix_len = uri_prefix_length(uri);
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
if (prefix_len == 0)
|
|
|
|
{
|
|
|
|
/* Should never happen */
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBuffer(errorMessage,
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
libpq_gettext("invalid URI propagated to internal parser routine: \"%s\"\n"),
|
|
|
|
uri);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
start += prefix_len;
|
|
|
|
p = start;
|
|
|
|
|
|
|
|
/* Look ahead for possible user credentials designator */
|
|
|
|
while (*p && *p != '@' && *p != '/')
|
|
|
|
++p;
|
|
|
|
if (*p == '@')
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Found username/password designator, so URI should be of the form
|
|
|
|
* "scheme://user[:password]@[netloc]".
|
|
|
|
*/
|
|
|
|
user = start;
|
|
|
|
|
|
|
|
p = user;
|
|
|
|
while (*p != ':' && *p != '@')
|
|
|
|
++p;
|
|
|
|
|
|
|
|
/* Save last char and cut off at end of user name */
|
|
|
|
prevchar = *p;
|
|
|
|
*p = '\0';
|
|
|
|
|
2012-05-28 21:44:34 +02:00
|
|
|
if (*user &&
|
|
|
|
!conninfo_storeval(options, "user", user,
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
errorMessage, false, true))
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (prevchar == ':')
|
|
|
|
{
|
|
|
|
const char *password = p + 1;
|
|
|
|
|
|
|
|
while (*p != '@')
|
|
|
|
++p;
|
|
|
|
*p = '\0';
|
|
|
|
|
2012-05-28 21:44:34 +02:00
|
|
|
if (*password &&
|
|
|
|
!conninfo_storeval(options, "password", password,
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
errorMessage, false, true))
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Advance past end of parsed user name or password token */
|
|
|
|
++p;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* No username/password designator found. Reset to start of URI.
|
|
|
|
*/
|
|
|
|
p = start;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2016-11-03 14:25:20 +01:00
|
|
|
* There may be multiple netloc[:port] pairs, each separated from the next
|
|
|
|
* by a comma. When we initially enter this loop, "p" has been
|
|
|
|
* incremented past optional URI credential information at this point and
|
|
|
|
* now points at the "netloc" part of the URI. On subsequent loop
|
|
|
|
* iterations, "p" has been incremented past the comma separator and now
|
|
|
|
* points at the start of the next "netloc".
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
*/
|
2016-11-03 14:25:20 +01:00
|
|
|
for (;;)
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
{
|
2012-05-28 21:44:34 +02:00
|
|
|
/*
|
2016-11-03 14:25:20 +01:00
|
|
|
* Look for IPv6 address.
|
2012-05-28 21:44:34 +02:00
|
|
|
*/
|
2016-11-03 14:25:20 +01:00
|
|
|
if (*p == '[')
|
2012-05-28 21:44:34 +02:00
|
|
|
{
|
2016-11-03 14:25:20 +01:00
|
|
|
host = ++p;
|
|
|
|
while (*p && *p != ']')
|
|
|
|
++p;
|
|
|
|
if (!*p)
|
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBuffer(errorMessage,
|
2016-11-03 14:25:20 +01:00
|
|
|
libpq_gettext("end of string reached when looking for matching \"]\" in IPv6 host address in URI: \"%s\"\n"),
|
|
|
|
uri);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
if (p == host)
|
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBuffer(errorMessage,
|
2016-11-03 14:25:20 +01:00
|
|
|
libpq_gettext("IPv6 host address may not be empty in URI: \"%s\"\n"),
|
|
|
|
uri);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Cut off the bracket and advance */
|
|
|
|
*(p++) = '\0';
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The address may be followed by a port specifier or a slash or a
|
|
|
|
* query or a separator comma.
|
|
|
|
*/
|
|
|
|
if (*p && *p != ':' && *p != '/' && *p != '?' && *p != ',')
|
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBuffer(errorMessage,
|
2016-11-03 14:25:20 +01:00
|
|
|
libpq_gettext("unexpected character \"%c\" at position %d in URI (expected \":\" or \"/\"): \"%s\"\n"),
|
|
|
|
*p, (int) (p - buf + 1), uri);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
2012-05-28 21:44:34 +02:00
|
|
|
}
|
2016-11-03 14:25:20 +01:00
|
|
|
else
|
|
|
|
{
|
|
|
|
/* not an IPv6 address: DNS-named or IPv4 netloc */
|
|
|
|
host = p;
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
|
2016-11-03 14:25:20 +01:00
|
|
|
/*
|
|
|
|
* Look for port specifier (colon) or end of host specifier
|
|
|
|
* (slash) or query (question mark) or host separator (comma).
|
|
|
|
*/
|
|
|
|
while (*p && *p != ':' && *p != '/' && *p != '?' && *p != ',')
|
|
|
|
++p;
|
|
|
|
}
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
|
2016-11-03 14:25:20 +01:00
|
|
|
/* Save the hostname terminator before we null it */
|
|
|
|
prevchar = *p;
|
|
|
|
*p = '\0';
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
|
2016-11-03 14:25:20 +01:00
|
|
|
appendPQExpBufferStr(&hostbuf, host);
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
|
2016-11-03 14:25:20 +01:00
|
|
|
if (prevchar == ':')
|
|
|
|
{
|
|
|
|
const char *port = ++p; /* advance past host terminator */
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
|
2016-11-03 14:25:20 +01:00
|
|
|
while (*p && *p != '/' && *p != '?' && *p != ',')
|
|
|
|
++p;
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
|
2016-11-03 14:25:20 +01:00
|
|
|
prevchar = *p;
|
|
|
|
*p = '\0';
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
|
2016-11-03 14:25:20 +01:00
|
|
|
appendPQExpBufferStr(&portbuf, port);
|
|
|
|
}
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
|
2016-11-03 14:25:20 +01:00
|
|
|
if (prevchar != ',')
|
|
|
|
break;
|
|
|
|
++p; /* advance past comma separator */
|
2017-08-16 05:34:39 +02:00
|
|
|
appendPQExpBufferChar(&hostbuf, ',');
|
|
|
|
appendPQExpBufferChar(&portbuf, ',');
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
}
|
|
|
|
|
2016-11-03 14:25:20 +01:00
|
|
|
/* Save final values for host and port. */
|
|
|
|
if (PQExpBufferDataBroken(hostbuf) || PQExpBufferDataBroken(portbuf))
|
|
|
|
goto cleanup;
|
|
|
|
if (hostbuf.data[0] &&
|
|
|
|
!conninfo_storeval(options, "host", hostbuf.data,
|
|
|
|
errorMessage, false, true))
|
|
|
|
goto cleanup;
|
|
|
|
if (portbuf.data[0] &&
|
|
|
|
!conninfo_storeval(options, "port", portbuf.data,
|
|
|
|
errorMessage, false, true))
|
|
|
|
goto cleanup;
|
|
|
|
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
if (prevchar && prevchar != '?')
|
|
|
|
{
|
|
|
|
const char *dbname = ++p; /* advance past host terminator */
|
|
|
|
|
|
|
|
/* Look for query parameters */
|
|
|
|
while (*p && *p != '?')
|
|
|
|
++p;
|
|
|
|
|
|
|
|
prevchar = *p;
|
|
|
|
*p = '\0';
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Avoid setting dbname to an empty string, as it forces the default
|
|
|
|
* value (username) and ignores $PGDATABASE, as opposed to not setting
|
|
|
|
* it at all.
|
|
|
|
*/
|
|
|
|
if (*dbname &&
|
|
|
|
!conninfo_storeval(options, "dbname", dbname,
|
|
|
|
errorMessage, false, true))
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (prevchar)
|
|
|
|
{
|
|
|
|
++p; /* advance past terminator */
|
|
|
|
|
|
|
|
if (!conninfo_uri_parse_params(p, options, errorMessage))
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* everything parsed okay */
|
|
|
|
retval = true;
|
|
|
|
|
|
|
|
cleanup:
|
2016-11-03 14:25:20 +01:00
|
|
|
termPQExpBuffer(&hostbuf);
|
|
|
|
termPQExpBuffer(&portbuf);
|
2022-06-16 21:50:56 +02:00
|
|
|
free(buf);
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Connection URI parameters parser routine
|
|
|
|
*
|
|
|
|
* If successful, returns true while connOptions is filled with parsed
|
|
|
|
* parameters. Otherwise, returns false and fills errorMessage appropriately.
|
|
|
|
*
|
|
|
|
* Destructively modifies 'params' buffer.
|
|
|
|
*/
|
|
|
|
static bool
|
|
|
|
conninfo_uri_parse_params(char *params,
|
|
|
|
PQconninfoOption *connOptions,
|
|
|
|
PQExpBuffer errorMessage)
|
|
|
|
{
|
|
|
|
while (*params)
|
|
|
|
{
|
2012-05-28 21:44:34 +02:00
|
|
|
char *keyword = params;
|
|
|
|
char *value = NULL;
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
char *p = params;
|
2012-05-28 21:44:34 +02:00
|
|
|
bool malloced = false;
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
int oldmsglen;
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Scan the params string for '=' and '&', marking the end of keyword
|
|
|
|
* and value respectively.
|
|
|
|
*/
|
|
|
|
for (;;)
|
|
|
|
{
|
|
|
|
if (*p == '=')
|
|
|
|
{
|
|
|
|
/* Was there '=' already? */
|
|
|
|
if (value != NULL)
|
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBuffer(errorMessage,
|
2012-07-02 20:12:46 +02:00
|
|
|
libpq_gettext("extra key/value separator \"=\" in URI query parameter: \"%s\"\n"),
|
2015-02-21 19:27:12 +01:00
|
|
|
keyword);
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
/* Cut off keyword, advance to value */
|
2015-02-21 19:27:12 +01:00
|
|
|
*p++ = '\0';
|
|
|
|
value = p;
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
}
|
|
|
|
else if (*p == '&' || *p == '\0')
|
|
|
|
{
|
2015-02-21 19:27:12 +01:00
|
|
|
/*
|
|
|
|
* If not at the end, cut off value and advance; leave p
|
|
|
|
* pointing to start of the next parameter, if any.
|
|
|
|
*/
|
|
|
|
if (*p != '\0')
|
|
|
|
*p++ = '\0';
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
/* Was there '=' at all? */
|
|
|
|
if (value == NULL)
|
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBuffer(errorMessage,
|
2012-07-02 20:12:46 +02:00
|
|
|
libpq_gettext("missing key/value separator \"=\" in URI query parameter: \"%s\"\n"),
|
2015-02-21 19:27:12 +01:00
|
|
|
keyword);
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
return false;
|
|
|
|
}
|
2015-02-21 19:27:12 +01:00
|
|
|
/* Got keyword and value, go process them. */
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
break;
|
|
|
|
}
|
2015-02-21 18:59:25 +01:00
|
|
|
else
|
|
|
|
++p; /* Advance over all other bytes. */
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
}
|
|
|
|
|
2012-05-28 21:44:34 +02:00
|
|
|
keyword = conninfo_uri_decode(keyword, errorMessage);
|
|
|
|
if (keyword == NULL)
|
|
|
|
{
|
|
|
|
/* conninfo_uri_decode already set an error message */
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
value = conninfo_uri_decode(value, errorMessage);
|
|
|
|
if (value == NULL)
|
|
|
|
{
|
|
|
|
/* conninfo_uri_decode already set an error message */
|
|
|
|
free(keyword);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
malloced = true;
|
|
|
|
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
/*
|
2012-05-28 21:44:34 +02:00
|
|
|
* Special keyword handling for improved JDBC compatibility.
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
*/
|
|
|
|
if (strcmp(keyword, "ssl") == 0 &&
|
|
|
|
strcmp(value, "true") == 0)
|
|
|
|
{
|
2012-05-28 21:44:34 +02:00
|
|
|
free(keyword);
|
|
|
|
free(value);
|
|
|
|
malloced = false;
|
|
|
|
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
keyword = "sslmode";
|
|
|
|
value = "require";
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Store the value if the corresponding option exists; ignore
|
2012-05-28 21:44:34 +02:00
|
|
|
* otherwise. At this point both keyword and value are not
|
|
|
|
* URI-encoded.
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
*/
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
oldmsglen = errorMessage->len;
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
if (!conninfo_storeval(connOptions, keyword, value,
|
2012-05-28 21:44:34 +02:00
|
|
|
errorMessage, true, false))
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
{
|
2015-02-21 19:27:12 +01:00
|
|
|
/* Insert generic message if conninfo_storeval didn't give one. */
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
if (errorMessage->len == oldmsglen)
|
|
|
|
appendPQExpBuffer(errorMessage,
|
2015-02-21 19:27:12 +01:00
|
|
|
libpq_gettext("invalid URI query parameter: \"%s\"\n"),
|
|
|
|
keyword);
|
|
|
|
/* And fail. */
|
2012-08-24 04:33:04 +02:00
|
|
|
if (malloced)
|
|
|
|
{
|
|
|
|
free(keyword);
|
|
|
|
free(value);
|
|
|
|
}
|
2012-06-08 14:46:39 +02:00
|
|
|
return false;
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
}
|
2015-02-21 19:27:12 +01:00
|
|
|
|
2012-05-28 21:44:34 +02:00
|
|
|
if (malloced)
|
|
|
|
{
|
|
|
|
free(keyword);
|
|
|
|
free(value);
|
|
|
|
}
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
|
2015-02-21 19:27:12 +01:00
|
|
|
/* Proceed to next key=value pair, if any */
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
params = p;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Connection URI decoder routine
|
|
|
|
*
|
|
|
|
* If successful, returns the malloc'd decoded string.
|
|
|
|
* If not successful, returns NULL and fills errorMessage accordingly.
|
|
|
|
*
|
|
|
|
* The string is decoded by replacing any percent-encoded tokens with
|
|
|
|
* corresponding characters, while preserving any non-encoded characters. A
|
|
|
|
* percent-encoded token is a character triplet: a percent sign, followed by a
|
|
|
|
* pair of hexadecimal digits (0-9A-F), where lower- and upper-case letters are
|
|
|
|
* treated identically.
|
|
|
|
*/
|
1999-02-05 05:25:55 +01:00
|
|
|
static char *
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
conninfo_uri_decode(const char *str, PQExpBuffer errorMessage)
|
|
|
|
{
|
2014-11-25 11:55:00 +01:00
|
|
|
char *buf;
|
|
|
|
char *p;
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
const char *q = str;
|
|
|
|
|
2014-11-25 11:55:00 +01:00
|
|
|
buf = malloc(strlen(str) + 1);
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
if (buf == NULL)
|
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBufferStr(errorMessage, libpq_gettext("out of memory\n"));
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
return NULL;
|
|
|
|
}
|
2014-11-25 11:55:00 +01:00
|
|
|
p = buf;
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
|
|
|
|
for (;;)
|
|
|
|
{
|
|
|
|
if (*q != '%')
|
|
|
|
{
|
|
|
|
/* copy and check for NUL terminator */
|
|
|
|
if (!(*(p++) = *(q++)))
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
int hi;
|
|
|
|
int lo;
|
|
|
|
int c;
|
|
|
|
|
|
|
|
++q; /* skip the percent sign itself */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Possible EOL will be caught by the first call to
|
|
|
|
* get_hexdigit(), so we never dereference an invalid q pointer.
|
|
|
|
*/
|
|
|
|
if (!(get_hexdigit(*q++, &hi) && get_hexdigit(*q++, &lo)))
|
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBuffer(errorMessage,
|
2012-08-02 19:10:30 +02:00
|
|
|
libpq_gettext("invalid percent-encoded token: \"%s\"\n"),
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
str);
|
|
|
|
free(buf);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
c = (hi << 4) | lo;
|
|
|
|
if (c == 0)
|
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBuffer(errorMessage,
|
2012-07-02 20:12:46 +02:00
|
|
|
libpq_gettext("forbidden value %%00 in percent-encoded value: \"%s\"\n"),
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
str);
|
|
|
|
free(buf);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
*(p++) = c;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return buf;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Convert hexadecimal digit character to its integer value.
|
|
|
|
*
|
|
|
|
* If successful, returns true and value is filled with digit's base 16 value.
|
|
|
|
* If not successful, returns false.
|
|
|
|
*
|
|
|
|
* Lower- and upper-case letters in the range A-F are treated identically.
|
|
|
|
*/
|
|
|
|
/*
 * Convert a hexadecimal digit character to its integer value.
 *
 * On success, returns true and stores the digit's base-16 value (0..15)
 * into *value.  Returns false for any character that is not a hex digit.
 *
 * Lower- and upper-case letters in the range A-F are treated identically.
 */
static bool
get_hexdigit(char digit, int *value)
{
	if (digit >= '0' && digit <= '9')
	{
		*value = digit - '0';
		return true;
	}
	if (digit >= 'A' && digit <= 'F')
	{
		*value = digit - 'A' + 10;
		return true;
	}
	if (digit >= 'a' && digit <= 'f')
	{
		*value = digit - 'a' + 10;
		return true;
	}
	return false;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Find an option value corresponding to the keyword in the connOptions array.
|
|
|
|
*
|
|
|
|
* If successful, returns a pointer to the corresponding option's value.
|
|
|
|
* If not successful, returns NULL.
|
|
|
|
*/
|
|
|
|
static const char *
|
2000-03-11 04:08:37 +01:00
|
|
|
conninfo_getval(PQconninfoOption *connOptions,
|
|
|
|
const char *keyword)
|
1996-11-09 11:39:54 +01:00
|
|
|
{
|
|
|
|
PQconninfoOption *option;
|
|
|
|
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
option = conninfo_find(connOptions, keyword);
|
|
|
|
|
|
|
|
return option ? option->val : NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Store a (new) value for an option corresponding to the keyword in
|
|
|
|
* connOptions array.
|
|
|
|
*
|
2012-05-28 21:44:34 +02:00
|
|
|
* If uri_decode is true, the value is URI-decoded. The keyword is always
|
|
|
|
* assumed to be non URI-encoded.
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
*
|
|
|
|
* If successful, returns a pointer to the corresponding PQconninfoOption,
|
|
|
|
* which value is replaced with a strdup'd copy of the passed value string.
|
|
|
|
* The existing value for the option is free'd before replacing, if any.
|
|
|
|
*
|
|
|
|
* If not successful, returns NULL and fills errorMessage accordingly.
|
|
|
|
* However, if the reason of failure is an invalid keyword being passed and
|
2017-08-16 06:22:32 +02:00
|
|
|
* ignoreMissing is true, errorMessage will be left untouched.
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
*/
|
|
|
|
static PQconninfoOption *
|
|
|
|
conninfo_storeval(PQconninfoOption *connOptions,
|
|
|
|
const char *keyword, const char *value,
|
|
|
|
PQExpBuffer errorMessage, bool ignoreMissing,
|
|
|
|
bool uri_decode)
|
|
|
|
{
|
|
|
|
PQconninfoOption *option;
|
2012-05-28 21:44:34 +02:00
|
|
|
char *value_copy;
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
|
2012-11-30 07:09:18 +01:00
|
|
|
/*
|
|
|
|
* For backwards compatibility, requiressl=1 gets translated to
|
|
|
|
* sslmode=require, and requiressl=0 gets translated to sslmode=prefer
|
|
|
|
* (which is the default for sslmode).
|
|
|
|
*/
|
|
|
|
if (strcmp(keyword, "requiressl") == 0)
|
|
|
|
{
|
|
|
|
keyword = "sslmode";
|
|
|
|
if (value[0] == '1')
|
|
|
|
value = "require";
|
|
|
|
else
|
|
|
|
value = "prefer";
|
|
|
|
}
|
|
|
|
|
2012-05-28 21:44:34 +02:00
|
|
|
option = conninfo_find(connOptions, keyword);
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
if (option == NULL)
|
|
|
|
{
|
|
|
|
if (!ignoreMissing)
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBuffer(errorMessage,
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
libpq_gettext("invalid connection option \"%s\"\n"),
|
|
|
|
keyword);
|
2012-05-28 21:44:34 +02:00
|
|
|
return NULL;
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
if (uri_decode)
|
|
|
|
{
|
|
|
|
value_copy = conninfo_uri_decode(value, errorMessage);
|
|
|
|
if (value_copy == NULL)
|
|
|
|
/* conninfo_uri_decode already set an error message */
|
2012-05-28 21:44:34 +02:00
|
|
|
return NULL;
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
value_copy = strdup(value);
|
|
|
|
if (value_copy == NULL)
|
|
|
|
{
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
appendPQExpBufferStr(errorMessage, libpq_gettext("out of memory\n"));
|
2012-05-28 21:44:34 +02:00
|
|
|
return NULL;
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-06-16 21:50:56 +02:00
|
|
|
free(option->val);
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
option->val = value_copy;
|
|
|
|
|
|
|
|
return option;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Find a PQconninfoOption option corresponding to the keyword in the
|
|
|
|
* connOptions array.
|
|
|
|
*
|
|
|
|
* If successful, returns a pointer to the corresponding PQconninfoOption
|
|
|
|
* structure.
|
|
|
|
* If not successful, returns NULL.
|
|
|
|
*/
|
|
|
|
static PQconninfoOption *
|
|
|
|
conninfo_find(PQconninfoOption *connOptions, const char *keyword)
|
|
|
|
{
|
|
|
|
PQconninfoOption *option;
|
|
|
|
|
2000-03-11 04:08:37 +01:00
|
|
|
for (option = connOptions; option->keyword != NULL; option++)
|
1996-11-09 11:39:54 +01:00
|
|
|
{
|
2000-03-11 04:08:37 +01:00
|
|
|
if (strcmp(option->keyword, keyword) == 0)
|
Accept postgres:// URIs in libpq connection functions
postgres:// URIs are an attempt to "stop the bleeding" in this general
area that has been said to occur due to external projects adopting their
own syntaxes. The syntaxes supported by this patch:
postgres://[user[:pwd]@][unix-socket][:port[/dbname]][?param1=value1&...]
postgres://[user[:pwd]@][net-location][:port][/dbname][?param1=value1&...]
should be enough to cover most interesting cases without having to
resort to "param=value" pairs, but those are provided for the cases that
need them regardless.
libpq documentation has been shuffled around a bit, to avoid stuffing
all the format details into the PQconnectdbParams description, which was
already a bit overwhelming. The list of keywords has moved to its own
subsection, and the details on the URI format live in another subsection.
This includes a simple test program, as requested in discussion, to
ensure that interesting corner cases continue to work appropriately in
the future.
Author: Alexander Shulgin
Some tweaking by Álvaro Herrera, Greg Smith, Daniel Farina, Peter Eisentraut
Reviewed by Robert Haas, Alexey Klyukin (offlist), Heikki Linnakangas,
Marko Kreen, and others
Oh, it also supports postgresql:// but that's probably just an accident.
2012-04-11 08:59:32 +02:00
|
|
|
return option;
|
1996-11-09 11:39:54 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-11-30 07:09:18 +01:00
|
|
|
/*
|
|
|
|
* Return the connection options used for the connection
|
|
|
|
*/
|
|
|
|
PQconninfoOption *
|
|
|
|
PQconninfo(PGconn *conn)
|
|
|
|
{
|
|
|
|
PQExpBufferData errorBuf;
|
|
|
|
PQconninfoOption *connOptions;
|
|
|
|
|
|
|
|
if (conn == NULL)
|
|
|
|
return NULL;
|
|
|
|
|
In libpq, always append new error messages to conn->errorMessage.
Previously, we had an undisciplined mish-mash of printfPQExpBuffer and
appendPQExpBuffer calls to report errors within libpq. This commit
establishes a uniform rule that appendPQExpBuffer[Str] should be used.
conn->errorMessage is reset only at the start of an application request,
and then accumulates messages till we're done. We can remove no less
than three different ad-hoc mechanisms that were used to get the effect
of concatenation of error messages within a sequence of operations.
Although this makes things quite a bit cleaner conceptually, the main
reason to do it is to make the world safer for the multiple-target-host
feature that was added awhile back. Previously, there were many cases
in which an error occurring during an individual host connection attempt
would wipe out the record of what had happened during previous attempts.
(The reporting is still inadequate, in that it can be hard to tell which
host got the failure, but that seems like a matter for a separate commit.)
Currently, lo_import and lo_export contain exceptions to the "never
use printfPQExpBuffer" rule. If we changed them, we'd risk reporting
an incidental lo_close failure before the actual read or write
failure, which would be confusing, not least because lo_close happened
after the main failure. We could improve this by inventing an
internal version of lo_close that doesn't reset the errorMessage; but
we'd also need a version of PQfn() that does that, and it didn't quite
seem worth the trouble for now.
Discussion: https://postgr.es/m/BN6PR05MB3492948E4FD76C156E747E8BC9160@BN6PR05MB3492.namprd05.prod.outlook.com
2021-01-11 19:12:09 +01:00
|
|
|
/*
|
|
|
|
* We don't actually report any errors here, but callees want a buffer,
|
|
|
|
* and we prefer not to trash the conn's errorMessage.
|
|
|
|
*/
|
2012-11-30 07:09:18 +01:00
|
|
|
initPQExpBuffer(&errorBuf);
|
|
|
|
if (PQExpBufferDataBroken(errorBuf))
|
|
|
|
return NULL; /* out of memory already :-( */
|
|
|
|
|
|
|
|
connOptions = conninfo_init(&errorBuf);
|
|
|
|
|
|
|
|
if (connOptions != NULL)
|
|
|
|
{
|
|
|
|
const internalPQconninfoOption *option;
|
|
|
|
|
|
|
|
for (option = PQconninfoOptions; option->keyword; option++)
|
|
|
|
{
|
|
|
|
char **connmember;
|
|
|
|
|
|
|
|
if (option->connofs < 0)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
connmember = (char **) ((char *) conn + option->connofs);
|
|
|
|
|
|
|
|
if (*connmember)
|
|
|
|
conninfo_storeval(connOptions, option->keyword, *connmember,
|
|
|
|
&errorBuf, true, false);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
termPQExpBuffer(&errorBuf);
|
|
|
|
|
|
|
|
return connOptions;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2000-03-11 04:08:37 +01:00
|
|
|
void
|
|
|
|
PQconninfoFree(PQconninfoOption *connOptions)
|
1996-11-09 11:39:54 +01:00
|
|
|
{
|
2000-03-11 04:08:37 +01:00
|
|
|
if (connOptions == NULL)
|
|
|
|
return;
|
|
|
|
|
2022-06-16 21:50:56 +02:00
|
|
|
for (PQconninfoOption *option = connOptions; option->keyword != NULL; option++)
|
|
|
|
free(option->val);
|
2000-03-11 04:08:37 +01:00
|
|
|
free(connOptions);
|
1996-11-09 11:39:54 +01:00
|
|
|
}
|
|
|
|
|
2001-11-11 03:09:05 +01:00
|
|
|
|
1996-07-09 08:22:35 +02:00
|
|
|
/* =========== accessor functions for PGconn ========= */
|
2000-02-08 00:10:11 +01:00
|
|
|
char *
|
In the spirit of TODO item
* Add use of 'const' for varibles in source tree
(which is misspelled, btw.)
I went through the front-end libpq code and did so. This affects in
particular the various accessor functions (such as PQdb() and
PQgetvalue()) as well as, by necessity, the internal helpers they use.
I have been really thorough in that regard, perhaps some people will find
it annoying that things like
char * foo = PQgetvalue(res, 0, 0)
will generate a warning. On the other hand it _should_ generate one. This
is no real compatibility break, although a few clients will have to be
fixed to suppress warnings. (Which again would be in the spirit of the
above TODO.)
In addition I replaced some int's by size_t's and removed some warnings
(and generated some new ones -- grmpf!). Also I rewrote PQoidStatus (so it
actually honors the const!) and supplied a new function PQoidValue that
returns a proper Oid type. This is only front-end stuff, none of the
communicaton stuff was touched.
The psql patch also adds some new consts to honor the new libpq situation,
as well as fixes a fatal condition that resulted when using the -V
(--version) option and there is no database listening.
So, to summarize, the psql you should definitely put in (with or without
the libpq). If you think I went too far with the const-mania in libpq, let
me know and I'll make adjustments. If you approve it, I will also update
the docs.
-Peter
--
Peter Eisentraut Sernanders vaeg 10:115
1999-11-11 01:10:14 +01:00
|
|
|
PQdb(const PGconn *conn)
|
1996-07-09 08:22:35 +02:00
|
|
|
{
|
1996-07-12 06:53:59 +02:00
|
|
|
if (!conn)
|
2004-01-07 19:56:30 +01:00
|
|
|
return NULL;
|
1996-07-09 08:22:35 +02:00
|
|
|
return conn->dbName;
|
|
|
|
}
|
|
|
|
|
2000-02-08 00:10:11 +01:00
|
|
|
char *
|
In the spirit of TODO item
* Add use of 'const' for varibles in source tree
(which is misspelled, btw.)
I went through the front-end libpq code and did so. This affects in
particular the various accessor functions (such as PQdb() and
PQgetvalue()) as well as, by necessity, the internal helpers they use.
I have been really thorough in that regard, perhaps some people will find
it annoying that things like
char * foo = PQgetvalue(res, 0, 0)
will generate a warning. On the other hand it _should_ generate one. This
is no real compatibility break, although a few clients will have to be
fixed to suppress warnings. (Which again would be in the spirit of the
above TODO.)
In addition I replaced some int's by size_t's and removed some warnings
(and generated some new ones -- grmpf!). Also I rewrote PQoidStatus (so it
actually honors the const!) and supplied a new function PQoidValue that
returns a proper Oid type. This is only front-end stuff, none of the
communicaton stuff was touched.
The psql patch also adds some new consts to honor the new libpq situation,
as well as fixes a fatal condition that resulted when using the -V
(--version) option and there is no database listening.
So, to summarize, the psql you should definitely put in (with or without
the libpq). If you think I went too far with the const-mania in libpq, let
me know and I'll make adjustments. If you approve it, I will also update
the docs.
-Peter
--
Peter Eisentraut Sernanders vaeg 10:115
1999-11-11 01:10:14 +01:00
|
|
|
PQuser(const PGconn *conn)
|
1996-11-09 11:39:54 +01:00
|
|
|
{
|
|
|
|
if (!conn)
|
2004-01-07 19:56:30 +01:00
|
|
|
return NULL;
|
1996-11-09 11:39:54 +01:00
|
|
|
return conn->pguser;
|
|
|
|
}
|
|
|
|
|
2000-02-08 00:10:11 +01:00
|
|
|
char *
|
In the spirit of TODO item
* Add use of 'const' for varibles in source tree
(which is misspelled, btw.)
I went through the front-end libpq code and did so. This affects in
particular the various accessor functions (such as PQdb() and
PQgetvalue()) as well as, by necessity, the internal helpers they use.
I have been really thorough in that regard, perhaps some people will find
it annoying that things like
char * foo = PQgetvalue(res, 0, 0)
will generate a warning. On the other hand it _should_ generate one. This
is no real compatibility break, although a few clients will have to be
fixed to suppress warnings. (Which again would be in the spirit of the
above TODO.)
In addition I replaced some int's by size_t's and removed some warnings
(and generated some new ones -- grmpf!). Also I rewrote PQoidStatus (so it
actually honors the const!) and supplied a new function PQoidValue that
returns a proper Oid type. This is only front-end stuff, none of the
communicaton stuff was touched.
The psql patch also adds some new consts to honor the new libpq situation,
as well as fixes a fatal condition that resulted when using the -V
(--version) option and there is no database listening.
So, to summarize, the psql you should definitely put in (with or without
the libpq). If you think I went too far with the const-mania in libpq, let
me know and I'll make adjustments. If you approve it, I will also update
the docs.
-Peter
--
Peter Eisentraut Sernanders vaeg 10:115
1999-11-11 01:10:14 +01:00
|
|
|
PQpass(const PGconn *conn)
|
1998-09-03 04:10:56 +02:00
|
|
|
{
|
2016-11-03 14:25:20 +01:00
|
|
|
char *password = NULL;
|
|
|
|
|
1998-09-03 04:10:56 +02:00
|
|
|
if (!conn)
|
2004-01-07 19:56:30 +01:00
|
|
|
return NULL;
|
2016-11-03 14:25:20 +01:00
|
|
|
if (conn->connhost != NULL)
|
|
|
|
password = conn->connhost[conn->whichhost].password;
|
|
|
|
if (password == NULL)
|
|
|
|
password = conn->pgpass;
|
2017-01-24 23:06:21 +01:00
|
|
|
/* Historically we've returned "" not NULL for no password specified */
|
|
|
|
if (password == NULL)
|
|
|
|
password = "";
|
2016-11-03 14:25:20 +01:00
|
|
|
return password;
|
1998-09-03 04:10:56 +02:00
|
|
|
}
|
|
|
|
|
2000-02-08 00:10:11 +01:00
|
|
|
char *
|
In the spirit of TODO item
* Add use of 'const' for varibles in source tree
(which is misspelled, btw.)
I went through the front-end libpq code and did so. This affects in
particular the various accessor functions (such as PQdb() and
PQgetvalue()) as well as, by necessity, the internal helpers they use.
I have been really thorough in that regard, perhaps some people will find
it annoying that things like
char * foo = PQgetvalue(res, 0, 0)
will generate a warning. On the other hand it _should_ generate one. This
is no real compatibility break, although a few clients will have to be
fixed to suppress warnings. (Which again would be in the spirit of the
above TODO.)
In addition I replaced some int's by size_t's and removed some warnings
(and generated some new ones -- grmpf!). Also I rewrote PQoidStatus (so it
actually honors the const!) and supplied a new function PQoidValue that
returns a proper Oid type. This is only front-end stuff, none of the
communicaton stuff was touched.
The psql patch also adds some new consts to honor the new libpq situation,
as well as fixes a fatal condition that resulted when using the -V
(--version) option and there is no database listening.
So, to summarize, the psql you should definitely put in (with or without
the libpq). If you think I went too far with the const-mania in libpq, let
me know and I'll make adjustments. If you approve it, I will also update
the docs.
-Peter
--
Peter Eisentraut Sernanders vaeg 10:115
1999-11-11 01:10:14 +01:00
|
|
|
PQhost(const PGconn *conn)
|
1996-07-09 08:22:35 +02:00
|
|
|
{
|
1996-07-12 06:53:59 +02:00
|
|
|
if (!conn)
|
2004-01-07 19:56:30 +01:00
|
|
|
return NULL;
|
2018-03-27 18:31:34 +02:00
|
|
|
|
|
|
|
if (conn->connhost != NULL)
|
2014-01-23 14:48:12 +01:00
|
|
|
{
|
2019-06-15 00:02:26 +02:00
|
|
|
/*
|
|
|
|
* Return the verbatim host value provided by user, or hostaddr in its
|
|
|
|
* lack.
|
|
|
|
*/
|
2018-03-27 18:31:34 +02:00
|
|
|
if (conn->connhost[conn->whichhost].host != NULL &&
|
|
|
|
conn->connhost[conn->whichhost].host[0] != '\0')
|
|
|
|
return conn->connhost[conn->whichhost].host;
|
|
|
|
else if (conn->connhost[conn->whichhost].hostaddr != NULL &&
|
|
|
|
conn->connhost[conn->whichhost].hostaddr[0] != '\0')
|
|
|
|
return conn->connhost[conn->whichhost].hostaddr;
|
2014-01-23 14:48:12 +01:00
|
|
|
}
|
2018-03-27 18:31:34 +02:00
|
|
|
|
|
|
|
return "";
|
1996-07-09 08:22:35 +02:00
|
|
|
}
|
|
|
|
|
2018-11-19 18:34:12 +01:00
|
|
|
char *
|
|
|
|
PQhostaddr(const PGconn *conn)
|
|
|
|
{
|
|
|
|
if (!conn)
|
|
|
|
return NULL;
|
|
|
|
|
2019-06-15 00:02:26 +02:00
|
|
|
/* Return the parsed IP address */
|
|
|
|
if (conn->connhost != NULL && conn->connip != NULL)
|
|
|
|
return conn->connip;
|
2018-11-19 18:34:12 +01:00
|
|
|
|
|
|
|
return "";
|
|
|
|
}
|
|
|
|
|
2000-02-08 00:10:11 +01:00
|
|
|
char *
|
In the spirit of TODO item
* Add use of 'const' for varibles in source tree
(which is misspelled, btw.)
I went through the front-end libpq code and did so. This affects in
particular the various accessor functions (such as PQdb() and
PQgetvalue()) as well as, by necessity, the internal helpers they use.
I have been really thorough in that regard, perhaps some people will find
it annoying that things like
char * foo = PQgetvalue(res, 0, 0)
will generate a warning. On the other hand it _should_ generate one. This
is no real compatibility break, although a few clients will have to be
fixed to suppress warnings. (Which again would be in the spirit of the
above TODO.)
In addition I replaced some int's by size_t's and removed some warnings
(and generated some new ones -- grmpf!). Also I rewrote PQoidStatus (so it
actually honors the const!) and supplied a new function PQoidValue that
returns a proper Oid type. This is only front-end stuff, none of the
communicaton stuff was touched.
The psql patch also adds some new consts to honor the new libpq situation,
as well as fixes a fatal condition that resulted when using the -V
(--version) option and there is no database listening.
So, to summarize, the psql you should definitely put in (with or without
the libpq). If you think I went too far with the const-mania in libpq, let
me know and I'll make adjustments. If you approve it, I will also update
the docs.
-Peter
--
Peter Eisentraut Sernanders vaeg 10:115
1999-11-11 01:10:14 +01:00
|
|
|
PQport(const PGconn *conn)
|
1996-07-09 08:22:35 +02:00
|
|
|
{
|
1996-07-12 06:53:59 +02:00
|
|
|
if (!conn)
|
2004-01-07 19:56:30 +01:00
|
|
|
return NULL;
|
2018-03-27 18:31:34 +02:00
|
|
|
|
2016-11-03 14:25:20 +01:00
|
|
|
if (conn->connhost != NULL)
|
|
|
|
return conn->connhost[conn->whichhost].port;
|
2018-03-27 18:31:34 +02:00
|
|
|
|
|
|
|
return "";
|
1996-07-09 08:22:35 +02:00
|
|
|
}
|
|
|
|
|
2021-03-09 15:01:22 +01:00
|
|
|
/*
|
|
|
|
* No longer does anything, but the function remains for API backwards
|
|
|
|
* compatibility.
|
|
|
|
*/
|
2000-02-08 00:10:11 +01:00
|
|
|
char *
|
In the spirit of TODO item
* Add use of 'const' for varibles in source tree
(which is misspelled, btw.)
I went through the front-end libpq code and did so. This affects in
particular the various accessor functions (such as PQdb() and
PQgetvalue()) as well as, by necessity, the internal helpers they use.
I have been really thorough in that regard, perhaps some people will find
it annoying that things like
char * foo = PQgetvalue(res, 0, 0)
will generate a warning. On the other hand it _should_ generate one. This
is no real compatibility break, although a few clients will have to be
fixed to suppress warnings. (Which again would be in the spirit of the
above TODO.)
In addition I replaced some int's by size_t's and removed some warnings
(and generated some new ones -- grmpf!). Also I rewrote PQoidStatus (so it
actually honors the const!) and supplied a new function PQoidValue that
returns a proper Oid type. This is only front-end stuff, none of the
communicaton stuff was touched.
The psql patch also adds some new consts to honor the new libpq situation,
as well as fixes a fatal condition that resulted when using the -V
(--version) option and there is no database listening.
So, to summarize, the psql you should definitely put in (with or without
the libpq). If you think I went too far with the const-mania in libpq, let
me know and I'll make adjustments. If you approve it, I will also update
the docs.
-Peter
--
Peter Eisentraut Sernanders vaeg 10:115
1999-11-11 01:10:14 +01:00
|
|
|
PQtty(const PGconn *conn)
|
1996-07-09 08:22:35 +02:00
|
|
|
{
|
1996-07-12 06:53:59 +02:00
|
|
|
if (!conn)
|
2004-01-07 19:56:30 +01:00
|
|
|
return NULL;
|
2021-03-09 15:01:22 +01:00
|
|
|
return "";
|
1996-07-09 08:22:35 +02:00
|
|
|
}
|
|
|
|
|
2000-02-08 00:10:11 +01:00
|
|
|
char *
|
In the spirit of TODO item
* Add use of 'const' for varibles in source tree
(which is misspelled, btw.)
I went through the front-end libpq code and did so. This affects in
particular the various accessor functions (such as PQdb() and
PQgetvalue()) as well as, by necessity, the internal helpers they use.
I have been really thorough in that regard, perhaps some people will find
it annoying that things like
char * foo = PQgetvalue(res, 0, 0)
will generate a warning. On the other hand it _should_ generate one. This
is no real compatibility break, although a few clients will have to be
fixed to suppress warnings. (Which again would be in the spirit of the
above TODO.)
In addition I replaced some int's by size_t's and removed some warnings
(and generated some new ones -- grmpf!). Also I rewrote PQoidStatus (so it
actually honors the const!) and supplied a new function PQoidValue that
returns a proper Oid type. This is only front-end stuff, none of the
communicaton stuff was touched.
The psql patch also adds some new consts to honor the new libpq situation,
as well as fixes a fatal condition that resulted when using the -V
(--version) option and there is no database listening.
So, to summarize, the psql you should definitely put in (with or without
the libpq). If you think I went too far with the const-mania in libpq, let
me know and I'll make adjustments. If you approve it, I will also update
the docs.
-Peter
--
Peter Eisentraut Sernanders vaeg 10:115
1999-11-11 01:10:14 +01:00
|
|
|
PQoptions(const PGconn *conn)
|
1996-07-09 08:22:35 +02:00
|
|
|
{
|
1996-07-12 06:53:59 +02:00
|
|
|
if (!conn)
|
2004-01-07 19:56:30 +01:00
|
|
|
return NULL;
|
1998-09-03 04:10:56 +02:00
|
|
|
return conn->pgoptions;
|
1996-07-09 08:22:35 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
ConnStatusType
|
In the spirit of TODO item
* Add use of 'const' for varibles in source tree
(which is misspelled, btw.)
I went through the front-end libpq code and did so. This affects in
particular the various accessor functions (such as PQdb() and
PQgetvalue()) as well as, by necessity, the internal helpers they use.
I have been really thorough in that regard, perhaps some people will find
it annoying that things like
char * foo = PQgetvalue(res, 0, 0)
will generate a warning. On the other hand it _should_ generate one. This
is no real compatibility break, although a few clients will have to be
fixed to suppress warnings. (Which again would be in the spirit of the
above TODO.)
In addition I replaced some int's by size_t's and removed some warnings
(and generated some new ones -- grmpf!). Also I rewrote PQoidStatus (so it
actually honors the const!) and supplied a new function PQoidValue that
returns a proper Oid type. This is only front-end stuff, none of the
communicaton stuff was touched.
The psql patch also adds some new consts to honor the new libpq situation,
as well as fixes a fatal condition that resulted when using the -V
(--version) option and there is no database listening.
So, to summarize, the psql you should definitely put in (with or without
the libpq). If you think I went too far with the const-mania in libpq, let
me know and I'll make adjustments. If you approve it, I will also update
the docs.
-Peter
--
Peter Eisentraut Sernanders vaeg 10:115
1999-11-11 01:10:14 +01:00
|
|
|
PQstatus(const PGconn *conn)
|
1996-07-09 08:22:35 +02:00
|
|
|
{
|
1996-07-12 06:53:59 +02:00
|
|
|
if (!conn)
|
|
|
|
return CONNECTION_BAD;
|
1996-07-09 08:22:35 +02:00
|
|
|
return conn->status;
|
|
|
|
}
|
|
|
|
|
2003-06-21 23:51:35 +02:00
|
|
|
PGTransactionStatusType
|
|
|
|
PQtransactionStatus(const PGconn *conn)
|
|
|
|
{
|
|
|
|
if (!conn || conn->status != CONNECTION_OK)
|
|
|
|
return PQTRANS_UNKNOWN;
|
|
|
|
if (conn->asyncStatus != PGASYNC_IDLE)
|
|
|
|
return PQTRANS_ACTIVE;
|
|
|
|
return conn->xactStatus;
|
|
|
|
}
|
|
|
|
|
|
|
|
const char *
|
|
|
|
PQparameterStatus(const PGconn *conn, const char *paramName)
|
|
|
|
{
|
|
|
|
const pgParameterStatus *pstatus;
|
|
|
|
|
|
|
|
if (!conn || !paramName)
|
|
|
|
return NULL;
|
|
|
|
for (pstatus = conn->pstatus; pstatus != NULL; pstatus = pstatus->next)
|
|
|
|
{
|
|
|
|
if (strcmp(pstatus->name, paramName) == 0)
|
|
|
|
return pstatus->value;
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
PQprotocolVersion(const PGconn *conn)
|
|
|
|
{
|
|
|
|
if (!conn)
|
|
|
|
return 0;
|
|
|
|
if (conn->status == CONNECTION_BAD)
|
|
|
|
return 0;
|
|
|
|
return PG_PROTOCOL_MAJOR(conn->pversion);
|
|
|
|
}
|
|
|
|
|
2004-08-11 20:06:01 +02:00
|
|
|
int
|
|
|
|
PQserverVersion(const PGconn *conn)
|
|
|
|
{
|
|
|
|
if (!conn)
|
|
|
|
return 0;
|
|
|
|
if (conn->status == CONNECTION_BAD)
|
|
|
|
return 0;
|
|
|
|
return conn->sversion;
|
|
|
|
}
|
|
|
|
|
2000-02-08 00:10:11 +01:00
|
|
|
char *
|
In the spirit of TODO item
* Add use of 'const' for varibles in source tree
(which is misspelled, btw.)
I went through the front-end libpq code and did so. This affects in
particular the various accessor functions (such as PQdb() and
PQgetvalue()) as well as, by necessity, the internal helpers they use.
I have been really thorough in that regard, perhaps some people will find
it annoying that things like
char * foo = PQgetvalue(res, 0, 0)
will generate a warning. On the other hand it _should_ generate one. This
is no real compatibility break, although a few clients will have to be
fixed to suppress warnings. (Which again would be in the spirit of the
above TODO.)
In addition I replaced some int's by size_t's and removed some warnings
(and generated some new ones -- grmpf!). Also I rewrote PQoidStatus (so it
actually honors the const!) and supplied a new function PQoidValue that
returns a proper Oid type. This is only front-end stuff, none of the
communicaton stuff was touched.
The psql patch also adds some new consts to honor the new libpq situation,
as well as fixes a fatal condition that resulted when using the -V
(--version) option and there is no database listening.
So, to summarize, the psql you should definitely put in (with or without
the libpq). If you think I went too far with the const-mania in libpq, let
me know and I'll make adjustments. If you approve it, I will also update
the docs.
-Peter
--
Peter Eisentraut Sernanders vaeg 10:115
1999-11-11 01:10:14 +01:00
|
|
|
PQerrorMessage(const PGconn *conn)
|
1996-07-09 08:22:35 +02:00
|
|
|
{
|
1996-07-12 06:53:59 +02:00
|
|
|
if (!conn)
|
2001-07-15 15:45:04 +02:00
|
|
|
return libpq_gettext("connection pointer is NULL\n");
|
2000-03-11 04:08:37 +01:00
|
|
|
|
2021-07-29 19:33:31 +02:00
|
|
|
/*
|
|
|
|
* The errorMessage buffer might be marked "broken" due to having
|
|
|
|
* previously failed to allocate enough memory for the message. In that
|
|
|
|
* case, tell the application we ran out of memory.
|
|
|
|
*/
|
|
|
|
if (PQExpBufferBroken(&conn->errorMessage))
|
|
|
|
return libpq_gettext("out of memory\n");
|
|
|
|
|
1999-08-31 03:37:37 +02:00
|
|
|
return conn->errorMessage.data;
|
1996-07-09 08:22:35 +02:00
|
|
|
}
|
|
|
|
|
2014-04-16 16:45:48 +02:00
|
|
|
/*
|
|
|
|
* In Windows, socket values are unsigned, and an invalid socket value
|
|
|
|
* (INVALID_SOCKET) is ~0, which equals -1 in comparisons (with no compiler
|
|
|
|
* warning). Ideally we would return an unsigned value for PQsocket() on
|
|
|
|
* Windows, but that would cause the function's return value to differ from
|
|
|
|
* Unix, so we just return -1 for invalid sockets.
|
|
|
|
* http://msdn.microsoft.com/en-us/library/windows/desktop/cc507522%28v=vs.85%29.aspx
|
|
|
|
* http://stackoverflow.com/questions/10817252/why-is-invalid-socket-defined-as-0-in-winsock2-h-c
|
|
|
|
*/
|
1998-05-07 01:51:16 +02:00
|
|
|
int
|
In the spirit of TODO item
* Add use of 'const' for varibles in source tree
(which is misspelled, btw.)
I went through the front-end libpq code and did so. This affects in
particular the various accessor functions (such as PQdb() and
PQgetvalue()) as well as, by necessity, the internal helpers they use.
I have been really thorough in that regard, perhaps some people will find
it annoying that things like
char * foo = PQgetvalue(res, 0, 0)
will generate a warning. On the other hand it _should_ generate one. This
is no real compatibility break, although a few clients will have to be
fixed to suppress warnings. (Which again would be in the spirit of the
above TODO.)
In addition I replaced some int's by size_t's and removed some warnings
(and generated some new ones -- grmpf!). Also I rewrote PQoidStatus (so it
actually honors the const!) and supplied a new function PQoidValue that
returns a proper Oid type. This is only front-end stuff, none of the
communicaton stuff was touched.
The psql patch also adds some new consts to honor the new libpq situation,
as well as fixes a fatal condition that resulted when using the -V
(--version) option and there is no database listening.
So, to summarize, the psql you should definitely put in (with or without
the libpq). If you think I went too far with the const-mania in libpq, let
me know and I'll make adjustments. If you approve it, I will also update
the docs.
-Peter
--
Peter Eisentraut Sernanders vaeg 10:115
1999-11-11 01:10:14 +01:00
|
|
|
PQsocket(const PGconn *conn)
|
1998-05-07 01:51:16 +02:00
|
|
|
{
|
|
|
|
if (!conn)
|
|
|
|
return -1;
|
2014-04-17 01:46:51 +02:00
|
|
|
return (conn->sock != PGINVALID_SOCKET) ? conn->sock : -1;
|
1998-05-07 01:51:16 +02:00
|
|
|
}
|
|
|
|
|
1998-09-03 04:10:56 +02:00
|
|
|
int
|
In the spirit of TODO item
* Add use of 'const' for varibles in source tree
(which is misspelled, btw.)
I went through the front-end libpq code and did so. This affects in
particular the various accessor functions (such as PQdb() and
PQgetvalue()) as well as, by necessity, the internal helpers they use.
I have been really thorough in that regard, perhaps some people will find
it annoying that things like
char * foo = PQgetvalue(res, 0, 0)
will generate a warning. On the other hand it _should_ generate one. This
is no real compatibility break, although a few clients will have to be
fixed to suppress warnings. (Which again would be in the spirit of the
above TODO.)
In addition I replaced some int's by size_t's and removed some warnings
(and generated some new ones -- grmpf!). Also I rewrote PQoidStatus (so it
actually honors the const!) and supplied a new function PQoidValue that
returns a proper Oid type. This is only front-end stuff, none of the
communicaton stuff was touched.
The psql patch also adds some new consts to honor the new libpq situation,
as well as fixes a fatal condition that resulted when using the -V
(--version) option and there is no database listening.
So, to summarize, the psql you should definitely put in (with or without
the libpq). If you think I went too far with the const-mania in libpq, let
me know and I'll make adjustments. If you approve it, I will also update
the docs.
-Peter
--
Peter Eisentraut Sernanders vaeg 10:115
1999-11-11 01:10:14 +01:00
|
|
|
PQbackendPID(const PGconn *conn)
|
1998-09-03 04:10:56 +02:00
|
|
|
{
|
|
|
|
if (!conn || conn->status != CONNECTION_OK)
|
|
|
|
return 0;
|
|
|
|
return conn->be_pid;
|
|
|
|
}
|
|
|
|
|
2021-03-15 22:13:42 +01:00
|
|
|
PGpipelineStatus
|
|
|
|
PQpipelineStatus(const PGconn *conn)
|
|
|
|
{
|
|
|
|
if (!conn)
|
|
|
|
return PQ_PIPELINE_OFF;
|
|
|
|
|
|
|
|
return conn->pipelineStatus;
|
|
|
|
}
|
|
|
|
|
2007-12-09 20:01:40 +01:00
|
|
|
int
|
|
|
|
PQconnectionNeedsPassword(const PGconn *conn)
|
|
|
|
{
|
2016-11-03 14:25:20 +01:00
|
|
|
char *password;
|
|
|
|
|
2007-12-09 20:01:40 +01:00
|
|
|
if (!conn)
|
|
|
|
return false;
|
2016-11-03 14:25:20 +01:00
|
|
|
password = PQpass(conn);
|
2007-12-09 20:01:40 +01:00
|
|
|
if (conn->password_needed &&
|
2016-11-03 14:25:20 +01:00
|
|
|
(password == NULL || password[0] == '\0'))
|
2007-12-09 20:01:40 +01:00
|
|
|
return true;
|
|
|
|
else
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2007-07-08 20:28:56 +02:00
|
|
|
int
|
|
|
|
PQconnectionUsedPassword(const PGconn *conn)
|
|
|
|
{
|
|
|
|
if (!conn)
|
|
|
|
return false;
|
2008-09-22 16:21:44 +02:00
|
|
|
if (conn->password_needed)
|
2007-07-08 20:28:56 +02:00
|
|
|
return true;
|
|
|
|
else
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2000-01-15 06:37:21 +01:00
|
|
|
int
|
2000-02-05 13:33:22 +01:00
|
|
|
PQclientEncoding(const PGconn *conn)
|
2000-01-15 06:37:21 +01:00
|
|
|
{
|
|
|
|
if (!conn || conn->status != CONNECTION_OK)
|
|
|
|
return -1;
|
|
|
|
return conn->client_encoding;
|
|
|
|
}
|
|
|
|
|
2000-02-05 13:33:22 +01:00
|
|
|
int
|
|
|
|
PQsetClientEncoding(PGconn *conn, const char *encoding)
|
|
|
|
{
|
|
|
|
char qbuf[128];
|
2004-03-24 04:45:00 +01:00
|
|
|
static const char query[] = "set client_encoding to '%s'";
|
2000-02-05 13:33:22 +01:00
|
|
|
PGresult *res;
|
|
|
|
int status;
|
|
|
|
|
|
|
|
if (!conn || conn->status != CONNECTION_OK)
|
|
|
|
return -1;
|
|
|
|
|
2000-02-19 06:04:54 +01:00
|
|
|
if (!encoding)
|
|
|
|
return -1;
|
|
|
|
|
2011-02-19 07:54:58 +01:00
|
|
|
/* Resolve special "auto" value from the locale */
|
|
|
|
if (strcmp(encoding, "auto") == 0)
|
|
|
|
encoding = pg_encoding_to_char(pg_get_encoding_from_locale(NULL, true));
|
|
|
|
|
2000-02-05 13:33:22 +01:00
|
|
|
/* check query buffer overflow */
|
|
|
|
if (sizeof(qbuf) < (sizeof(query) + strlen(encoding)))
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
/* ok, now send a query */
|
|
|
|
sprintf(qbuf, query, encoding);
|
|
|
|
res = PQexec(conn, qbuf);
|
|
|
|
|
2004-01-07 19:56:30 +01:00
|
|
|
if (res == NULL)
|
2000-02-05 13:33:22 +01:00
|
|
|
return -1;
|
|
|
|
if (res->resultStatus != PGRES_COMMAND_OK)
|
|
|
|
status = -1;
|
|
|
|
else
|
|
|
|
{
|
Modify libpq's string-escaping routines to be aware of encoding considerations
and standard_conforming_strings. The encoding changes are needed for proper
escaping in multibyte encodings, as per the SQL-injection vulnerabilities
noted in CVE-2006-2313 and CVE-2006-2314. Concurrent fixes are being applied
to the server to ensure that it rejects queries that may have been corrupted
by attempted SQL injection, but this merely guarantees that unpatched clients
will fail rather than allow injection. An actual fix requires changing the
client-side code. While at it we have also fixed these routines to understand
about standard_conforming_strings, so that the upcoming changeover to SQL-spec
string syntax can be somewhat transparent to client code.
Since the existing API of PQescapeString and PQescapeBytea provides no way to
inform them which settings are in use, these functions are now deprecated in
favor of new functions PQescapeStringConn and PQescapeByteaConn. The new
functions take the PGconn to which the string will be sent as an additional
parameter, and look inside the connection structure to determine what to do.
So as to provide some functionality for clients using the old functions,
libpq stores the latest encoding and standard_conforming_strings values
received from the backend in static variables, and the old functions consult
these variables. This will work reliably in clients using only one Postgres
connection at a time, or even multiple connections if they all use the same
encoding and string syntax settings; which should cover many practical
scenarios.
Clients that use homebrew escaping methods, such as PHP's addslashes()
function or even hardwired regexp substitution, will require extra effort
to fix :-(. It is strongly recommended that such code be replaced by use of
PQescapeStringConn/PQescapeByteaConn if at all feasible.
2006-05-21 22:19:23 +02:00
|
|
|
/*
|
2021-03-04 09:45:55 +01:00
|
|
|
* We rely on the backend to report the parameter value, and we'll
|
|
|
|
* change state at that time.
|
Modify libpq's string-escaping routines to be aware of encoding considerations
and standard_conforming_strings. The encoding changes are needed for proper
escaping in multibyte encodings, as per the SQL-injection vulnerabilities
noted in CVE-2006-2313 and CVE-2006-2314. Concurrent fixes are being applied
to the server to ensure that it rejects queries that may have been corrupted
by attempted SQL injection, but this merely guarantees that unpatched clients
will fail rather than allow injection. An actual fix requires changing the
client-side code. While at it we have also fixed these routines to understand
about standard_conforming_strings, so that the upcoming changeover to SQL-spec
string syntax can be somewhat transparent to client code.
Since the existing API of PQescapeString and PQescapeBytea provides no way to
inform them which settings are in use, these functions are now deprecated in
favor of new functions PQescapeStringConn and PQescapeByteaConn. The new
functions take the PGconn to which the string will be sent as an additional
parameter, and look inside the connection structure to determine what to do.
So as to provide some functionality for clients using the old functions,
libpq stores the latest encoding and standard_conforming_strings values
received from the backend in static variables, and the old functions consult
these variables. This will work reliably in clients using only one Postgres
connection at a time, or even multiple connections if they all use the same
encoding and string syntax settings; which should cover many practical
scenarios.
Clients that use homebrew escaping methods, such as PHP's addslashes()
function or even hardwired regexp substitution, will require extra effort
to fix :-(. It is strongly recommended that such code be replaced by use of
PQescapeStringConn/PQescapeByteaConn if at all feasible.
2006-05-21 22:19:23 +02:00
|
|
|
*/
|
2000-02-05 13:33:22 +01:00
|
|
|
status = 0; /* everything is ok */
|
|
|
|
}
|
|
|
|
PQclear(res);
|
2006-01-11 09:43:13 +01:00
|
|
|
return status;
|
2000-02-05 13:33:22 +01:00
|
|
|
}
|
2000-04-12 19:17:23 +02:00
|
|
|
|
2003-06-21 23:51:35 +02:00
|
|
|
PGVerbosity
|
|
|
|
PQsetErrorVerbosity(PGconn *conn, PGVerbosity verbosity)
|
|
|
|
{
|
|
|
|
PGVerbosity old;
|
|
|
|
|
|
|
|
if (!conn)
|
|
|
|
return PQERRORS_DEFAULT;
|
|
|
|
old = conn->verbosity;
|
|
|
|
conn->verbosity = verbosity;
|
|
|
|
return old;
|
|
|
|
}
|
|
|
|
|
2015-09-05 17:58:20 +02:00
|
|
|
PGContextVisibility
|
|
|
|
PQsetErrorContextVisibility(PGconn *conn, PGContextVisibility show_context)
|
|
|
|
{
|
|
|
|
PGContextVisibility old;
|
|
|
|
|
|
|
|
if (!conn)
|
|
|
|
return PQSHOW_CONTEXT_ERRORS;
|
|
|
|
old = conn->show_context;
|
|
|
|
conn->show_context = show_context;
|
|
|
|
return old;
|
|
|
|
}
|
|
|
|
|
2003-06-21 23:51:35 +02:00
|
|
|
PQnoticeReceiver
|
|
|
|
PQsetNoticeReceiver(PGconn *conn, PQnoticeReceiver proc, void *arg)
|
|
|
|
{
|
|
|
|
PQnoticeReceiver old;
|
|
|
|
|
|
|
|
if (conn == NULL)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
old = conn->noticeHooks.noticeRec;
|
|
|
|
if (proc)
|
|
|
|
{
|
|
|
|
conn->noticeHooks.noticeRec = proc;
|
|
|
|
conn->noticeHooks.noticeRecArg = arg;
|
|
|
|
}
|
|
|
|
return old;
|
|
|
|
}
|
|
|
|
|
1999-10-26 06:49:00 +02:00
|
|
|
PQnoticeProcessor
|
1998-08-09 04:59:33 +02:00
|
|
|
PQsetNoticeProcessor(PGconn *conn, PQnoticeProcessor proc, void *arg)
|
|
|
|
{
|
1999-10-26 06:49:00 +02:00
|
|
|
PQnoticeProcessor old;
|
2000-03-11 04:08:37 +01:00
|
|
|
|
1998-08-09 04:59:33 +02:00
|
|
|
if (conn == NULL)
|
1999-10-26 06:49:00 +02:00
|
|
|
return NULL;
|
|
|
|
|
2003-06-21 23:51:35 +02:00
|
|
|
old = conn->noticeHooks.noticeProc;
|
2000-03-11 04:08:37 +01:00
|
|
|
if (proc)
|
|
|
|
{
|
2003-06-21 23:51:35 +02:00
|
|
|
conn->noticeHooks.noticeProc = proc;
|
|
|
|
conn->noticeHooks.noticeProcArg = arg;
|
1999-10-26 06:49:00 +02:00
|
|
|
}
|
|
|
|
return old;
|
1998-08-09 04:59:33 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2003-06-21 23:51:35 +02:00
|
|
|
* The default notice message receiver just gets the standard notice text
|
|
|
|
* and sends it to the notice processor. This two-level setup exists
|
|
|
|
* mostly for backwards compatibility; perhaps we should deprecate use of
|
|
|
|
* PQsetNoticeProcessor?
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
defaultNoticeReceiver(void *arg, const PGresult *res)
|
|
|
|
{
|
|
|
|
(void) arg; /* not used */
|
2003-06-23 21:20:25 +02:00
|
|
|
if (res->noticeHooks.noticeProc != NULL)
|
2017-09-07 18:06:23 +02:00
|
|
|
res->noticeHooks.noticeProc(res->noticeHooks.noticeProcArg,
|
|
|
|
PQresultErrorMessage(res));
|
2003-06-21 23:51:35 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The default notice message processor just prints the
|
1998-08-09 04:59:33 +02:00
|
|
|
* message on stderr. Applications can override this if they
|
|
|
|
* want the messages to go elsewhere (a window, for example).
|
|
|
|
* Note that simply discarding notices is probably a bad idea.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
defaultNoticeProcessor(void *arg, const char *message)
|
|
|
|
{
|
2000-02-08 00:10:11 +01:00
|
|
|
(void) arg; /* not used */
|
1998-08-09 04:59:33 +02:00
|
|
|
/* Note: we expect the supplied string to end with a newline already. */
|
|
|
|
fprintf(stderr, "%s", message);
|
|
|
|
}
|
2002-08-15 04:56:19 +02:00
|
|
|
|
2003-04-18 00:26:02 +02:00
|
|
|
/*
|
|
|
|
* returns a pointer to the next token or NULL if the current
|
|
|
|
* token doesn't match
|
|
|
|
*/
|
|
|
|
static char *
|
2017-10-31 15:34:31 +01:00
|
|
|
pwdfMatchesString(char *buf, const char *token)
|
2002-08-15 04:56:19 +02:00
|
|
|
{
|
2017-10-31 15:34:31 +01:00
|
|
|
char *tbuf;
|
|
|
|
const char *ttok;
|
2002-08-15 04:56:19 +02:00
|
|
|
bool bslash = false;
|
2002-09-04 22:31:48 +02:00
|
|
|
|
2002-08-15 04:56:19 +02:00
|
|
|
if (buf == NULL || token == NULL)
|
|
|
|
return NULL;
|
|
|
|
tbuf = buf;
|
|
|
|
ttok = token;
|
2009-05-18 18:15:22 +02:00
|
|
|
if (tbuf[0] == '*' && tbuf[1] == ':')
|
2002-08-15 04:56:19 +02:00
|
|
|
return tbuf + 2;
|
|
|
|
while (*tbuf != 0)
|
|
|
|
{
|
|
|
|
if (*tbuf == '\\' && !bslash)
|
|
|
|
{
|
|
|
|
tbuf++;
|
|
|
|
bslash = true;
|
|
|
|
}
|
|
|
|
if (*tbuf == ':' && *ttok == 0 && !bslash)
|
|
|
|
return tbuf + 1;
|
|
|
|
bslash = false;
|
|
|
|
if (*ttok == 0)
|
|
|
|
return NULL;
|
|
|
|
if (*tbuf == *ttok)
|
|
|
|
{
|
|
|
|
tbuf++;
|
|
|
|
ttok++;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2003-01-30 20:49:54 +01:00
|
|
|
/* Get a password from the password file. Return value is malloc'd. */
|
2003-04-18 00:26:02 +02:00
|
|
|
static char *
|
2017-10-31 15:34:31 +01:00
|
|
|
passwordFromFile(const char *hostname, const char *port, const char *dbname,
|
|
|
|
const char *username, const char *pgpassfile)
|
2002-08-15 04:56:19 +02:00
|
|
|
{
|
|
|
|
FILE *fp;
|
2002-09-06 00:05:50 +02:00
|
|
|
struct stat stat_buf;
|
Teach libpq to handle arbitrary-length lines in .pgpass files.
Historically there's been a hard-wired assumption here that no line of
a .pgpass file could be as long as NAMEDATALEN*5 bytes. That's a bit
shaky to start off with, because (a) there's no reason to suppose that
host names fit in NAMEDATALEN, and (b) this figure fails to allow for
backslash escape characters. However, it fails completely if someone
wants to use a very long password, and we're now hearing reports of
people wanting to use "security tokens" that can run up to several
hundred bytes. Another angle is that the file is specified to allow
comment lines, but there's no reason to assume that long comment lines
aren't possible.
Rather than guessing at what might be a more suitable limit, let's
replace the fixed-size buffer with an expansible PQExpBuffer. That
adds one malloc/free cycle to the typical use-case, but that's surely
pretty cheap relative to the I/O this code has to do.
Also, add TAP test cases to exercise this code, because there was no
test coverage before.
This reverts most of commit 2eb3bc588, as there's no longer a need for
a warning message about overlength .pgpass lines. (I kept the explicit
check for comment lines, though.)
In HEAD and v13, this also fixes an oversight in 74a308cf5: there's not
much point in explicit_bzero'ing the line buffer if we only do so in two
of the three exit paths.
Back-patch to all supported branches, except that the test case only
goes back to v10 where src/test/authentication/ was added.
Discussion: https://postgr.es/m/4187382.1598909041@sss.pgh.pa.us
2020-09-01 19:14:44 +02:00
|
|
|
PQExpBufferData buf;
|
2002-08-15 04:56:19 +02:00
|
|
|
|
Fix libpq's code for searching .pgpass; rationalize empty-list-item cases.
Before v10, we always searched ~/.pgpass using the host parameter,
and nothing else, to match to the "hostname" field of ~/.pgpass.
(However, null host or host matching DEFAULT_PGSOCKET_DIR was replaced by
"localhost".) In v10, this got broken by commit 274bb2b38, repaired by
commit bdac9836d, and broken again by commit 7b02ba62e; in the code
actually shipped, we'd search with hostaddr if both that and host were
specified --- though oddly, *not* if only hostaddr were specified.
Since this is directly contrary to the documentation, and not
backwards-compatible, it's clearly a bug.
However, the change wasn't totally without justification, even though it
wasn't done quite right, because the pre-v10 behavior has arguably been
buggy since we added hostaddr. If hostaddr is specified and host isn't,
the pre-v10 code will search ~/.pgpass for "localhost", and ship that
password off to a server that most likely isn't local at all. That's
unhelpful at best, and could be a security breach at worst.
Therefore, rather than just revert to that old behavior, let's define
the behavior as "search with host if provided, else with hostaddr if
provided, else search for localhost". (As before, a host name matching
DEFAULT_PGSOCKET_DIR is replaced by localhost.) This matches the
behavior of the actual connection code, so that we don't pick up an
inappropriate password; and it allows useful searches to happen when
only hostaddr is given.
While we're messing around here, ensure that empty elements within a
host or hostaddr list select the same behavior as a totally-empty
field would; for instance "host=a,,b" is equivalent to "host=a,/tmp,b"
if DEFAULT_PGSOCKET_DIR is /tmp. Things worked that way in some cases
already, but not consistently so, which contributed to the confusion
about what key ~/.pgpass would get searched with.
Update documentation accordingly, and also clarify some nearby text.
Back-patch to v10 where the host/hostaddr list functionality was
introduced.
Discussion: https://postgr.es/m/30805.1532749137@sss.pgh.pa.us
2018-08-01 18:30:36 +02:00
|
|
|
if (dbname == NULL || dbname[0] == '\0')
|
2002-08-15 04:56:19 +02:00
|
|
|
return NULL;
|
|
|
|
|
Fix libpq's code for searching .pgpass; rationalize empty-list-item cases.
Before v10, we always searched ~/.pgpass using the host parameter,
and nothing else, to match to the "hostname" field of ~/.pgpass.
(However, null host or host matching DEFAULT_PGSOCKET_DIR was replaced by
"localhost".) In v10, this got broken by commit 274bb2b38, repaired by
commit bdac9836d, and broken again by commit 7b02ba62e; in the code
actually shipped, we'd search with hostaddr if both that and host were
specified --- though oddly, *not* if only hostaddr were specified.
Since this is directly contrary to the documentation, and not
backwards-compatible, it's clearly a bug.
However, the change wasn't totally without justification, even though it
wasn't done quite right, because the pre-v10 behavior has arguably been
buggy since we added hostaddr. If hostaddr is specified and host isn't,
the pre-v10 code will search ~/.pgpass for "localhost", and ship that
password off to a server that most likely isn't local at all. That's
unhelpful at best, and could be a security breach at worst.
Therefore, rather than just revert to that old behavior, let's define
the behavior as "search with host if provided, else with hostaddr if
provided, else search for localhost". (As before, a host name matching
DEFAULT_PGSOCKET_DIR is replaced by localhost.) This matches the
behavior of the actual connection code, so that we don't pick up an
inappropriate password; and it allows useful searches to happen when
only hostaddr is given.
While we're messing around here, ensure that empty elements within a
host or hostaddr list select the same behavior as a totally-empty
field would; for instance "host=a,,b" is equivalent to "host=a,/tmp,b"
if DEFAULT_PGSOCKET_DIR is /tmp. Things worked that way in some cases
already, but not consistently so, which contributed to the confusion
about what key ~/.pgpass would get searched with.
Update documentation accordingly, and also clarify some nearby text.
Back-patch to v10 where the host/hostaddr list functionality was
introduced.
Discussion: https://postgr.es/m/30805.1532749137@sss.pgh.pa.us
2018-08-01 18:30:36 +02:00
|
|
|
if (username == NULL || username[0] == '\0')
|
2002-08-15 04:56:19 +02:00
|
|
|
return NULL;
|
|
|
|
|
2006-05-17 23:50:54 +02:00
|
|
|
/* 'localhost' matches pghost of '' or the default socket directory */
|
Fix libpq's code for searching .pgpass; rationalize empty-list-item cases.
Before v10, we always searched ~/.pgpass using the host parameter,
and nothing else, to match to the "hostname" field of ~/.pgpass.
(However, null host or host matching DEFAULT_PGSOCKET_DIR was replaced by
"localhost".) In v10, this got broken by commit 274bb2b38, repaired by
commit bdac9836d, and broken again by commit 7b02ba62e; in the code
actually shipped, we'd search with hostaddr if both that and host were
specified --- though oddly, *not* if only hostaddr were specified.
Since this is directly contrary to the documentation, and not
backwards-compatible, it's clearly a bug.
However, the change wasn't totally without justification, even though it
wasn't done quite right, because the pre-v10 behavior has arguably been
buggy since we added hostaddr. If hostaddr is specified and host isn't,
the pre-v10 code will search ~/.pgpass for "localhost", and ship that
password off to a server that most likely isn't local at all. That's
unhelpful at best, and could be a security breach at worst.
Therefore, rather than just revert to that old behavior, let's define
the behavior as "search with host if provided, else with hostaddr if
provided, else search for localhost". (As before, a host name matching
DEFAULT_PGSOCKET_DIR is replaced by localhost.) This matches the
behavior of the actual connection code, so that we don't pick up an
inappropriate password; and it allows useful searches to happen when
only hostaddr is given.
While we're messing around here, ensure that empty elements within a
host or hostaddr list select the same behavior as a totally-empty
field would; for instance "host=a,,b" is equivalent to "host=a,/tmp,b"
if DEFAULT_PGSOCKET_DIR is /tmp. Things worked that way in some cases
already, but not consistently so, which contributed to the confusion
about what key ~/.pgpass would get searched with.
Update documentation accordingly, and also clarify some nearby text.
Back-patch to v10 where the host/hostaddr list functionality was
introduced.
Discussion: https://postgr.es/m/30805.1532749137@sss.pgh.pa.us
2018-08-01 18:30:36 +02:00
|
|
|
if (hostname == NULL || hostname[0] == '\0')
|
2002-08-15 04:56:19 +02:00
|
|
|
hostname = DefaultHost;
|
2020-11-25 08:14:23 +01:00
|
|
|
else if (is_unixsock_path(hostname))
|
2006-10-04 02:30:14 +02:00
|
|
|
|
2006-05-18 18:26:44 +02:00
|
|
|
/*
|
|
|
|
* We should probably use canonicalize_path(), but then we have to
|
|
|
|
* bring path.c into libpq, and it doesn't seem worth it.
|
|
|
|
*/
|
|
|
|
if (strcmp(hostname, DEFAULT_PGSOCKET_DIR) == 0)
|
2006-05-17 23:50:54 +02:00
|
|
|
hostname = DefaultHost;
|
2006-10-04 02:30:14 +02:00
|
|
|
|
Fix libpq's code for searching .pgpass; rationalize empty-list-item cases.
Before v10, we always searched ~/.pgpass using the host parameter,
and nothing else, to match to the "hostname" field of ~/.pgpass.
(However, null host or host matching DEFAULT_PGSOCKET_DIR was replaced by
"localhost".) In v10, this got broken by commit 274bb2b38, repaired by
commit bdac9836d, and broken again by commit 7b02ba62e; in the code
actually shipped, we'd search with hostaddr if both that and host were
specified --- though oddly, *not* if only hostaddr were specified.
Since this is directly contrary to the documentation, and not
backwards-compatible, it's clearly a bug.
However, the change wasn't totally without justification, even though it
wasn't done quite right, because the pre-v10 behavior has arguably been
buggy since we added hostaddr. If hostaddr is specified and host isn't,
the pre-v10 code will search ~/.pgpass for "localhost", and ship that
password off to a server that most likely isn't local at all. That's
unhelpful at best, and could be a security breach at worst.
Therefore, rather than just revert to that old behavior, let's define
the behavior as "search with host if provided, else with hostaddr if
provided, else search for localhost". (As before, a host name matching
DEFAULT_PGSOCKET_DIR is replaced by localhost.) This matches the
behavior of the actual connection code, so that we don't pick up an
inappropriate password; and it allows useful searches to happen when
only hostaddr is given.
While we're messing around here, ensure that empty elements within a
host or hostaddr list select the same behavior as a totally-empty
field would; for instance "host=a,,b" is equivalent to "host=a,/tmp,b"
if DEFAULT_PGSOCKET_DIR is /tmp. Things worked that way in some cases
already, but not consistently so, which contributed to the confusion
about what key ~/.pgpass would get searched with.
Update documentation accordingly, and also clarify some nearby text.
Back-patch to v10 where the host/hostaddr list functionality was
introduced.
Discussion: https://postgr.es/m/30805.1532749137@sss.pgh.pa.us
2018-08-01 18:30:36 +02:00
|
|
|
if (port == NULL || port[0] == '\0')
|
2002-08-15 04:56:19 +02:00
|
|
|
port = DEF_PGPORT_STR;
|
|
|
|
|
2002-08-30 01:06:32 +02:00
|
|
|
/* If password file cannot be opened, ignore it. */
|
2008-03-31 04:43:14 +02:00
|
|
|
if (stat(pgpassfile, &stat_buf) != 0)
|
2002-08-30 01:06:32 +02:00
|
|
|
return NULL;
|
|
|
|
|
2005-06-19 15:10:56 +02:00
|
|
|
#ifndef WIN32
|
2005-06-10 05:02:30 +02:00
|
|
|
if (!S_ISREG(stat_buf.st_mode))
|
|
|
|
{
|
|
|
|
fprintf(stderr,
|
2005-09-26 19:49:09 +02:00
|
|
|
libpq_gettext("WARNING: password file \"%s\" is not a plain file\n"),
|
2005-06-10 05:02:30 +02:00
|
|
|
pgpassfile);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2002-08-30 01:06:32 +02:00
|
|
|
/* If password file is insecure, alert the user and ignore it. */
|
|
|
|
if (stat_buf.st_mode & (S_IRWXG | S_IRWXO))
|
|
|
|
{
|
|
|
|
fprintf(stderr,
|
2008-03-31 04:43:14 +02:00
|
|
|
libpq_gettext("WARNING: password file \"%s\" has group or world access; permissions should be u=rw (0600) or less\n"),
|
2002-09-06 00:05:50 +02:00
|
|
|
pgpassfile);
|
2002-08-30 01:06:32 +02:00
|
|
|
return NULL;
|
|
|
|
}
|
2007-02-20 16:20:51 +01:00
|
|
|
#else
|
2007-11-15 22:14:46 +01:00
|
|
|
|
2007-02-20 16:20:51 +01:00
|
|
|
/*
|
|
|
|
* On Win32, the directory is protected, so we don't have to check the
|
|
|
|
* file.
|
|
|
|
*/
|
2002-10-03 19:09:42 +02:00
|
|
|
#endif
|
2002-08-30 01:06:32 +02:00
|
|
|
|
2002-09-06 00:05:50 +02:00
|
|
|
fp = fopen(pgpassfile, "r");
|
2002-08-15 04:56:19 +02:00
|
|
|
if (fp == NULL)
|
|
|
|
return NULL;
|
|
|
|
|
Teach libpq to handle arbitrary-length lines in .pgpass files.
Historically there's been a hard-wired assumption here that no line of
a .pgpass file could be as long as NAMEDATALEN*5 bytes. That's a bit
shaky to start off with, because (a) there's no reason to suppose that
host names fit in NAMEDATALEN, and (b) this figure fails to allow for
backslash escape characters. However, it fails completely if someone
wants to use a very long password, and we're now hearing reports of
people wanting to use "security tokens" that can run up to several
hundred bytes. Another angle is that the file is specified to allow
comment lines, but there's no reason to assume that long comment lines
aren't possible.
Rather than guessing at what might be a more suitable limit, let's
replace the fixed-size buffer with an expansible PQExpBuffer. That
adds one malloc/free cycle to the typical use-case, but that's surely
pretty cheap relative to the I/O this code has to do.
Also, add TAP test cases to exercise this code, because there was no
test coverage before.
This reverts most of commit 2eb3bc588, as there's no longer a need for
a warning message about overlength .pgpass lines. (I kept the explicit
check for comment lines, though.)
In HEAD and v13, this also fixes an oversight in 74a308cf5: there's not
much point in explicit_bzero'ing the line buffer if we only do so in two
of the three exit paths.
Back-patch to all supported branches, except that the test case only
goes back to v10 where src/test/authentication/ was added.
Discussion: https://postgr.es/m/4187382.1598909041@sss.pgh.pa.us
2020-09-01 19:14:44 +02:00
|
|
|
/* Use an expansible buffer to accommodate any reasonable line length */
|
|
|
|
initPQExpBuffer(&buf);
|
|
|
|
|
2010-03-03 21:31:09 +01:00
|
|
|
while (!feof(fp) && !ferror(fp))
|
2002-08-15 04:56:19 +02:00
|
|
|
{
|
Teach libpq to handle arbitrary-length lines in .pgpass files.
Historically there's been a hard-wired assumption here that no line of
a .pgpass file could be as long as NAMEDATALEN*5 bytes. That's a bit
shaky to start off with, because (a) there's no reason to suppose that
host names fit in NAMEDATALEN, and (b) this figure fails to allow for
backslash escape characters. However, it fails completely if someone
wants to use a very long password, and we're now hearing reports of
people wanting to use "security tokens" that can run up to several
hundred bytes. Another angle is that the file is specified to allow
comment lines, but there's no reason to assume that long comment lines
aren't possible.
Rather than guessing at what might be a more suitable limit, let's
replace the fixed-size buffer with an expansible PQExpBuffer. That
adds one malloc/free cycle to the typical use-case, but that's surely
pretty cheap relative to the I/O this code has to do.
Also, add TAP test cases to exercise this code, because there was no
test coverage before.
This reverts most of commit 2eb3bc588, as there's no longer a need for
a warning message about overlength .pgpass lines. (I kept the explicit
check for comment lines, though.)
In HEAD and v13, this also fixes an oversight in 74a308cf5: there's not
much point in explicit_bzero'ing the line buffer if we only do so in two
of the three exit paths.
Back-patch to all supported branches, except that the test case only
goes back to v10 where src/test/authentication/ was added.
Discussion: https://postgr.es/m/4187382.1598909041@sss.pgh.pa.us
2020-09-01 19:14:44 +02:00
|
|
|
/* Make sure there's a reasonable amount of room in the buffer */
|
|
|
|
if (!enlargePQExpBuffer(&buf, 128))
|
|
|
|
break;
|
2002-09-04 22:31:48 +02:00
|
|
|
|
Teach libpq to handle arbitrary-length lines in .pgpass files.
Historically there's been a hard-wired assumption here that no line of
a .pgpass file could be as long as NAMEDATALEN*5 bytes. That's a bit
shaky to start off with, because (a) there's no reason to suppose that
host names fit in NAMEDATALEN, and (b) this figure fails to allow for
backslash escape characters. However, it fails completely if someone
wants to use a very long password, and we're now hearing reports of
people wanting to use "security tokens" that can run up to several
hundred bytes. Another angle is that the file is specified to allow
comment lines, but there's no reason to assume that long comment lines
aren't possible.
Rather than guessing at what might be a more suitable limit, let's
replace the fixed-size buffer with an expansible PQExpBuffer. That
adds one malloc/free cycle to the typical use-case, but that's surely
pretty cheap relative to the I/O this code has to do.
Also, add TAP test cases to exercise this code, because there was no
test coverage before.
This reverts most of commit 2eb3bc588, as there's no longer a need for
a warning message about overlength .pgpass lines. (I kept the explicit
check for comment lines, though.)
In HEAD and v13, this also fixes an oversight in 74a308cf5: there's not
much point in explicit_bzero'ing the line buffer if we only do so in two
of the three exit paths.
Back-patch to all supported branches, except that the test case only
goes back to v10 where src/test/authentication/ was added.
Discussion: https://postgr.es/m/4187382.1598909041@sss.pgh.pa.us
2020-09-01 19:14:44 +02:00
|
|
|
/* Read some data, appending it to what we already have */
|
|
|
|
if (fgets(buf.data + buf.len, buf.maxlen - buf.len, fp) == NULL)
|
2010-04-30 19:09:13 +02:00
|
|
|
break;
|
Teach libpq to handle arbitrary-length lines in .pgpass files.
Historically there's been a hard-wired assumption here that no line of
a .pgpass file could be as long as NAMEDATALEN*5 bytes. That's a bit
shaky to start off with, because (a) there's no reason to suppose that
host names fit in NAMEDATALEN, and (b) this figure fails to allow for
backslash escape characters. However, it fails completely if someone
wants to use a very long password, and we're now hearing reports of
people wanting to use "security tokens" that can run up to several
hundred bytes. Another angle is that the file is specified to allow
comment lines, but there's no reason to assume that long comment lines
aren't possible.
Rather than guessing at what might be a more suitable limit, let's
replace the fixed-size buffer with an expansible PQExpBuffer. That
adds one malloc/free cycle to the typical use-case, but that's surely
pretty cheap relative to the I/O this code has to do.
Also, add TAP test cases to exercise this code, because there was no
test coverage before.
This reverts most of commit 2eb3bc588, as there's no longer a need for
a warning message about overlength .pgpass lines. (I kept the explicit
check for comment lines, though.)
In HEAD and v13, this also fixes an oversight in 74a308cf5: there's not
much point in explicit_bzero'ing the line buffer if we only do so in two
of the three exit paths.
Back-patch to all supported branches, except that the test case only
goes back to v10 where src/test/authentication/ was added.
Discussion: https://postgr.es/m/4187382.1598909041@sss.pgh.pa.us
2020-09-01 19:14:44 +02:00
|
|
|
buf.len += strlen(buf.data + buf.len);
|
2003-01-30 20:49:54 +01:00
|
|
|
|
Teach libpq to handle arbitrary-length lines in .pgpass files.
Historically there's been a hard-wired assumption here that no line of
a .pgpass file could be as long as NAMEDATALEN*5 bytes. That's a bit
shaky to start off with, because (a) there's no reason to suppose that
host names fit in NAMEDATALEN, and (b) this figure fails to allow for
backslash escape characters. However, it fails completely if someone
wants to use a very long password, and we're now hearing reports of
people wanting to use "security tokens" that can run up to several
hundred bytes. Another angle is that the file is specified to allow
comment lines, but there's no reason to assume that long comment lines
aren't possible.
Rather than guessing at what might be a more suitable limit, let's
replace the fixed-size buffer with an expansible PQExpBuffer. That
adds one malloc/free cycle to the typical use-case, but that's surely
pretty cheap relative to the I/O this code has to do.
Also, add TAP test cases to exercise this code, because there was no
test coverage before.
This reverts most of commit 2eb3bc588, as there's no longer a need for
a warning message about overlength .pgpass lines. (I kept the explicit
check for comment lines, though.)
In HEAD and v13, this also fixes an oversight in 74a308cf5: there's not
much point in explicit_bzero'ing the line buffer if we only do so in two
of the three exit paths.
Back-patch to all supported branches, except that the test case only
goes back to v10 where src/test/authentication/ was added.
Discussion: https://postgr.es/m/4187382.1598909041@sss.pgh.pa.us
2020-09-01 19:14:44 +02:00
|
|
|
/* If we don't yet have a whole line, loop around to read more */
|
|
|
|
if (!(buf.len > 0 && buf.data[buf.len - 1] == '\n') && !feof(fp))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
/* ignore comments */
|
|
|
|
if (buf.data[0] != '#')
|
2020-03-05 05:00:38 +01:00
|
|
|
{
|
Teach libpq to handle arbitrary-length lines in .pgpass files.
Historically there's been a hard-wired assumption here that no line of
a .pgpass file could be as long as NAMEDATALEN*5 bytes. That's a bit
shaky to start off with, because (a) there's no reason to suppose that
host names fit in NAMEDATALEN, and (b) this figure fails to allow for
backslash escape characters. However, it fails completely if someone
wants to use a very long password, and we're now hearing reports of
people wanting to use "security tokens" that can run up to several
hundred bytes. Another angle is that the file is specified to allow
comment lines, but there's no reason to assume that long comment lines
aren't possible.
Rather than guessing at what might be a more suitable limit, let's
replace the fixed-size buffer with an expansible PQExpBuffer. That
adds one malloc/free cycle to the typical use-case, but that's surely
pretty cheap relative to the I/O this code has to do.
Also, add TAP test cases to exercise this code, because there was no
test coverage before.
This reverts most of commit 2eb3bc588, as there's no longer a need for
a warning message about overlength .pgpass lines. (I kept the explicit
check for comment lines, though.)
In HEAD and v13, this also fixes an oversight in 74a308cf5: there's not
much point in explicit_bzero'ing the line buffer if we only do so in two
of the three exit paths.
Back-patch to all supported branches, except that the test case only
goes back to v10 where src/test/authentication/ was added.
Discussion: https://postgr.es/m/4187382.1598909041@sss.pgh.pa.us
2020-09-01 19:14:44 +02:00
|
|
|
char *t = buf.data;
|
|
|
|
int len;
|
2020-03-05 05:00:38 +01:00
|
|
|
|
Teach libpq to handle arbitrary-length lines in .pgpass files.
Historically there's been a hard-wired assumption here that no line of
a .pgpass file could be as long as NAMEDATALEN*5 bytes. That's a bit
shaky to start off with, because (a) there's no reason to suppose that
host names fit in NAMEDATALEN, and (b) this figure fails to allow for
backslash escape characters. However, it fails completely if someone
wants to use a very long password, and we're now hearing reports of
people wanting to use "security tokens" that can run up to several
hundred bytes. Another angle is that the file is specified to allow
comment lines, but there's no reason to assume that long comment lines
aren't possible.
Rather than guessing at what might be a more suitable limit, let's
replace the fixed-size buffer with an expansible PQExpBuffer. That
adds one malloc/free cycle to the typical use-case, but that's surely
pretty cheap relative to the I/O this code has to do.
Also, add TAP test cases to exercise this code, because there was no
test coverage before.
This reverts most of commit 2eb3bc588, as there's no longer a need for
a warning message about overlength .pgpass lines. (I kept the explicit
check for comment lines, though.)
In HEAD and v13, this also fixes an oversight in 74a308cf5: there's not
much point in explicit_bzero'ing the line buffer if we only do so in two
of the three exit paths.
Back-patch to all supported branches, except that the test case only
goes back to v10 where src/test/authentication/ was added.
Discussion: https://postgr.es/m/4187382.1598909041@sss.pgh.pa.us
2020-09-01 19:14:44 +02:00
|
|
|
/* strip trailing newline and carriage return */
|
|
|
|
len = pg_strip_crlf(t);
|
2020-03-05 05:00:38 +01:00
|
|
|
|
Teach libpq to handle arbitrary-length lines in .pgpass files.
Historically there's been a hard-wired assumption here that no line of
a .pgpass file could be as long as NAMEDATALEN*5 bytes. That's a bit
shaky to start off with, because (a) there's no reason to suppose that
host names fit in NAMEDATALEN, and (b) this figure fails to allow for
backslash escape characters. However, it fails completely if someone
wants to use a very long password, and we're now hearing reports of
people wanting to use "security tokens" that can run up to several
hundred bytes. Another angle is that the file is specified to allow
comment lines, but there's no reason to assume that long comment lines
aren't possible.
Rather than guessing at what might be a more suitable limit, let's
replace the fixed-size buffer with an expansible PQExpBuffer. That
adds one malloc/free cycle to the typical use-case, but that's surely
pretty cheap relative to the I/O this code has to do.
Also, add TAP test cases to exercise this code, because there was no
test coverage before.
This reverts most of commit 2eb3bc588, as there's no longer a need for
a warning message about overlength .pgpass lines. (I kept the explicit
check for comment lines, though.)
In HEAD and v13, this also fixes an oversight in 74a308cf5: there's not
much point in explicit_bzero'ing the line buffer if we only do so in two
of the three exit paths.
Back-patch to all supported branches, except that the test case only
goes back to v10 where src/test/authentication/ was added.
Discussion: https://postgr.es/m/4187382.1598909041@sss.pgh.pa.us
2020-09-01 19:14:44 +02:00
|
|
|
if (len > 0 &&
|
|
|
|
(t = pwdfMatchesString(t, hostname)) != NULL &&
|
|
|
|
(t = pwdfMatchesString(t, port)) != NULL &&
|
|
|
|
(t = pwdfMatchesString(t, dbname)) != NULL &&
|
|
|
|
(t = pwdfMatchesString(t, username)) != NULL)
|
2020-03-05 05:00:38 +01:00
|
|
|
{
|
Teach libpq to handle arbitrary-length lines in .pgpass files.
Historically there's been a hard-wired assumption here that no line of
a .pgpass file could be as long as NAMEDATALEN*5 bytes. That's a bit
shaky to start off with, because (a) there's no reason to suppose that
host names fit in NAMEDATALEN, and (b) this figure fails to allow for
backslash escape characters. However, it fails completely if someone
wants to use a very long password, and we're now hearing reports of
people wanting to use "security tokens" that can run up to several
hundred bytes. Another angle is that the file is specified to allow
comment lines, but there's no reason to assume that long comment lines
aren't possible.
Rather than guessing at what might be a more suitable limit, let's
replace the fixed-size buffer with an expansible PQExpBuffer. That
adds one malloc/free cycle to the typical use-case, but that's surely
pretty cheap relative to the I/O this code has to do.
Also, add TAP test cases to exercise this code, because there was no
test coverage before.
This reverts most of commit 2eb3bc588, as there's no longer a need for
a warning message about overlength .pgpass lines. (I kept the explicit
check for comment lines, though.)
In HEAD and v13, this also fixes an oversight in 74a308cf5: there's not
much point in explicit_bzero'ing the line buffer if we only do so in two
of the three exit paths.
Back-patch to all supported branches, except that the test case only
goes back to v10 where src/test/authentication/ was added.
Discussion: https://postgr.es/m/4187382.1598909041@sss.pgh.pa.us
2020-09-01 19:14:44 +02:00
|
|
|
/* Found a match. */
|
|
|
|
char *ret,
|
|
|
|
*p1,
|
|
|
|
*p2;
|
2016-11-15 22:17:19 +01:00
|
|
|
|
Teach libpq to handle arbitrary-length lines in .pgpass files.
Historically there's been a hard-wired assumption here that no line of
a .pgpass file could be as long as NAMEDATALEN*5 bytes. That's a bit
shaky to start off with, because (a) there's no reason to suppose that
host names fit in NAMEDATALEN, and (b) this figure fails to allow for
backslash escape characters. However, it fails completely if someone
wants to use a very long password, and we're now hearing reports of
people wanting to use "security tokens" that can run up to several
hundred bytes. Another angle is that the file is specified to allow
comment lines, but there's no reason to assume that long comment lines
aren't possible.
Rather than guessing at what might be a more suitable limit, let's
replace the fixed-size buffer with an expansible PQExpBuffer. That
adds one malloc/free cycle to the typical use-case, but that's surely
pretty cheap relative to the I/O this code has to do.
Also, add TAP test cases to exercise this code, because there was no
test coverage before.
This reverts most of commit 2eb3bc588, as there's no longer a need for
a warning message about overlength .pgpass lines. (I kept the explicit
check for comment lines, though.)
In HEAD and v13, this also fixes an oversight in 74a308cf5: there's not
much point in explicit_bzero'ing the line buffer if we only do so in two
of the three exit paths.
Back-patch to all supported branches, except that the test case only
goes back to v10 where src/test/authentication/ was added.
Discussion: https://postgr.es/m/4187382.1598909041@sss.pgh.pa.us
2020-09-01 19:14:44 +02:00
|
|
|
ret = strdup(t);
|
2003-01-30 20:49:54 +01:00
|
|
|
|
Teach libpq to handle arbitrary-length lines in .pgpass files.
Historically there's been a hard-wired assumption here that no line of
a .pgpass file could be as long as NAMEDATALEN*5 bytes. That's a bit
shaky to start off with, because (a) there's no reason to suppose that
host names fit in NAMEDATALEN, and (b) this figure fails to allow for
backslash escape characters. However, it fails completely if someone
wants to use a very long password, and we're now hearing reports of
people wanting to use "security tokens" that can run up to several
hundred bytes. Another angle is that the file is specified to allow
comment lines, but there's no reason to assume that long comment lines
aren't possible.
Rather than guessing at what might be a more suitable limit, let's
replace the fixed-size buffer with an expansible PQExpBuffer. That
adds one malloc/free cycle to the typical use-case, but that's surely
pretty cheap relative to the I/O this code has to do.
Also, add TAP test cases to exercise this code, because there was no
test coverage before.
This reverts most of commit 2eb3bc588, as there's no longer a need for
a warning message about overlength .pgpass lines. (I kept the explicit
check for comment lines, though.)
In HEAD and v13, this also fixes an oversight in 74a308cf5: there's not
much point in explicit_bzero'ing the line buffer if we only do so in two
of the three exit paths.
Back-patch to all supported branches, except that the test case only
goes back to v10 where src/test/authentication/ was added.
Discussion: https://postgr.es/m/4187382.1598909041@sss.pgh.pa.us
2020-09-01 19:14:44 +02:00
|
|
|
fclose(fp);
|
|
|
|
explicit_bzero(buf.data, buf.maxlen);
|
|
|
|
termPQExpBuffer(&buf);
|
2016-11-15 22:17:19 +01:00
|
|
|
|
Teach libpq to handle arbitrary-length lines in .pgpass files.
Historically there's been a hard-wired assumption here that no line of
a .pgpass file could be as long as NAMEDATALEN*5 bytes. That's a bit
shaky to start off with, because (a) there's no reason to suppose that
host names fit in NAMEDATALEN, and (b) this figure fails to allow for
backslash escape characters. However, it fails completely if someone
wants to use a very long password, and we're now hearing reports of
people wanting to use "security tokens" that can run up to several
hundred bytes. Another angle is that the file is specified to allow
comment lines, but there's no reason to assume that long comment lines
aren't possible.
Rather than guessing at what might be a more suitable limit, let's
replace the fixed-size buffer with an expansible PQExpBuffer. That
adds one malloc/free cycle to the typical use-case, but that's surely
pretty cheap relative to the I/O this code has to do.
Also, add TAP test cases to exercise this code, because there was no
test coverage before.
This reverts most of commit 2eb3bc588, as there's no longer a need for
a warning message about overlength .pgpass lines. (I kept the explicit
check for comment lines, though.)
In HEAD and v13, this also fixes an oversight in 74a308cf5: there's not
much point in explicit_bzero'ing the line buffer if we only do so in two
of the three exit paths.
Back-patch to all supported branches, except that the test case only
goes back to v10 where src/test/authentication/ was added.
Discussion: https://postgr.es/m/4187382.1598909041@sss.pgh.pa.us
2020-09-01 19:14:44 +02:00
|
|
|
if (!ret)
|
|
|
|
{
|
|
|
|
/* Out of memory. XXX: an error message would be nice. */
|
|
|
|
return NULL;
|
|
|
|
}
|
2011-12-22 18:55:27 +01:00
|
|
|
|
Teach libpq to handle arbitrary-length lines in .pgpass files.
Historically there's been a hard-wired assumption here that no line of
a .pgpass file could be as long as NAMEDATALEN*5 bytes. That's a bit
shaky to start off with, because (a) there's no reason to suppose that
host names fit in NAMEDATALEN, and (b) this figure fails to allow for
backslash escape characters. However, it fails completely if someone
wants to use a very long password, and we're now hearing reports of
people wanting to use "security tokens" that can run up to several
hundred bytes. Another angle is that the file is specified to allow
comment lines, but there's no reason to assume that long comment lines
aren't possible.
Rather than guessing at what might be a more suitable limit, let's
replace the fixed-size buffer with an expansible PQExpBuffer. That
adds one malloc/free cycle to the typical use-case, but that's surely
pretty cheap relative to the I/O this code has to do.
Also, add TAP test cases to exercise this code, because there was no
test coverage before.
This reverts most of commit 2eb3bc588, as there's no longer a need for
a warning message about overlength .pgpass lines. (I kept the explicit
check for comment lines, though.)
In HEAD and v13, this also fixes an oversight in 74a308cf5: there's not
much point in explicit_bzero'ing the line buffer if we only do so in two
of the three exit paths.
Back-patch to all supported branches, except that the test case only
goes back to v10 where src/test/authentication/ was added.
Discussion: https://postgr.es/m/4187382.1598909041@sss.pgh.pa.us
2020-09-01 19:14:44 +02:00
|
|
|
/* De-escape password. */
|
|
|
|
for (p1 = p2 = ret; *p1 != ':' && *p1 != '\0'; ++p1, ++p2)
|
|
|
|
{
|
|
|
|
if (*p1 == '\\' && p1[1] != '\0')
|
|
|
|
++p1;
|
|
|
|
*p2 = *p1;
|
|
|
|
}
|
|
|
|
*p2 = '\0';
|
2014-11-25 11:55:00 +01:00
|
|
|
|
Teach libpq to handle arbitrary-length lines in .pgpass files.
Historically there's been a hard-wired assumption here that no line of
a .pgpass file could be as long as NAMEDATALEN*5 bytes. That's a bit
shaky to start off with, because (a) there's no reason to suppose that
host names fit in NAMEDATALEN, and (b) this figure fails to allow for
backslash escape characters. However, it fails completely if someone
wants to use a very long password, and we're now hearing reports of
people wanting to use "security tokens" that can run up to several
hundred bytes. Another angle is that the file is specified to allow
comment lines, but there's no reason to assume that long comment lines
aren't possible.
Rather than guessing at what might be a more suitable limit, let's
replace the fixed-size buffer with an expansible PQExpBuffer. That
adds one malloc/free cycle to the typical use-case, but that's surely
pretty cheap relative to the I/O this code has to do.
Also, add TAP test cases to exercise this code, because there was no
test coverage before.
This reverts most of commit 2eb3bc588, as there's no longer a need for
a warning message about overlength .pgpass lines. (I kept the explicit
check for comment lines, though.)
In HEAD and v13, this also fixes an oversight in 74a308cf5: there's not
much point in explicit_bzero'ing the line buffer if we only do so in two
of the three exit paths.
Back-patch to all supported branches, except that the test case only
goes back to v10 where src/test/authentication/ was added.
Discussion: https://postgr.es/m/4187382.1598909041@sss.pgh.pa.us
2020-09-01 19:14:44 +02:00
|
|
|
return ret;
|
|
|
|
}
|
2011-12-22 18:55:27 +01:00
|
|
|
}
|
|
|
|
|
Teach libpq to handle arbitrary-length lines in .pgpass files.
Historically there's been a hard-wired assumption here that no line of
a .pgpass file could be as long as NAMEDATALEN*5 bytes. That's a bit
shaky to start off with, because (a) there's no reason to suppose that
host names fit in NAMEDATALEN, and (b) this figure fails to allow for
backslash escape characters. However, it fails completely if someone
wants to use a very long password, and we're now hearing reports of
people wanting to use "security tokens" that can run up to several
hundred bytes. Another angle is that the file is specified to allow
comment lines, but there's no reason to assume that long comment lines
aren't possible.
Rather than guessing at what might be a more suitable limit, let's
replace the fixed-size buffer with an expansible PQExpBuffer. That
adds one malloc/free cycle to the typical use-case, but that's surely
pretty cheap relative to the I/O this code has to do.
Also, add TAP test cases to exercise this code, because there was no
test coverage before.
This reverts most of commit 2eb3bc588, as there's no longer a need for
a warning message about overlength .pgpass lines. (I kept the explicit
check for comment lines, though.)
In HEAD and v13, this also fixes an oversight in 74a308cf5: there's not
much point in explicit_bzero'ing the line buffer if we only do so in two
of the three exit paths.
Back-patch to all supported branches, except that the test case only
goes back to v10 where src/test/authentication/ was added.
Discussion: https://postgr.es/m/4187382.1598909041@sss.pgh.pa.us
2020-09-01 19:14:44 +02:00
|
|
|
/* No match, reset buffer to prepare for next line. */
|
|
|
|
buf.len = 0;
|
2002-08-15 04:56:19 +02:00
|
|
|
}
|
2002-09-06 00:05:50 +02:00
|
|
|
|
2002-08-15 04:56:19 +02:00
|
|
|
fclose(fp);
|
Teach libpq to handle arbitrary-length lines in .pgpass files.
Historically there's been a hard-wired assumption here that no line of
a .pgpass file could be as long as NAMEDATALEN*5 bytes. That's a bit
shaky to start off with, because (a) there's no reason to suppose that
host names fit in NAMEDATALEN, and (b) this figure fails to allow for
backslash escape characters. However, it fails completely if someone
wants to use a very long password, and we're now hearing reports of
people wanting to use "security tokens" that can run up to several
hundred bytes. Another angle is that the file is specified to allow
comment lines, but there's no reason to assume that long comment lines
aren't possible.
Rather than guessing at what might be a more suitable limit, let's
replace the fixed-size buffer with an expansible PQExpBuffer. That
adds one malloc/free cycle to the typical use-case, but that's surely
pretty cheap relative to the I/O this code has to do.
Also, add TAP test cases to exercise this code, because there was no
test coverage before.
This reverts most of commit 2eb3bc588, as there's no longer a need for
a warning message about overlength .pgpass lines. (I kept the explicit
check for comment lines, though.)
In HEAD and v13, this also fixes an oversight in 74a308cf5: there's not
much point in explicit_bzero'ing the line buffer if we only do so in two
of the three exit paths.
Back-patch to all supported branches, except that the test case only
goes back to v10 where src/test/authentication/ was added.
Discussion: https://postgr.es/m/4187382.1598909041@sss.pgh.pa.us
2020-09-01 19:14:44 +02:00
|
|
|
explicit_bzero(buf.data, buf.maxlen);
|
|
|
|
termPQExpBuffer(&buf);
|
2002-08-15 04:56:19 +02:00
|
|
|
return NULL;
|
|
|
|
}
|
2004-01-09 03:02:43 +01:00
|
|
|
|
2010-03-13 15:55:57 +01:00
|
|
|
|
|
|
|
/*
|
2017-02-03 01:49:15 +01:00
|
|
|
* If the connection failed due to bad password, we should mention
|
|
|
|
* if we got the password from the pgpassfile.
|
2010-03-13 15:55:57 +01:00
|
|
|
*/
|
|
|
|
static void
|
2017-01-24 23:06:21 +01:00
|
|
|
pgpassfileWarning(PGconn *conn)
|
2010-03-13 15:55:57 +01:00
|
|
|
{
|
2017-01-24 23:06:21 +01:00
|
|
|
/* If it was 'invalid authorization', add pgpassfile mention */
|
2010-03-13 15:55:57 +01:00
|
|
|
/* only works with >= 9.0 servers */
|
Fix failure to reset libpq's state fully between connection attempts.
The logic in PQconnectPoll() did not take care to ensure that all of
a PGconn's internal state variables were reset before trying a new
connection attempt. If we got far enough in the connection sequence
to have changed any of these variables, and then decided to try a new
server address or server name, the new connection might be completed
with some state that really only applied to the failed connection.
While this has assorted bad consequences, the only one that is clearly
a security issue is that password_needed didn't get reset, so that
if the first server asked for a password and the second didn't,
PQconnectionUsedPassword() would return an incorrect result. This
could be leveraged by unprivileged users of dblink or postgres_fdw
to allow them to use server-side login credentials that they should
not be able to use.
Other notable problems include the possibility of forcing a v2-protocol
connection to a server capable of supporting v3, or overriding
"sslmode=prefer" to cause a non-encrypted connection to a server that
would have accepted an encrypted one. Those are certainly bugs but
it's harder to paint them as security problems in themselves. However,
forcing a v2-protocol connection could result in libpq having a wrong
idea of the server's standard_conforming_strings setting, which opens
the door to SQL-injection attacks. The extent to which that's actually
a problem, given the prerequisite that the attacker needs control of
the client's connection parameters, is unclear.
These problems have existed for a long time, but became more easily
exploitable in v10, both because it introduced easy ways to force libpq
to abandon a connection attempt at a late stage and then try another one
(rather than just giving up), and because it provided an easy way to
specify multiple target hosts.
Fix by rearranging PQconnectPoll's state machine to provide centralized
places to reset state properly when moving to a new target host or when
dropping and retrying a connection to the same host.
Tom Lane, reviewed by Noah Misch. Our thanks to Andrew Krasichkov
for finding and reporting the problem.
Security: CVE-2018-10915
2018-08-06 16:53:35 +02:00
|
|
|
if (conn->password_needed &&
|
|
|
|
conn->connhost[conn->whichhost].password != NULL &&
|
|
|
|
conn->result)
|
2010-03-13 15:55:57 +01:00
|
|
|
{
|
2017-02-03 01:49:15 +01:00
|
|
|
const char *sqlstate = PQresultErrorField(conn->result,
|
|
|
|
PG_DIAG_SQLSTATE);
|
|
|
|
|
|
|
|
if (sqlstate && strcmp(sqlstate, ERRCODE_INVALID_PASSWORD) == 0)
|
|
|
|
appendPQExpBuffer(&conn->errorMessage,
|
2010-03-17 21:58:38 +01:00
|
|
|
libpq_gettext("password retrieved from file \"%s\"\n"),
|
2017-02-03 01:49:15 +01:00
|
|
|
conn->pgpassfile);
|
2010-03-13 15:55:57 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-01-28 02:40:48 +01:00
|
|
|
/*
 * Check if the SSL protocol value given in input is valid or not.
 *
 * This is used as a sanity check routine for the connection parameters
 * ssl_min_protocol_version and ssl_max_protocol_version.
 */
static bool
sslVerifyProtocolVersion(const char *version)
{
	/* The complete set of protocol names we accept, case-insensitively. */
	static const char *const valid_versions[] = {
		"TLSv1", "TLSv1.1", "TLSv1.2", "TLSv1.3"
	};

	/*
	 * An empty string and a NULL value are considered valid as it is
	 * equivalent to ignoring the parameter.
	 */
	if (version == NULL || version[0] == '\0')
		return true;

	for (int i = 0; i < (int) (sizeof(valid_versions) / sizeof(valid_versions[0])); i++)
	{
		if (pg_strcasecmp(version, valid_versions[i]) == 0)
			return true;
	}

	/* anything else is wrong */
	return false;
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Ensure that the SSL protocol range given in input is correct. The check
|
|
|
|
* is performed on the input string to keep it TLS backend agnostic. Input
|
|
|
|
* to this function is expected verified with sslVerifyProtocolVersion().
|
|
|
|
*/
|
|
|
|
static bool
|
|
|
|
sslVerifyProtocolRange(const char *min, const char *max)
|
|
|
|
{
|
|
|
|
Assert(sslVerifyProtocolVersion(min) &&
|
|
|
|
sslVerifyProtocolVersion(max));
|
|
|
|
|
|
|
|
/* If at least one of the bounds is not set, the range is valid */
|
|
|
|
if (min == NULL || max == NULL || strlen(min) == 0 || strlen(max) == 0)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the minimum version is the lowest one we accept, then all options
|
|
|
|
* for the maximum are valid.
|
|
|
|
*/
|
|
|
|
if (pg_strcasecmp(min, "TLSv1") == 0)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The minimum bound is valid, and cannot be TLSv1, so using TLSv1 for the
|
|
|
|
* maximum is incorrect.
|
|
|
|
*/
|
|
|
|
if (pg_strcasecmp(max, "TLSv1") == 0)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* At this point we know that we have a mix of TLSv1.1 through 1.3
|
|
|
|
* versions.
|
|
|
|
*/
|
|
|
|
if (pg_strcasecmp(min, max) > 0)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2010-07-06 21:19:02 +02:00
|
|
|
|
2005-01-06 02:00:12 +01:00
|
|
|
/*
 * Obtain user's home directory, return in given buffer
 *
 * On Unix, this actually returns the user's home directory.  On Windows
 * it returns the PostgreSQL-specific application data folder.
 *
 * This is essentially the same as get_home_path(), but we don't use that
 * because we don't want to pull path.c into libpq (it pollutes application
 * namespace).
 *
 * Returns true on success, false on failure to obtain the directory name.
 *
 * CAUTION: although in most situations failure is unexpected, there are users
 * who like to run applications in a home-directory-less environment.  On
 * failure, you almost certainly DO NOT want to report an error.  Just act as
 * though whatever file you were hoping to find in the home directory isn't
 * there (which it isn't).
 */
bool
pqGetHomeDirectory(char *buf, int bufsize)
{
#ifndef WIN32
	const char *home = getenv("HOME");

	/* If $HOME is unset or empty, fall back on the password-file entry. */
	if (home == NULL || home[0] == '\0')
		return pg_get_user_home_dir(geteuid(), buf, bufsize);

	strlcpy(buf, home, bufsize);
	return true;
#else
	char		tmppath[MAX_PATH];

	ZeroMemory(tmppath, sizeof(tmppath));
	/* CSIDL_APPDATA points at the roaming Application Data folder. */
	if (SHGetFolderPath(NULL, CSIDL_APPDATA, NULL, 0, tmppath) != S_OK)
		return false;
	snprintf(buf, bufsize, "%s/postgresql", tmppath);
	return true;
#endif
}
|
|
|
|
|
2004-03-24 04:45:00 +01:00
|
|
|
/*
 * To keep the API consistent, the locking stubs are always provided, even
 * if they are not required.
 *
 * Since we neglected to provide any error-return convention in the
 * pgthreadlock_t API, we can't do much except Assert upon failure of any
 * mutex primitive.  Fortunately, such failures appear to be nonexistent in
 * the field.
 */
static void
default_threadlock(int acquire)
{
#ifdef ENABLE_THREAD_SAFETY
#ifndef WIN32
	static pthread_mutex_t singlethread_lock = PTHREAD_MUTEX_INITIALIZER;
#else
	/*
	 * On Windows, our pthread emulation has no static initializer, so the
	 * mutex must be created lazily.  Serialize the one-time creation with a
	 * spinlock built from InterlockedExchange (double-checked locking: the
	 * outer NULL test is a cheap fast path, the inner one is authoritative
	 * once the spinlock is held).
	 */
	static pthread_mutex_t singlethread_lock = NULL;
	static long mutex_initlock = 0;

	if (singlethread_lock == NULL)
	{
		while (InterlockedExchange(&mutex_initlock, 1) == 1)
			 /* loop, another thread own the lock */ ;
		if (singlethread_lock == NULL)
		{
			if (pthread_mutex_init(&singlethread_lock, NULL))
				Assert(false);
		}
		InterlockedExchange(&mutex_initlock, 0);
	}
#endif
	if (acquire)
	{
		if (pthread_mutex_lock(&singlethread_lock))
			Assert(false);
	}
	else
	{
		if (pthread_mutex_unlock(&singlethread_lock))
			Assert(false);
	}
#endif
}
|
|
|
|
|
2004-12-03 00:20:21 +01:00
|
|
|
pgthreadlock_t
|
|
|
|
PQregisterThreadLock(pgthreadlock_t newhandler)
|
2004-03-24 04:45:00 +01:00
|
|
|
{
|
2004-12-03 00:20:21 +01:00
|
|
|
pgthreadlock_t prev = pg_g_threadlock;
|
2004-03-24 04:45:00 +01:00
|
|
|
|
|
|
|
if (newhandler)
|
2004-12-03 00:20:21 +01:00
|
|
|
pg_g_threadlock = newhandler;
|
2004-03-24 04:45:00 +01:00
|
|
|
else
|
2004-12-03 00:20:21 +01:00
|
|
|
pg_g_threadlock = default_threadlock;
|
|
|
|
|
2004-03-24 04:45:00 +01:00
|
|
|
return prev;
|
|
|
|
}
|