/*-------------------------------------------------------------------------
 *
 * pg_backup_db.c
 *
 *	Implements the basic DB functions used by the archiver.
 *
 * IDENTIFICATION
 *	   src/bin/pg_dump/pg_backup_db.c
 *
 *-------------------------------------------------------------------------
 */
|
2014-10-14 20:00:55 +02:00
|
|
|
#include "postgres_fe.h"
|
2000-07-21 13:43:26 +02:00
|
|
|
|
2014-10-14 20:00:55 +02:00
|
|
|
#include "dumputils.h"
|
|
|
|
#include "pg_backup_archiver.h"
|
2001-02-10 03:31:31 +01:00
|
|
|
#include "pg_backup_db.h"
|
2013-03-27 17:10:40 +01:00
|
|
|
#include "pg_backup_utils.h"
|
2001-02-10 03:31:31 +01:00
|
|
|
|
2002-08-20 19:54:45 +02:00
|
|
|
#include <unistd.h>
|
2000-07-21 13:43:26 +02:00
|
|
|
#include <ctype.h>
|
|
|
|
#ifdef HAVE_TERMIOS_H
|
|
|
|
#include <termios.h>
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
2011-07-28 20:06:57 +02:00
|
|
|
#define DB_MAX_ERR_STMT 128
|
|
|
|
|
2012-07-25 06:02:49 +02:00
|
|
|
/* translator: this is a module name */
|
2001-07-03 22:21:50 +02:00
|
|
|
static const char *modulename = gettext_noop("archiver (db)");
|
2000-07-21 13:43:26 +02:00
|
|
|
|
2008-04-13 05:49:22 +02:00
|
|
|
static void _check_database_version(ArchiveHandle *AH);
|
2001-08-22 22:23:24 +02:00
|
|
|
static PGconn *_connectDB(ArchiveHandle *AH, const char *newdbname, const char *newUser);
|
2001-08-12 21:02:39 +02:00
|
|
|
static void notice_processor(void *arg, const char *message);
|
They are two different problems; the TOC entry is important for any
multiline command or to rerun the command easily later.
Whereas displaying the failed SQL command is a matter of fixing the
error
messages.
The latter is complicated by failed COPY commands which, with
die-on-errors
off, results in the data being processed as a command, so dumping the
command will dump all of the data.
In the case of long commands, should the whole command be dumped? eg.
(eg.
several pages of function definition).
In the case of the COPY command, I'm not sure what to do. Obviously, it
would be best to avoid sending the data, but the data and command are
combined (from memory). Also, the 'data' may be in the form of INSERT
statements.
Attached patch produces the first 125 chars of the command:
pg_restore: [archiver (db)] Error while PROCESSING TOC:
pg_restore: [archiver (db)] Error from TOC Entry 26; 1255 16449270
FUNCTION
plpgsql_call_handler() pjw
pg_restore: [archiver (db)] could not execute query: ERROR: function
"plpgsql_call_handler" already exists with same argument types
Command was: CREATE FUNCTION plpgsql_call_handler() RETURNS
language_handler
AS '/var/lib/pgsql-8.0b1/lib/plpgsql', 'plpgsql_call_han...
pg_restore: [archiver (db)] Error from TOC Entry 27; 1255 16449271
FUNCTION
plpgsql_validator(oid) pjw
pg_restore: [archiver (db)] could not execute query: ERROR: function
"plpgsql_validator" already exists with same argument types
Command was: CREATE FUNCTION plpgsql_validator(oid) RETURNS void
AS '/var/lib/pgsql-8.0b1/lib/plpgsql', 'plpgsql_validator'
LANGU...
Philip Warner
2004-08-20 22:00:34 +02:00
|
|
|
|
2000-07-21 13:43:26 +02:00
|
|
|
static void
|
2008-04-13 05:49:22 +02:00
|
|
|
_check_database_version(ArchiveHandle *AH)
|
2000-07-21 13:43:26 +02:00
|
|
|
{
|
|
|
|
const char *remoteversion_str;
|
2001-04-25 09:03:20 +02:00
|
|
|
int remoteversion;
|
2000-07-21 13:43:26 +02:00
|
|
|
|
2003-06-22 02:56:58 +02:00
|
|
|
remoteversion_str = PQparameterStatus(AH->connection, "server_version");
|
2013-03-26 14:21:57 +01:00
|
|
|
remoteversion = PQserverVersion(AH->connection);
|
|
|
|
if (remoteversion == 0 || !remoteversion_str)
|
2012-03-20 22:38:11 +01:00
|
|
|
exit_horribly(modulename, "could not get server_version from libpq\n");
|
2000-07-21 13:43:26 +02:00
|
|
|
|
2011-11-25 21:40:51 +01:00
|
|
|
AH->public.remoteVersionStr = pg_strdup(remoteversion_str);
|
2001-04-25 09:03:20 +02:00
|
|
|
AH->public.remoteVersion = remoteversion;
|
2010-02-24 03:42:55 +01:00
|
|
|
if (!AH->archiveRemoteVersion)
|
|
|
|
AH->archiveRemoteVersion = AH->public.remoteVersionStr;
|
2001-04-25 09:03:20 +02:00
|
|
|
|
2013-03-26 14:21:57 +01:00
|
|
|
if (remoteversion != PG_VERSION_NUM
|
2003-06-22 02:56:58 +02:00
|
|
|
&& (remoteversion < AH->public.minRemoteVersion ||
|
|
|
|
remoteversion > AH->public.maxRemoteVersion))
|
2000-07-21 13:43:26 +02:00
|
|
|
{
|
2001-08-12 21:02:39 +02:00
|
|
|
write_msg(NULL, "server version: %s; %s version: %s\n",
|
2001-06-27 23:21:37 +02:00
|
|
|
remoteversion_str, progname, PG_VERSION);
|
2012-03-20 22:38:11 +01:00
|
|
|
exit_horribly(NULL, "aborting because of server version mismatch\n");
|
2000-07-21 13:43:26 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2000-08-01 17:51:45 +02:00
|
|
|
/*
|
2001-08-22 22:23:24 +02:00
|
|
|
* Reconnect to the server. If dbname is not NULL, use that database,
|
|
|
|
* else the one associated with the archive handle. If username is
|
2014-05-06 18:12:18 +02:00
|
|
|
* not NULL, use that user name, else the one from the handle. If
|
2004-09-10 22:05:18 +02:00
|
|
|
* both the database and the user match the existing connection already,
|
|
|
|
* nothing will be done.
|
2001-08-22 22:23:24 +02:00
|
|
|
*
|
|
|
|
* Returns 1 in any case.
|
2000-08-01 17:51:45 +02:00
|
|
|
*/
|
2001-03-22 05:01:46 +01:00
|
|
|
int
|
2001-08-22 22:23:24 +02:00
|
|
|
ReconnectToServer(ArchiveHandle *AH, const char *dbname, const char *username)
|
2000-08-01 17:51:45 +02:00
|
|
|
{
|
2001-03-22 05:01:46 +01:00
|
|
|
PGconn *newConn;
|
2001-08-22 22:23:24 +02:00
|
|
|
const char *newdbname;
|
|
|
|
const char *newusername;
|
|
|
|
|
|
|
|
if (!dbname)
|
|
|
|
newdbname = PQdb(AH->connection);
|
|
|
|
else
|
|
|
|
newdbname = dbname;
|
2000-08-01 17:51:45 +02:00
|
|
|
|
2001-08-22 22:23:24 +02:00
|
|
|
if (!username)
|
|
|
|
newusername = PQuser(AH->connection);
|
2000-08-01 17:51:45 +02:00
|
|
|
else
|
2001-08-22 22:23:24 +02:00
|
|
|
newusername = username;
|
2000-08-01 17:51:45 +02:00
|
|
|
|
|
|
|
/* Let's see if the request is already satisfied */
|
2004-09-10 22:05:18 +02:00
|
|
|
if (strcmp(newdbname, PQdb(AH->connection)) == 0 &&
|
|
|
|
strcmp(newusername, PQuser(AH->connection)) == 0)
|
2000-08-01 17:51:45 +02:00
|
|
|
return 1;
|
|
|
|
|
2001-08-22 22:23:24 +02:00
|
|
|
newConn = _connectDB(AH, newdbname, newusername);
|
2000-08-01 17:51:45 +02:00
|
|
|
|
|
|
|
PQfinish(AH->connection);
|
|
|
|
AH->connection = newConn;
|
2001-08-22 22:23:24 +02:00
|
|
|
|
2000-08-01 17:51:45 +02:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Connect to the db again.
 *
 * Note: it's not really all that sensible to use a single-entry password
 * cache if the username keeps changing.  In current usage, however, the
 * username never does change, so one savedPassword is sufficient.  We do
 * update the cache on the off chance that the password has changed since the
 * start of the run.
 */
static PGconn *
_connectDB(ArchiveHandle *AH, const char *reqdb, const char *requser)
{
	PGconn	   *newConn;
	const char *newdb;
	const char *newuser;
	char	   *password;
	bool		new_pass;

	/* NULL reqdb means: reuse the current connection's database */
	if (!reqdb)
		newdb = PQdb(AH->connection);
	else
		newdb = reqdb;

	/* NULL or empty requser means: reuse the current connection's user */
	if (!requser || strlen(requser) == 0)
		newuser = PQuser(AH->connection);
	else
		newuser = requser;

	ahlog(AH, 1, "connecting to database \"%s\" as user \"%s\"\n",
		  newdb, newuser);

	/* Start from the cached password, if we have one (we own this copy). */
	password = AH->savedPassword ? pg_strdup(AH->savedPassword) : NULL;

	/* With --password, prompt up front rather than waiting for a failure. */
	if (AH->promptPassword == TRI_YES && password == NULL)
	{
		password = simple_prompt("Password: ", 100, false);
		if (password == NULL)
			exit_horribly(modulename, "out of memory\n");
	}

	/* Loop until we connect, re-prompting for the password as needed. */
	do
	{
		const char *keywords[7];
		const char *values[7];

		/* host/port are copied from the existing connection */
		keywords[0] = "host";
		values[0] = PQhost(AH->connection);
		keywords[1] = "port";
		values[1] = PQport(AH->connection);
		keywords[2] = "user";
		values[2] = newuser;
		keywords[3] = "password";
		values[3] = password;
		keywords[4] = "dbname";
		values[4] = newdb;
		keywords[5] = "fallback_application_name";
		values[5] = progname;
		keywords[6] = NULL;
		values[6] = NULL;

		new_pass = false;
		newConn = PQconnectdbParams(keywords, values, true);

		if (!newConn)
			exit_horribly(modulename, "failed to reconnect to database\n");

		if (PQstatus(newConn) == CONNECTION_BAD)
		{
			/* Any failure other than a missing password is fatal. */
			if (!PQconnectionNeedsPassword(newConn))
				exit_horribly(modulename, "could not reconnect to database: %s",
							  PQerrorMessage(newConn));
			PQfinish(newConn);

			/* If we already supplied a password, it must have been wrong. */
			if (password)
				fprintf(stderr, "Password incorrect\n");

			fprintf(stderr, "Connecting to %s as %s\n",
					newdb, newuser);

			if (password)
				free(password);

			if (AH->promptPassword != TRI_NO)
				password = simple_prompt("Password: ", 100, false);
			else
				exit_horribly(modulename, "connection needs password\n");

			if (password == NULL)
				exit_horribly(modulename, "out of memory\n");
			new_pass = true;
		}
	} while (new_pass);

	/*
	 * We want to remember connection's actual password, whether or not we got
	 * it by prompting.  So we don't just store the password variable.
	 */
	if (PQconnectionUsedPassword(newConn))
	{
		if (AH->savedPassword)
			free(AH->savedPassword);
		AH->savedPassword = pg_strdup(PQpass(newConn));
	}
	if (password)
		free(password);

	/* check for version mismatch */
	_check_database_version(AH);

	PQsetNoticeProcessor(newConn, notice_processor, NULL);

	return newConn;
}
|
|
|
|
|
|
|
|
|
2001-05-17 23:12:49 +02:00
|
|
|
/*
 * Make a database connection with the given parameters.  The
 * connection handle is returned, the parameters are stored in AHX.
 * An interactive password prompt is automatically issued if required.
 *
 * Note: it's not really all that sensible to use a single-entry password
 * cache if the username keeps changing.  In current usage, however, the
 * username never does change, so one savedPassword is sufficient.
 */
void
ConnectDatabase(Archive *AHX,
				const char *dbname,
				const char *pghost,
				const char *pgport,
				const char *username,
				trivalue prompt_password)
{
	ArchiveHandle *AH = (ArchiveHandle *) AHX;
	char	   *password;
	bool		new_pass;

	if (AH->connection)
		exit_horribly(modulename, "already connected to a database\n");

	/* Start from the cached password, if we have one (we own this copy). */
	password = AH->savedPassword ? pg_strdup(AH->savedPassword) : NULL;

	/* With --password, prompt up front rather than waiting for a failure. */
	if (prompt_password == TRI_YES && password == NULL)
	{
		password = simple_prompt("Password: ", 100, false);
		if (password == NULL)
			exit_horribly(modulename, "out of memory\n");
	}
	/* Remember the prompting policy so _connectDB() can honor it later. */
	AH->promptPassword = prompt_password;

	/*
	 * Start the connection.  Loop until we have a password if requested by
	 * backend.
	 */
	do
	{
		const char *keywords[7];
		const char *values[7];

		keywords[0] = "host";
		values[0] = pghost;
		keywords[1] = "port";
		values[1] = pgport;
		keywords[2] = "user";
		values[2] = username;
		keywords[3] = "password";
		values[3] = password;
		keywords[4] = "dbname";
		values[4] = dbname;
		keywords[5] = "fallback_application_name";
		values[5] = progname;
		keywords[6] = NULL;
		values[6] = NULL;

		new_pass = false;
		AH->connection = PQconnectdbParams(keywords, values, true);

		if (!AH->connection)
			exit_horribly(modulename, "failed to connect to database\n");

		/* Retry only when the sole problem is a missing password. */
		if (PQstatus(AH->connection) == CONNECTION_BAD &&
			PQconnectionNeedsPassword(AH->connection) &&
			password == NULL &&
			prompt_password != TRI_NO)
		{
			PQfinish(AH->connection);
			password = simple_prompt("Password: ", 100, false);
			if (password == NULL)
				exit_horribly(modulename, "out of memory\n");
			new_pass = true;
		}
	} while (new_pass);

	/* check to see that the backend connection was successfully made */
	if (PQstatus(AH->connection) == CONNECTION_BAD)
		exit_horribly(modulename, "connection to database \"%s\" failed: %s",
					  PQdb(AH->connection) ? PQdb(AH->connection) : "",
					  PQerrorMessage(AH->connection));

	/*
	 * We want to remember connection's actual password, whether or not we got
	 * it by prompting.  So we don't just store the password variable.
	 */
	if (PQconnectionUsedPassword(AH->connection))
	{
		if (AH->savedPassword)
			free(AH->savedPassword);
		AH->savedPassword = pg_strdup(PQpass(AH->connection));
	}
	if (password)
		free(password);

	/* check for version mismatch */
	_check_database_version(AH);

	PQsetNoticeProcessor(AH->connection, notice_processor, NULL);
}
|
|
|
|
|
2013-03-24 16:27:20 +01:00
|
|
|
/*
|
|
|
|
* Close the connection to the database and also cancel off the query if we
|
|
|
|
* have one running.
|
|
|
|
*/
|
2012-02-16 17:49:20 +01:00
|
|
|
void
|
|
|
|
DisconnectDatabase(Archive *AHX)
|
|
|
|
{
|
|
|
|
ArchiveHandle *AH = (ArchiveHandle *) AHX;
|
2013-03-24 16:27:20 +01:00
|
|
|
PGcancel *cancel;
|
|
|
|
char errbuf[1];
|
|
|
|
|
|
|
|
if (!AH->connection)
|
|
|
|
return;
|
2012-02-16 17:49:20 +01:00
|
|
|
|
2013-03-24 16:27:20 +01:00
|
|
|
if (PQtransactionStatus(AH->connection) == PQTRANS_ACTIVE)
|
|
|
|
{
|
|
|
|
if ((cancel = PQgetCancel(AH->connection)))
|
|
|
|
{
|
|
|
|
PQcancel(cancel, errbuf, sizeof(errbuf));
|
|
|
|
PQfreeCancel(cancel);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
PQfinish(AH->connection);
|
2012-02-16 17:49:20 +01:00
|
|
|
AH->connection = NULL;
|
|
|
|
}
|
|
|
|
|
2012-02-16 19:00:24 +01:00
|
|
|
PGconn *
|
|
|
|
GetConnection(Archive *AHX)
|
|
|
|
{
|
|
|
|
ArchiveHandle *AH = (ArchiveHandle *) AHX;
|
|
|
|
|
|
|
|
return AH->connection;
|
|
|
|
}
|
2001-08-12 21:02:39 +02:00
|
|
|
|
2001-10-25 07:50:21 +02:00
|
|
|
/*
 * libpq notice processor: route server notices through write_msg() so
 * they share the program's normal message output path.  (arg is the
 * unused user pointer passed to PQsetNoticeProcessor.)
 */
static void
notice_processor(void *arg, const char *message)
{
	write_msg(NULL, "%s", message);
}
|
|
|
|
|
2012-03-20 22:38:11 +01:00
|
|
|
/* Like exit_horribly(), but with a complaint about a particular query. */
static void
die_on_query_failure(ArchiveHandle *AH, const char *modulename, const char *query)
{
	/* Emit the server's error message first, then abort showing the query. */
	write_msg(modulename, "query failed: %s",
			  PQerrorMessage(AH->connection));
	exit_horribly(modulename, "query was: %s\n", query);
}
|
2001-08-12 21:02:39 +02:00
|
|
|
|
2012-02-07 16:07:02 +01:00
|
|
|
void
|
|
|
|
ExecuteSqlStatement(Archive *AHX, const char *query)
|
|
|
|
{
|
2012-06-10 21:20:04 +02:00
|
|
|
ArchiveHandle *AH = (ArchiveHandle *) AHX;
|
2012-02-07 16:07:02 +01:00
|
|
|
PGresult *res;
|
|
|
|
|
|
|
|
res = PQexec(AH->connection, query);
|
|
|
|
if (PQresultStatus(res) != PGRES_COMMAND_OK)
|
|
|
|
die_on_query_failure(AH, modulename, query);
|
|
|
|
PQclear(res);
|
|
|
|
}
|
|
|
|
|
|
|
|
PGresult *
|
|
|
|
ExecuteSqlQuery(Archive *AHX, const char *query, ExecStatusType status)
|
|
|
|
{
|
2012-06-10 21:20:04 +02:00
|
|
|
ArchiveHandle *AH = (ArchiveHandle *) AHX;
|
2012-02-07 16:07:02 +01:00
|
|
|
PGresult *res;
|
|
|
|
|
|
|
|
res = PQexec(AH->connection, query);
|
|
|
|
if (PQresultStatus(res) != status)
|
|
|
|
die_on_query_failure(AH, modulename, query);
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
2011-07-28 20:06:57 +02:00
|
|
|
/*
 * Convenience function to send a query.
 * Monitors result to detect COPY statements
 */
static void
ExecuteSqlCommand(ArchiveHandle *AH, const char *qry, const char *desc)
{
	PGconn	   *conn = AH->connection;
	PGresult   *res;
	char		errStmt[DB_MAX_ERR_STMT];

#ifdef NOT_USED
	fprintf(stderr, "Executing: '%s'\n\n", qry);
#endif
	res = PQexec(conn, qry);

	switch (PQresultStatus(res))
	{
		case PGRES_COMMAND_OK:
		case PGRES_TUPLES_OK:
		case PGRES_EMPTY_QUERY:
			/* A-OK */
			break;
		case PGRES_COPY_IN:
			/* Assume this is an expected result */
			AH->pgCopyIn = true;
			break;
		default:
			/* trouble */

			/*
			 * Truncate the failed command to DB_MAX_ERR_STMT bytes for the
			 * error report; an INSERT-style data command could otherwise
			 * dump megabytes of data.  strncpy is deliberate here: it copies
			 * at most DB_MAX_ERR_STMT bytes without needing strlen(qry),
			 * and we fix up termination ourselves just below.
			 */
			strncpy(errStmt, qry, DB_MAX_ERR_STMT);		/* strncpy required here */
			if (errStmt[DB_MAX_ERR_STMT - 1] != '\0')
			{
				/* command was truncated: terminate it with an ellipsis */
				errStmt[DB_MAX_ERR_STMT - 4] = '.';
				errStmt[DB_MAX_ERR_STMT - 3] = '.';
				errStmt[DB_MAX_ERR_STMT - 2] = '.';
				errStmt[DB_MAX_ERR_STMT - 1] = '\0';
			}
			warn_or_exit_horribly(AH, modulename, "%s: %s Command was: %s\n",
								  desc, PQerrorMessage(conn), errStmt);
			break;
	}

	PQclear(res);
}
|
|
|
|
|
2011-07-28 20:06:57 +02:00
|
|
|
|
Fix pg_restore's direct-to-database mode for INSERT-style table data.
In commit 6545a901aaf84cb05212bb6a7674059908f527c3, I removed the mini SQL
lexer that was in pg_backup_db.c, thinking that it had no real purpose
beyond separating COPY data from SQL commands, which purpose had been
obsoleted by long-ago fixes in pg_dump's archive file format.
Unfortunately this was in error: that code was also used to identify
command boundaries in INSERT-style table data, which is run together as a
single string in the archive file for better compressibility. As a result,
direct-to-database restores from archive files made with --inserts or
--column-inserts fail in our latest releases, as reported by Dick Visser.
To fix, restore the mini SQL lexer, but simplify it by adjusting the
calling logic so that it's only required to cope with INSERT-style table
data, not arbitrary SQL commands. This allows us to not have to deal with
SQL comments, E'' strings, or dollar-quoted strings, none of which have
ever been emitted by dumpTableData_insert.
Also, fix the lexer to cope with standard-conforming strings, which was the
actual bug that the previous patch was meant to solve.
Back-patch to all supported branches. The previous patch went back to 8.2,
which unfortunately means that the EOL release of 8.2 contains this bug,
but I don't think we're doing another 8.2 release just because of that.
2012-01-06 19:04:09 +01:00
|
|
|
/*
|
|
|
|
* Process non-COPY table data (that is, INSERT commands).
|
|
|
|
*
|
|
|
|
* The commands have been run together as one long string for compressibility,
|
|
|
|
* and we are receiving them in bufferloads with arbitrary boundaries, so we
|
|
|
|
* have to locate command boundaries and save partial commands across calls.
|
|
|
|
* All state must be kept in AH->sqlparse, not in local variables of this
|
|
|
|
* routine. We assume that AH->sqlparse was filled with zeroes when created.
|
|
|
|
*
|
|
|
|
* We have to lex the data to the extent of identifying literals and quoted
|
|
|
|
* identifiers, so that we can recognize statement-terminating semicolons.
|
|
|
|
* We assume that INSERT data will not contain SQL comments, E'' literals,
|
|
|
|
* or dollar-quoted strings, so this is much simpler than a full SQL lexer.
|
2014-06-13 02:14:32 +02:00
|
|
|
*
|
|
|
|
* Note: when restoring from a pre-9.0 dump file, this code is also used to
|
|
|
|
* process BLOB COMMENTS data, which has the same problem of containing
|
|
|
|
* multiple SQL commands that might be split across bufferloads. Fortunately,
|
|
|
|
* that data won't contain anything complicated to lex either.
|
Fix pg_restore's direct-to-database mode for INSERT-style table data.
In commit 6545a901aaf84cb05212bb6a7674059908f527c3, I removed the mini SQL
lexer that was in pg_backup_db.c, thinking that it had no real purpose
beyond separating COPY data from SQL commands, which purpose had been
obsoleted by long-ago fixes in pg_dump's archive file format.
Unfortunately this was in error: that code was also used to identify
command boundaries in INSERT-style table data, which is run together as a
single string in the archive file for better compressibility. As a result,
direct-to-database restores from archive files made with --inserts or
--column-inserts fail in our latest releases, as reported by Dick Visser.
To fix, restore the mini SQL lexer, but simplify it by adjusting the
calling logic so that it's only required to cope with INSERT-style table
data, not arbitrary SQL commands. This allows us to not have to deal with
SQL comments, E'' strings, or dollar-quoted strings, none of which have
ever been emitted by dumpTableData_insert.
Also, fix the lexer to cope with standard-conforming strings, which was the
actual bug that the previous patch was meant to solve.
Back-patch to all supported branches. The previous patch went back to 8.2,
which unfortunately means that the EOL release of 8.2 contains this bug,
but I don't think we're doing another 8.2 release just because of that.
2012-01-06 19:04:09 +01:00
|
|
|
*/
|
|
|
|
/*
 * Process table data expressed as INSERT (or, in old archives, COMMENT ON)
 * commands.  The commands are run together as one long string in the archive
 * for better compressibility, so we must lex them apart and send them to the
 * server one at a time.  A command may be split across successive calls, so
 * the partial command and lexer state persist in AH->sqlparse.
 *
 * The lexer only needs to handle what dumpTableData_insert emits: plain
 * single-quoted strings (with backslash escapes when std_strings is off)
 * and double-quoted identifiers.  It need not cope with comments, E''
 * strings, or dollar quoting, which are never produced for this data.
 */
static void
ExecuteSimpleCommands(ArchiveHandle *AH, const char *buf, size_t bufLen)
{
	const char *qry = buf;
	const char *eos = buf + bufLen;		/* one past the last byte to scan */

	/* initialize command buffer if first time through */
	if (AH->sqlparse.curCmd == NULL)
		AH->sqlparse.curCmd = createPQExpBuffer();

	for (; qry < eos; qry++)
	{
		char		ch = *qry;

		/* For neatness, we skip any newlines between commands */
		if (!(ch == '\n' && AH->sqlparse.curCmd->len == 0))
			appendPQExpBufferChar(AH->sqlparse.curCmd, ch);

		switch (AH->sqlparse.state)
		{
			case SQL_SCAN:		/* Default state == 0, set in _allocAH */
				if (ch == ';')
				{
					/*
					 * We've found the end of a statement.  Send it and reset
					 * the buffer.
					 */
					ExecuteSqlCommand(AH, AH->sqlparse.curCmd->data,
									  "could not execute query");
					resetPQExpBuffer(AH->sqlparse.curCmd);
				}
				else if (ch == '\'')
				{
					AH->sqlparse.state = SQL_IN_SINGLE_QUOTE;
					AH->sqlparse.backSlash = false;
				}
				else if (ch == '"')
				{
					AH->sqlparse.state = SQL_IN_DOUBLE_QUOTE;
				}
				break;

			case SQL_IN_SINGLE_QUOTE:
				/*
				 * We needn't handle '' specially: the first quote returns to
				 * SQL_SCAN and the second immediately re-enters this state,
				 * which is equivalent.
				 */
				if (ch == '\'' && !AH->sqlparse.backSlash)
					AH->sqlparse.state = SQL_SCAN;
				else if (ch == '\\' && !AH->public.std_strings)
					AH->sqlparse.backSlash = !AH->sqlparse.backSlash;
				else
					AH->sqlparse.backSlash = false;
				break;

			case SQL_IN_DOUBLE_QUOTE:
				/* We needn't handle "" specially (same argument as above) */
				if (ch == '"')
					AH->sqlparse.state = SQL_SCAN;
				break;
		}
	}
}
|
|
|
|
|
|
|
|
|
2002-01-18 20:17:05 +01:00
|
|
|
/*
 * Implement ahwrite() for direct-to-DB restore
 *
 * Dispatches the buffer according to AH->outputKind: raw COPY data goes
 * straight to libpq, INSERT-style table data goes through the mini lexer,
 * and anything else is assumed to be one complete SQL command.
 *
 * Returns bufLen (the number of bytes consumed), per the ahwrite contract.
 */
int
ExecuteSqlCommandBuf(Archive *AHX, const char *buf, size_t bufLen)
{
	ArchiveHandle *AH = (ArchiveHandle *) AHX;

	if (AH->outputKind == OUTPUT_COPYDATA)
	{
		/*
		 * COPY data.
		 *
		 * We drop the data on the floor if libpq has failed to enter COPY
		 * mode; this allows us to behave reasonably when trying to continue
		 * after an error in a COPY command.
		 */
		if (AH->pgCopyIn &&
			PQputCopyData(AH->connection, buf, bufLen) <= 0)
			exit_horribly(modulename, "error returned by PQputCopyData: %s",
						  PQerrorMessage(AH->connection));
	}
	else if (AH->outputKind == OUTPUT_OTHERDATA)
	{
		/*
		 * Table data expressed as INSERT commands; or, in old dump files,
		 * BLOB COMMENTS data (which is expressed as COMMENT ON commands).
		 */
		ExecuteSimpleCommands(AH, buf, bufLen);
	}
	else
	{
		/*
		 * General SQL commands; we assume that commands will not be split
		 * across calls.
		 *
		 * In most cases the data passed to us will be a null-terminated
		 * string, but if it's not, we have to add a trailing null.
		 *
		 * NOTE(review): reading buf[bufLen] inspects one byte past the
		 * stated length, which assumes the caller's buffer always has at
		 * least bufLen + 1 accessible bytes — presumably guaranteed by the
		 * archive readers; confirm against the callers.
		 */
		if (buf[bufLen] == '\0')
			ExecuteSqlCommand(AH, buf, "could not execute query");
		else
		{
			char	   *str = (char *) pg_malloc(bufLen + 1);

			memcpy(str, buf, bufLen);
			str[bufLen] = '\0';
			ExecuteSqlCommand(AH, str, "could not execute query");
			free(str);
		}
	}

	return bufLen;
}
|
2000-07-21 13:43:26 +02:00
|
|
|
|
2011-07-28 20:06:57 +02:00
|
|
|
/*
 * Terminate a COPY operation during direct-to-DB restore
 *
 * Sends end-of-COPY to the server, collects the command result (warning or
 * dying, per AH's error policy, if the COPY failed for table tocEntryTag),
 * and returns the connection to normal libpq command state.  A no-op if no
 * COPY is in progress.
 */
void
EndDBCopyMode(Archive *AHX, const char *tocEntryTag)
{
	ArchiveHandle *AH = (ArchiveHandle *) AHX;

	if (AH->pgCopyIn)
	{
		PGresult   *res;

		if (PQputCopyEnd(AH->connection, NULL) <= 0)
			exit_horribly(modulename, "error returned by PQputCopyEnd: %s",
						  PQerrorMessage(AH->connection));

		/* Check command status and return to normal libpq state */
		res = PQgetResult(AH->connection);
		if (PQresultStatus(res) != PGRES_COMMAND_OK)
			warn_or_exit_horribly(AH, modulename, "COPY failed for table \"%s\": %s",
								  tocEntryTag, PQerrorMessage(AH->connection));
		PQclear(res);

		AH->pgCopyIn = false;
	}
}
|
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
void
|
2014-10-14 20:00:55 +02:00
|
|
|
StartTransaction(Archive *AHX)
|
2000-07-21 13:43:26 +02:00
|
|
|
{
|
2014-10-14 20:00:55 +02:00
|
|
|
ArchiveHandle *AH = (ArchiveHandle *) AHX;
|
|
|
|
|
2008-08-16 04:25:06 +02:00
|
|
|
ExecuteSqlCommand(AH, "BEGIN", "could not start database transaction");
|
2000-07-21 13:43:26 +02:00
|
|
|
}
|
|
|
|
|
2001-03-22 05:01:46 +01:00
|
|
|
void
|
2014-10-14 20:00:55 +02:00
|
|
|
CommitTransaction(Archive *AHX)
|
2000-07-21 13:43:26 +02:00
|
|
|
{
|
2014-10-14 20:00:55 +02:00
|
|
|
ArchiveHandle *AH = (ArchiveHandle *) AHX;
|
|
|
|
|
2008-08-16 04:25:06 +02:00
|
|
|
ExecuteSqlCommand(AH, "COMMIT", "could not commit database transaction");
|
2000-10-31 15:20:30 +01:00
|
|
|
}
|
2004-08-20 18:07:15 +02:00
|
|
|
|
2009-12-14 01:39:11 +01:00
|
|
|
void
|
|
|
|
DropBlobIfExists(ArchiveHandle *AH, Oid oid)
|
|
|
|
{
|
2010-02-18 02:29:10 +01:00
|
|
|
/*
|
|
|
|
* If we are not restoring to a direct database connection, we have to
|
|
|
|
* guess about how to detect whether the blob exists. Assume new-style.
|
|
|
|
*/
|
|
|
|
if (AH->connection == NULL ||
|
|
|
|
PQserverVersion(AH->connection) >= 90000)
|
2009-12-14 01:39:11 +01:00
|
|
|
{
|
2010-02-18 02:29:10 +01:00
|
|
|
ahprintf(AH,
|
|
|
|
"SELECT pg_catalog.lo_unlink(oid) "
|
|
|
|
"FROM pg_catalog.pg_largeobject_metadata "
|
|
|
|
"WHERE oid = '%u';\n",
|
|
|
|
oid);
|
2009-12-14 01:39:11 +01:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2010-02-18 02:29:10 +01:00
|
|
|
/* Restoring to pre-9.0 server, so do it the old way */
|
|
|
|
ahprintf(AH,
|
|
|
|
"SELECT CASE WHEN EXISTS("
|
|
|
|
"SELECT 1 FROM pg_catalog.pg_largeobject WHERE loid = '%u'"
|
|
|
|
") THEN pg_catalog.lo_unlink('%u') END;\n",
|
2009-12-14 01:39:11 +01:00
|
|
|
oid, oid);
|
|
|
|
}
|
|
|
|
}
|