pgindent run for 9.0, second run

Bruce Momjian 2010-07-06 19:19:02 +00:00
parent 52783b212c
commit 239d769e7e
127 changed files with 1503 additions and 1417 deletions

View File

@ -8,7 +8,7 @@
* Darko Prenosil <Darko.Prenosil@finteh.hr>
* Shridhar Daithankar <shridhar_daithankar@persistent.co.in>
*
* $PostgreSQL: pgsql/contrib/dblink/dblink.c,v 1.98 2010/06/15 20:29:01 tgl Exp $
* $PostgreSQL: pgsql/contrib/dblink/dblink.c,v 1.99 2010/07/06 19:18:54 momjian Exp $
* Copyright (c) 2001-2010, PostgreSQL Global Development Group
* ALL RIGHTS RESERVED;
*
@ -1797,7 +1797,7 @@ get_sql_delete(Relation rel, int *pkattnums, int pknumatts, char **tgt_pkattvals
appendStringInfo(&buf, " AND ");
appendStringInfoString(&buf,
quote_ident_cstr(NameStr(tupdesc->attrs[pkattnum]->attname)));
quote_ident_cstr(NameStr(tupdesc->attrs[pkattnum]->attname)));
if (tgt_pkattvals[i] != NULL)
appendStringInfo(&buf, " = %s",
@ -1880,7 +1880,7 @@ get_sql_update(Relation rel, int *pkattnums, int pknumatts, char **src_pkattvals
appendStringInfo(&buf, " AND ");
appendStringInfo(&buf, "%s",
quote_ident_cstr(NameStr(tupdesc->attrs[pkattnum]->attname)));
quote_ident_cstr(NameStr(tupdesc->attrs[pkattnum]->attname)));
val = tgt_pkattvals[i];
@ -1976,8 +1976,8 @@ get_tuple_of_interest(Relation rel, int *pkattnums, int pknumatts, char **src_pk
* Build sql statement to look up tuple of interest, ie, the one matching
* src_pkattvals. We used to use "SELECT *" here, but it's simpler to
* generate a result tuple that matches the table's physical structure,
* with NULLs for any dropped columns. Otherwise we have to deal with
* two different tupdescs and everything's very confusing.
* with NULLs for any dropped columns. Otherwise we have to deal with two
* different tupdescs and everything's very confusing.
*/
appendStringInfoString(&buf, "SELECT ");
@ -1990,7 +1990,7 @@ get_tuple_of_interest(Relation rel, int *pkattnums, int pknumatts, char **src_pk
appendStringInfoString(&buf, "NULL");
else
appendStringInfoString(&buf,
quote_ident_cstr(NameStr(tupdesc->attrs[i]->attname)));
quote_ident_cstr(NameStr(tupdesc->attrs[i]->attname)));
}
appendStringInfo(&buf, " FROM %s WHERE ", relname);
@ -2003,7 +2003,7 @@ get_tuple_of_interest(Relation rel, int *pkattnums, int pknumatts, char **src_pk
appendStringInfo(&buf, " AND ");
appendStringInfoString(&buf,
quote_ident_cstr(NameStr(tupdesc->attrs[pkattnum]->attname)));
quote_ident_cstr(NameStr(tupdesc->attrs[pkattnum]->attname)));
if (src_pkattvals[i] != NULL)
appendStringInfo(&buf, " = %s",
@ -2417,9 +2417,9 @@ validate_pkattnums(Relation rel,
/* Validate attnums and convert to internal form */
for (i = 0; i < pknumatts_arg; i++)
{
int pkattnum = pkattnums_arg->values[i];
int lnum;
int j;
int pkattnum = pkattnums_arg->values[i];
int lnum;
int j;
/* Can throw error immediately if out of range */
if (pkattnum <= 0 || pkattnum > natts)
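
The get_tuple_of_interest() hunks above re-wrap a comment describing how dblink builds a SELECT list that mirrors the table's physical structure, emitting NULL for any dropped column so the result tuple lines up with the tupdesc. A rough standalone sketch of that idea (not part of this commit; plain C strings and a made-up Col struct stand in for the backend's StringInfo and tupdesc, and the table/column names are invented):

#include <stdio.h>
#include <string.h>

/* Hypothetical column descriptor; the real code walks tupdesc->attrs[]
 * and quotes names with quote_ident_cstr(). */
typedef struct
{
    const char *name;
    int         isdropped;
} Col;

int
main(void)
{
    Col     cols[] = {{"id", 0}, {"old_col", 1}, {"val", 0}};
    char    buf[256] = "SELECT ";
    int     i;

    for (i = 0; i < 3; i++)
    {
        if (i > 0)
            strcat(buf, ", ");
        /* a dropped column still occupies its position, so emit NULL */
        strcat(buf, cols[i].isdropped ? "NULL" : cols[i].name);
    }
    strcat(buf, " FROM mytable WHERE id = 1");
    puts(buf);      /* SELECT id, NULL, val FROM mytable WHERE id = 1 */
    return 0;
}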

View File

@ -1,7 +1,7 @@
/*
* This is a port of the Double Metaphone algorithm for use in PostgreSQL.
*
* $PostgreSQL: pgsql/contrib/fuzzystrmatch/dmetaphone.c,v 1.14 2010/04/05 02:46:20 adunstan Exp $
* $PostgreSQL: pgsql/contrib/fuzzystrmatch/dmetaphone.c,v 1.15 2010/07/06 19:18:55 momjian Exp $
*
* Double Metaphone computes 2 "sounds like" strings - a primary and an
* alternate. In most cases they are the same, but for foreign names
@ -461,7 +461,7 @@ DoubleMetaphone(char *str, char **codes)
current += 1;
break;
case '\xc7': /* C with cedilla */
case '\xc7': /* C with cedilla */
MetaphAdd(primary, "S");
MetaphAdd(secondary, "S");
current += 1;
@ -1037,7 +1037,7 @@ DoubleMetaphone(char *str, char **codes)
MetaphAdd(secondary, "N");
break;
case '\xd1': /* N with tilde */
case '\xd1': /* N with tilde */
current += 1;
MetaphAdd(primary, "N");
MetaphAdd(secondary, "N");

View File

@ -1,5 +1,5 @@
/*
* $PostgreSQL: pgsql/contrib/pg_archivecleanup/pg_archivecleanup.c,v 1.2 2010/06/17 17:31:27 tgl Exp $
* $PostgreSQL: pgsql/contrib/pg_archivecleanup/pg_archivecleanup.c,v 1.3 2010/07/06 19:18:55 momjian Exp $
*
* pg_archivecleanup.c
*
@ -40,8 +40,9 @@ bool debug = false; /* are we debugging? */
char *archiveLocation; /* where to find the archive? */
char *restartWALFileName; /* the file from which we can restart restore */
char WALFilePath[MAXPGPATH]; /* the file path including archive */
char exclusiveCleanupFileName[MAXPGPATH]; /* the oldest file we want to
* remain in archive */
char exclusiveCleanupFileName[MAXPGPATH]; /* the oldest file we
* want to remain in
* archive */
/* =====================================================================
@ -68,14 +69,14 @@ char exclusiveCleanupFileName[MAXPGPATH]; /* the oldest file we want to
/*
* Initialize allows customized commands into the archive cleanup program.
*
* You may wish to add code to check for tape libraries, etc..
* You may wish to add code to check for tape libraries, etc..
*/
static void
Initialize(void)
{
/*
* This code assumes that archiveLocation is a directory, so we use
* stat to test if it's accessible.
* This code assumes that archiveLocation is a directory, so we use stat
* to test if it's accessible.
*/
struct stat stat_buf;
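
Initialize()'s re-wrapped comment says archiveLocation is assumed to be a directory and is probed with stat to test accessibility. A minimal standalone sketch of that test (illustrative only; the error message and exit code are invented, and the real function reports through its own logging):

#include <stdio.h>
#include <sys/stat.h>

int
main(int argc, char **argv)
{
    struct stat stat_buf;
    const char *archiveLocation = (argc > 1) ? argv[1] : ".";

    /* Same shape of accessibility test as Initialize(): stat() the path
     * and make sure it really is a directory. */
    if (stat(archiveLocation, &stat_buf) != 0 || !S_ISDIR(stat_buf.st_mode))
    {
        fprintf(stderr, "%s is not an accessible directory\n", archiveLocation);
        return 2;
    }
    return 0;
}
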
@ -100,22 +101,21 @@ CleanupPriorWALFiles(void)
while ((xlde = readdir(xldir)) != NULL)
{
/*
* We ignore the timeline part of the XLOG segment identifiers
* in deciding whether a segment is still needed. This
* ensures that we won't prematurely remove a segment from a
* parent timeline. We could probably be a little more
* proactive about removing segments of non-parent timelines,
* but that would be a whole lot more complicated.
* We ignore the timeline part of the XLOG segment identifiers in
* deciding whether a segment is still needed. This ensures that
* we won't prematurely remove a segment from a parent timeline.
* We could probably be a little more proactive about removing
* segments of non-parent timelines, but that would be a whole lot
* more complicated.
*
* We use the alphanumeric sorting property of the filenames
* to decide which ones are earlier than the
* exclusiveCleanupFileName file. Note that this means files
* are not removed in the order they were originally written,
* in case this worries you.
* We use the alphanumeric sorting property of the filenames to
* decide which ones are earlier than the exclusiveCleanupFileName
* file. Note that this means files are not removed in the order
* they were originally written, in case this worries you.
*/
if (strlen(xlde->d_name) == XLOG_DATA_FNAME_LEN &&
strspn(xlde->d_name, "0123456789ABCDEF") == XLOG_DATA_FNAME_LEN &&
strcmp(xlde->d_name + 8, exclusiveCleanupFileName + 8) < 0)
strspn(xlde->d_name, "0123456789ABCDEF") == XLOG_DATA_FNAME_LEN &&
strcmp(xlde->d_name + 8, exclusiveCleanupFileName + 8) < 0)
{
#ifdef WIN32
snprintf(WALFilePath, MAXPGPATH, "%s\\%s", archiveLocation, xlde->d_name);
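
The re-wrapped comment above explains the cleanup rule: ignore the 8-character timeline prefix of a WAL segment name and compare the remaining hex digits alphanumerically against exclusiveCleanupFileName. A small standalone sketch of just that comparison (the real loop also checks XLOG_DATA_FNAME_LEN and strspn over the hex digits; the file names below are examples):

#include <stdio.h>
#include <string.h>

/* WAL segment names are 24 hex chars: TTTTTTTTLLLLLLLLSSSSSSSS
 * (timeline, log id, segment).  Cleanup ignores the timeline part. */
static int
older_than_cutoff(const char *fname, const char *cutoff)
{
    return strcmp(fname + 8, cutoff + 8) < 0;
}

int
main(void)
{
    const char *cutoff = "000000010000000000000010";

    /* an older segment on a different timeline is still removable */
    printf("%d\n", older_than_cutoff("00000002000000000000000A", cutoff));  /* 1 */
    /* the cutoff file itself is kept */
    printf("%d\n", older_than_cutoff("000000010000000000000010", cutoff));  /* 0 */
    return 0;
}
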
@ -152,13 +152,13 @@ CleanupPriorWALFiles(void)
static void
SetWALFileNameForCleanup(void)
{
bool fnameOK = false;
bool fnameOK = false;
/*
* If restartWALFileName is a WAL file name then just use it directly.
* If restartWALFileName is a .backup filename, make sure we use
* the prefix of the filename, otherwise we will remove wrong files
* since 000000010000000000000010.00000020.backup is after
* If restartWALFileName is a WAL file name then just use it directly. If
* restartWALFileName is a .backup filename, make sure we use the prefix
* of the filename, otherwise we will remove wrong files since
* 000000010000000000000010.00000020.backup is after
* 000000010000000000000010.
*/
if (strlen(restartWALFileName) == XLOG_DATA_FNAME_LEN &&
@ -169,17 +169,20 @@ SetWALFileNameForCleanup(void)
}
else if (strlen(restartWALFileName) == XLOG_BACKUP_FNAME_LEN)
{
int args;
int args;
uint32 tli = 1,
log = 0,
seg = 0,
offset = 0;
args = sscanf(restartWALFileName, "%08X%08X%08X.%08X.backup", &tli, &log, &seg, &offset);
if (args == 4)
{
fnameOK = true;
/*
* Use just the prefix of the filename, ignore everything after first period
* Use just the prefix of the filename, ignore everything after
* first period
*/
XLogFileName(exclusiveCleanupFileName, tli, log, seg);
}
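
The hunk above re-wraps the comment on using only the prefix of a .backup file name. A standalone sketch of the same parse-and-rebuild step; the sscanf format comes from the code above, while the snprintf stands in for XLogFileName(), which is assumed here to produce the plain 24-character segment name:

#include <stdio.h>

int
main(void)
{
    const char *backup = "000000010000000000000010.00000020.backup";
    unsigned int tli, log, seg, offset;
    char    walfile[25];

    /* Parse the backup-label file name the same way the hunk above does. */
    if (sscanf(backup, "%08X%08X%08X.%08X.backup", &tli, &log, &seg, &offset) == 4)
    {
        /* Keep only the segment-name prefix; everything after the first
         * period would otherwise sort *after* the bare segment name. */
        snprintf(walfile, sizeof(walfile), "%08X%08X%08X", tli, log, seg);
        printf("%s\n", walfile);    /* 000000010000000000000010 */
    }
    return 0;
}
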
@ -205,12 +208,12 @@ usage(void)
printf("Usage:\n");
printf(" %s [OPTION]... ARCHIVELOCATION OLDESTKEPTWALFILE\n", progname);
printf("\n"
"for use as an archive_cleanup_command in the recovery.conf when standby_mode = on:\n"
"for use as an archive_cleanup_command in the recovery.conf when standby_mode = on:\n"
" archive_cleanup_command = 'pg_archivecleanup [OPTION]... ARCHIVELOCATION %%r'\n"
"e.g.\n"
" archive_cleanup_command = 'pg_archivecleanup /mnt/server/archiverdir %%r'\n");
printf("\n"
"or for use as a standalone archive cleaner:\n"
"or for use as a standalone archive cleaner:\n"
"e.g.\n"
" pg_archivecleanup /mnt/server/archiverdir 000000010000000000000010.00000020.backup\n");
printf("\nOptions:\n");
@ -258,9 +261,10 @@ main(int argc, char **argv)
/*
* We will go to the archiveLocation to check restartWALFileName.
* restartWALFileName may not exist anymore, which would not be an error, so
* we separate the archiveLocation and restartWALFileName so we can check
* separately whether archiveLocation exists, if not that is an error
* restartWALFileName may not exist anymore, which would not be an error,
* so we separate the archiveLocation and restartWALFileName so we can
* check separately whether archiveLocation exists, if not that is an
* error
*/
if (optind < argc)
{

View File

@ -4,7 +4,7 @@
* server checks and output routines
*
* Copyright (c) 2010, PostgreSQL Global Development Group
* $PostgreSQL: pgsql/contrib/pg_upgrade/check.c,v 1.10 2010/07/03 16:33:14 momjian Exp $
* $PostgreSQL: pgsql/contrib/pg_upgrade/check.c,v 1.11 2010/07/06 19:18:55 momjian Exp $
*/
#include "pg_upgrade.h"
@ -152,8 +152,8 @@ issue_warnings(migratorContext *ctx, char *sequence_script_file_name)
{
prep_status(ctx, "Adjusting sequences");
exec_prog(ctx, true,
SYSTEMQUOTE "\"%s/psql\" --set ON_ERROR_STOP=on --port %d "
"--username \"%s\" -f \"%s\" --dbname template1 >> \"%s\""
SYSTEMQUOTE "\"%s/psql\" --set ON_ERROR_STOP=on --port %d "
"--username \"%s\" -f \"%s\" --dbname template1 >> \"%s\""
SYSTEMQUOTE,
ctx->new.bindir, ctx->new.port, ctx->user,
sequence_script_file_name, ctx->logfile);
@ -217,7 +217,7 @@ check_cluster_versions(migratorContext *ctx)
/* Only current PG version is supported as a target */
if (GET_MAJOR_VERSION(ctx->new.major_version) != GET_MAJOR_VERSION(PG_VERSION_NUM))
pg_log(ctx, PG_FATAL, "This utility can only upgrade to PostgreSQL version %s.\n",
PG_MAJORVERSION);
PG_MAJORVERSION);
/*
* We can't allow downgrading because we use the target pg_dumpall, and
@ -375,7 +375,7 @@ check_new_db_is_empty(migratorContext *ctx)
*/
void
create_script_for_old_cluster_deletion(migratorContext *ctx,
char **deletion_script_file_name)
char **deletion_script_file_name)
{
FILE *script = NULL;
int tblnum;
@ -389,7 +389,7 @@ create_script_for_old_cluster_deletion(migratorContext *ctx,
if ((script = fopen(*deletion_script_file_name, "w")) == NULL)
pg_log(ctx, PG_FATAL, "Could not create necessary file: %s\n",
*deletion_script_file_name);
*deletion_script_file_name);
#ifndef WIN32
/* add shebang header */
@ -420,6 +420,7 @@ create_script_for_old_cluster_deletion(migratorContext *ctx,
}
}
else
/*
* Simply delete the tablespace directory, which might be ".old"
* or a version-specific subdirectory.
@ -433,7 +434,7 @@ create_script_for_old_cluster_deletion(migratorContext *ctx,
#ifndef WIN32
if (chmod(*deletion_script_file_name, S_IRWXU) != 0)
pg_log(ctx, PG_FATAL, "Could not add execute permission to file: %s\n",
*deletion_script_file_name);
*deletion_script_file_name);
#endif
check_ok(ctx);

View File

@ -4,7 +4,7 @@
* controldata functions
*
* Copyright (c) 2010, PostgreSQL Global Development Group
* $PostgreSQL: pgsql/contrib/pg_upgrade/controldata.c,v 1.8 2010/07/03 16:33:14 momjian Exp $
* $PostgreSQL: pgsql/contrib/pg_upgrade/controldata.c,v 1.9 2010/07/06 19:18:55 momjian Exp $
*/
#include "pg_upgrade.h"
@ -93,6 +93,7 @@ get_control_data(migratorContext *ctx, ClusterInfo *cluster, bool live_check)
fputs(bufin, ctx->debug_fd);
#ifdef WIN32
/*
* Due to an installer bug, LANG=C doesn't work for PG 8.3.3, but does
* work 8.2.6 and 8.3.7, so check for non-ASCII output and suggest a

View File

@ -4,7 +4,7 @@
* dump functions
*
* Copyright (c) 2010, PostgreSQL Global Development Group
* $PostgreSQL: pgsql/contrib/pg_upgrade/dump.c,v 1.6 2010/07/03 16:33:14 momjian Exp $
* $PostgreSQL: pgsql/contrib/pg_upgrade/dump.c,v 1.7 2010/07/06 19:18:55 momjian Exp $
*/
#include "pg_upgrade.h"
@ -24,7 +24,7 @@ generate_old_dump(migratorContext *ctx)
exec_prog(ctx, true,
SYSTEMQUOTE "\"%s/pg_dumpall\" --port %d --username \"%s\" "
"--schema-only --binary-upgrade > \"%s/" ALL_DUMP_FILE "\""
SYSTEMQUOTE, ctx->new.bindir, ctx->old.port, ctx->user, ctx->cwd);
SYSTEMQUOTE, ctx->new.bindir, ctx->old.port, ctx->user, ctx->cwd);
check_ok(ctx);
}

View File

@ -4,7 +4,7 @@
* execution functions
*
* Copyright (c) 2010, PostgreSQL Global Development Group
* $PostgreSQL: pgsql/contrib/pg_upgrade/exec.c,v 1.7 2010/07/03 16:33:14 momjian Exp $
* $PostgreSQL: pgsql/contrib/pg_upgrade/exec.c,v 1.8 2010/07/06 19:18:55 momjian Exp $
*/
#include "pg_upgrade.h"
@ -14,7 +14,7 @@
static void checkBinDir(migratorContext *ctx, ClusterInfo *cluster);
static int check_exec(migratorContext *ctx, const char *dir, const char *cmdName);
static int check_exec(migratorContext *ctx, const char *dir, const char *cmdName);
static const char *validate_exec(const char *path);
static int check_data_dir(migratorContext *ctx, const char *pg_data);
@ -311,5 +311,3 @@ check_data_dir(migratorContext *ctx, const char *pg_data)
return (fail) ? -1 : 0;
}

View File

@ -4,7 +4,7 @@
* file system operations
*
* Copyright (c) 2010, PostgreSQL Global Development Group
* $PostgreSQL: pgsql/contrib/pg_upgrade/file.c,v 1.12 2010/07/03 16:33:14 momjian Exp $
* $PostgreSQL: pgsql/contrib/pg_upgrade/file.c,v 1.13 2010/07/06 19:18:55 momjian Exp $
*/
#include "pg_upgrade.h"
@ -226,7 +226,7 @@ copy_file(const char *srcfile, const char *dstfile, bool force)
*/
int
pg_scandir(migratorContext *ctx, const char *dirname,
struct dirent ***namelist,
struct dirent *** namelist,
int (*selector) (const struct dirent *))
{
#ifndef HAVE_SCANDIR
@ -235,13 +235,14 @@ pg_scandir(migratorContext *ctx, const char *dirname,
/*
* scandir() is originally from BSD 4.3, which had the third argument as
* non-const. Linux and other C libraries have updated it to use a const.
* http://unix.derkeiler.com/Mailing-Lists/FreeBSD/questions/2005-12/msg00214.html
* http://unix.derkeiler.com/Mailing-Lists/FreeBSD/questions/2005-12/msg002
* 14.html
*
* Here we try to guess which libc's need const, and which don't. The net
* goal here is to try to suppress a compiler warning due to a prototype
* mismatch of const usage. Ideally we would do this via autoconf, but
* autoconf doesn't have a suitable builtin test and it seems overkill
* to add one just to avoid a warning.
* autoconf doesn't have a suitable builtin test and it seems overkill to
* add one just to avoid a warning.
*/
#elif defined(__FreeBSD__) || defined(__bsdi__) || defined(__darwin__) || defined(__OpenBSD__)
/* no const */
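
The re-wrapped comment describes guessing which libcs declare scandir()'s selector argument with const. For reference, a self-contained example against the modern POSIX prototype (the const-taking form that part of the #if chain targets); the selector here is an arbitrary illustration:

#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>

/* POSIX prototype: the selector takes a const struct dirent *.
 * Some older BSD-derived libcs declared it without const, which is what
 * the guessing game above is trying to accommodate. */
static int
only_dotfiles(const struct dirent *d)
{
    return d->d_name[0] == '.';
}

int
main(void)
{
    struct dirent **namelist;
    int     n = scandir(".", &namelist, only_dotfiles, alphasort);
    int     i;

    if (n < 0)
        return 1;
    for (i = 0; i < n; i++)
    {
        printf("%s\n", namelist[i]->d_name);
        free(namelist[i]);
    }
    free(namelist);
    return 0;
}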

View File

@ -4,7 +4,7 @@
* information support functions
*
* Copyright (c) 2010, PostgreSQL Global Development Group
* $PostgreSQL: pgsql/contrib/pg_upgrade/info.c,v 1.10 2010/07/03 16:33:14 momjian Exp $
* $PostgreSQL: pgsql/contrib/pg_upgrade/info.c,v 1.11 2010/07/06 19:18:55 momjian Exp $
*/
#include "pg_upgrade.h"
@ -18,12 +18,12 @@ static void dbarr_print(migratorContext *ctx, DbInfoArr *arr,
Cluster whichCluster);
static void relarr_print(migratorContext *ctx, RelInfoArr *arr);
static void get_rel_infos(migratorContext *ctx, const DbInfo *dbinfo,
RelInfoArr *relarr, Cluster whichCluster);
RelInfoArr *relarr, Cluster whichCluster);
static void relarr_free(RelInfoArr *rel_arr);
static void map_rel(migratorContext *ctx, const RelInfo *oldrel,
const RelInfo *newrel, const DbInfo *old_db,
const DbInfo *new_db, const char *olddata,
const char *newdata, FileNameMap *map);
const RelInfo *newrel, const DbInfo *old_db,
const DbInfo *new_db, const char *olddata,
const char *newdata, FileNameMap *map);
static void map_rel_by_id(migratorContext *ctx, Oid oldid, Oid newid,
const char *old_nspname, const char *old_relname,
const char *new_nspname, const char *new_relname,
@ -31,10 +31,10 @@ static void map_rel_by_id(migratorContext *ctx, Oid oldid, Oid newid,
const DbInfo *new_db, const char *olddata,
const char *newdata, FileNameMap *map);
static RelInfo *relarr_lookup_reloid(migratorContext *ctx,
RelInfoArr *rel_arr, Oid oid, Cluster whichCluster);
RelInfoArr *rel_arr, Oid oid, Cluster whichCluster);
static RelInfo *relarr_lookup_rel(migratorContext *ctx, RelInfoArr *rel_arr,
const char *nspname, const char *relname,
Cluster whichCluster);
const char *nspname, const char *relname,
Cluster whichCluster);
/*
@ -226,13 +226,13 @@ get_db_infos(migratorContext *ctx, DbInfoArr *dbinfs_arr, Cluster whichCluster)
int i_oid;
int i_spclocation;
res = executeQueryOrDie(ctx, conn,
"SELECT d.oid, d.datname, t.spclocation "
"FROM pg_catalog.pg_database d "
" LEFT OUTER JOIN pg_catalog.pg_tablespace t "
" ON d.dattablespace = t.oid "
"WHERE d.datallowconn = true");
res = executeQueryOrDie(ctx, conn,
"SELECT d.oid, d.datname, t.spclocation "
"FROM pg_catalog.pg_database d "
" LEFT OUTER JOIN pg_catalog.pg_tablespace t "
" ON d.dattablespace = t.oid "
"WHERE d.datallowconn = true");
i_datname = PQfnumber(res, "datname");
i_oid = PQfnumber(res, "oid");
i_spclocation = PQfnumber(res, "spclocation");
@ -358,7 +358,7 @@ get_rel_infos(migratorContext *ctx, const DbInfo *dbinfo,
for (relnum = 0; relnum < ntups; relnum++)
{
RelInfo *curr = &relinfos[num_rels++];
const char *tblspace;
const char *tblspace;
curr->reloid = atol(PQgetvalue(res, relnum, i_oid));

View File

@ -4,7 +4,7 @@
* options functions
*
* Copyright (c) 2010, PostgreSQL Global Development Group
* $PostgreSQL: pgsql/contrib/pg_upgrade/option.c,v 1.11 2010/07/03 16:33:14 momjian Exp $
* $PostgreSQL: pgsql/contrib/pg_upgrade/option.c,v 1.12 2010/07/06 19:18:55 momjian Exp $
*/
#include "pg_upgrade.h"
@ -52,7 +52,7 @@ parseCommandLine(migratorContext *ctx, int argc, char *argv[])
int option; /* Command line option */
int optindex = 0; /* used by getopt_long */
int user_id;
if (getenv("PGUSER"))
{
pg_free(ctx->user);
@ -68,7 +68,7 @@ parseCommandLine(migratorContext *ctx, int argc, char *argv[])
/* user lookup and 'root' test must be split because of usage() */
user_id = get_user_info(ctx, &ctx->user);
if (argc > 1)
{
if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-h") == 0 ||
@ -303,7 +303,7 @@ validateDirectoryOption(migratorContext *ctx, char **dirpath,
if ((*dirpath)[strlen(*dirpath) - 1] == '/')
#else
if ((*dirpath)[strlen(*dirpath) - 1] == '/' ||
(*dirpath)[strlen(*dirpath) - 1] == '\\')
(*dirpath)[strlen(*dirpath) - 1] == '\\')
#endif
(*dirpath)[strlen(*dirpath) - 1] = 0;
}

View File

@ -4,7 +4,7 @@
* main source file
*
* Copyright (c) 2010, PostgreSQL Global Development Group
* $PostgreSQL: pgsql/contrib/pg_upgrade/pg_upgrade.c,v 1.9 2010/07/03 16:33:14 momjian Exp $
* $PostgreSQL: pgsql/contrib/pg_upgrade/pg_upgrade.c,v 1.10 2010/07/06 19:18:55 momjian Exp $
*/
#include "pg_upgrade.h"
@ -82,7 +82,7 @@ main(int argc, char **argv)
*/
prep_status(&ctx, "Setting next oid for new cluster");
exec_prog(&ctx, true, SYSTEMQUOTE "\"%s/pg_resetxlog\" -o %u \"%s\" > "
DEVNULL SYSTEMQUOTE,
DEVNULL SYSTEMQUOTE,
ctx.new.bindir, ctx.old.controldata.chkpnt_nxtoid, ctx.new.pgdata);
check_ok(&ctx);
@ -166,10 +166,10 @@ prepare_new_cluster(migratorContext *ctx)
check_ok(ctx);
/*
* We do freeze after analyze so pg_statistic is also frozen.
* template0 is not frozen here, but data rows were frozen by initdb,
* and we set its datfrozenxid and relfrozenxids later to match the
* new xid counter later.
* We do freeze after analyze so pg_statistic is also frozen. template0 is
* not frozen here, but data rows were frozen by initdb, and we set its
* datfrozenxid and relfrozenxids later to match the new xid counter
* later.
*/
prep_status(ctx, "Freezing all rows on the new cluster");
exec_prog(ctx, true,
@ -203,7 +203,7 @@ prepare_new_databases(migratorContext *ctx)
prep_status(ctx, "Creating databases in the new cluster");
exec_prog(ctx, true,
SYSTEMQUOTE "\"%s/psql\" --port %d --username \"%s\" "
"--set ON_ERROR_STOP=on -f \"%s/%s\" --dbname template1 >> \"%s\""
"--set ON_ERROR_STOP=on -f \"%s/%s\" --dbname template1 >> \"%s\""
SYSTEMQUOTE,
ctx->new.bindir, ctx->new.port, ctx->user, ctx->cwd,
GLOBALS_DUMP_FILE, ctx->logfile);
@ -226,9 +226,9 @@ create_new_objects(migratorContext *ctx)
prep_status(ctx, "Restoring database schema to new cluster");
exec_prog(ctx, true,
SYSTEMQUOTE "\"%s/psql\" --port %d --username \"%s\" "
"--set ON_ERROR_STOP=on -f \"%s/%s\" --dbname template1 >> \"%s\""
"--set ON_ERROR_STOP=on -f \"%s/%s\" --dbname template1 >> \"%s\""
SYSTEMQUOTE,
ctx->new.bindir, ctx->new.port, ctx->user, ctx->cwd,
ctx->new.bindir, ctx->new.port, ctx->user, ctx->cwd,
DB_DUMP_FILE, ctx->logfile);
check_ok(ctx);
@ -300,7 +300,8 @@ void
set_frozenxids(migratorContext *ctx)
{
int dbnum;
PGconn *conn, *conn_template1;
PGconn *conn,
*conn_template1;
PGresult *dbres;
int ntups;
int i_datname;
@ -327,21 +328,21 @@ set_frozenxids(migratorContext *ctx)
ntups = PQntuples(dbres);
for (dbnum = 0; dbnum < ntups; dbnum++)
{
char *datname = PQgetvalue(dbres, dbnum, i_datname);
char *datallowconn= PQgetvalue(dbres, dbnum, i_datallowconn);
char *datname = PQgetvalue(dbres, dbnum, i_datname);
char *datallowconn = PQgetvalue(dbres, dbnum, i_datallowconn);
/*
* We must update databases where datallowconn = false, e.g.
* template0, because autovacuum increments their datfrozenxids and
* relfrozenxids even if autovacuum is turned off, and even though
* all the data rows are already frozen To enable this, we
* temporarily change datallowconn.
* We must update databases where datallowconn = false, e.g.
* template0, because autovacuum increments their datfrozenxids and
* relfrozenxids even if autovacuum is turned off, and even though all
* the data rows are already frozen To enable this, we temporarily
* change datallowconn.
*/
if (strcmp(datallowconn, "f") == 0)
PQclear(executeQueryOrDie(ctx, conn_template1,
"UPDATE pg_catalog.pg_database "
"SET datallowconn = true "
"WHERE datname = '%s'", datname));
"UPDATE pg_catalog.pg_database "
"SET datallowconn = true "
"WHERE datname = '%s'", datname));
conn = connectToServer(ctx, datname, CLUSTER_NEW);
@ -357,9 +358,9 @@ set_frozenxids(migratorContext *ctx)
/* Reset datallowconn flag */
if (strcmp(datallowconn, "f") == 0)
PQclear(executeQueryOrDie(ctx, conn_template1,
"UPDATE pg_catalog.pg_database "
"SET datallowconn = false "
"WHERE datname = '%s'", datname));
"UPDATE pg_catalog.pg_database "
"SET datallowconn = false "
"WHERE datname = '%s'", datname));
}
PQclear(dbres);

View File

@ -2,7 +2,7 @@
* pg_upgrade.h
*
* Copyright (c) 2010, PostgreSQL Global Development Group
* $PostgreSQL: pgsql/contrib/pg_upgrade/pg_upgrade.h,v 1.14 2010/07/03 16:33:14 momjian Exp $
* $PostgreSQL: pgsql/contrib/pg_upgrade/pg_upgrade.h,v 1.15 2010/07/06 19:18:55 momjian Exp $
*/
#include "postgres.h"
@ -21,7 +21,7 @@
#define MAX_STRING 1024
#define LINE_ALLOC 4096
#define QUERY_ALLOC 8192
#define MIGRATOR_API_VERSION 1
#define MESSAGE_WIDTH "60"
@ -53,19 +53,19 @@
#define CLUSTERNAME(cluster) ((cluster) == CLUSTER_OLD ? "old" : "new")
/* OID system catalog preservation added during PG 9.0 development */
#define TABLE_SPACE_SUBDIRS 201001111
#define TABLE_SPACE_SUBDIRS 201001111
/*
* Each relation is represented by a relinfo structure.
*/
typedef struct
{
char nspname[NAMEDATALEN]; /* namespace name */
char relname[NAMEDATALEN]; /* relation name */
char nspname[NAMEDATALEN]; /* namespace name */
char relname[NAMEDATALEN]; /* relation name */
Oid reloid; /* relation oid */
Oid relfilenode; /* relation relfile node */
Oid toastrelid; /* oid of the toast relation */
char tablespace[MAXPGPATH]; /* relations tablespace path */
char tablespace[MAXPGPATH]; /* relations tablespace path */
} RelInfo;
typedef struct
@ -83,10 +83,10 @@ typedef struct
Oid new; /* Relfilenode of the new relation */
char old_file[MAXPGPATH];
char new_file[MAXPGPATH];
char old_nspname[NAMEDATALEN]; /* old name of the namespace */
char old_relname[NAMEDATALEN]; /* old name of the relation */
char new_nspname[NAMEDATALEN]; /* new name of the namespace */
char new_relname[NAMEDATALEN]; /* new name of the relation */
char old_nspname[NAMEDATALEN]; /* old name of the namespace */
char old_relname[NAMEDATALEN]; /* old name of the relation */
char new_nspname[NAMEDATALEN]; /* new name of the namespace */
char new_relname[NAMEDATALEN]; /* new name of the relation */
} FileNameMap;
/*
@ -161,7 +161,7 @@ typedef enum
*/
typedef enum
{
NONE = 0, /* used for no running servers */
NONE = 0, /* used for no running servers */
CLUSTER_OLD,
CLUSTER_NEW
} Cluster;
@ -177,15 +177,15 @@ typedef long pgpid_t;
typedef struct
{
ControlData controldata; /* pg_control information */
DbInfoArr dbarr; /* dbinfos array */
char *pgdata; /* pathname for cluster's $PGDATA directory */
char *bindir; /* pathname for cluster's executable directory */
unsigned short port; /* port number where postmaster is waiting */
uint32 major_version; /* PG_VERSION of cluster */
char *major_version_str; /* string PG_VERSION of cluster */
Oid pg_database_oid; /* OID of pg_database relation */
char *libpath; /* pathname for cluster's pkglibdir */
char *tablespace_suffix; /* directory specification */
DbInfoArr dbarr; /* dbinfos array */
char *pgdata; /* pathname for cluster's $PGDATA directory */
char *bindir; /* pathname for cluster's executable directory */
unsigned short port; /* port number where postmaster is waiting */
uint32 major_version; /* PG_VERSION of cluster */
char *major_version_str; /* string PG_VERSION of cluster */
Oid pg_database_oid; /* OID of pg_database relation */
char *libpath; /* pathname for cluster's pkglibdir */
char *tablespace_suffix; /* directory specification */
} ClusterInfo;
@ -197,11 +197,12 @@ typedef struct
*/
typedef struct
{
ClusterInfo old, new; /* old and new cluster information */
ClusterInfo old,
new; /* old and new cluster information */
const char *progname; /* complete pathname for this program */
char *exec_path; /* full path to my executable */
char *user; /* username for clusters */
char cwd[MAXPGPATH]; /* current working directory, used for output */
char cwd[MAXPGPATH]; /* current working directory, used for output */
char **tablespaces; /* tablespaces */
int num_tablespaces;
char **libraries; /* loadable libraries */
@ -216,37 +217,37 @@ typedef struct
* changes */
bool verbose; /* TRUE -> be verbose in messages */
bool debug; /* TRUE -> log more information */
transferMode transfer_mode; /* copy files or link them? */
transferMode transfer_mode; /* copy files or link them? */
} migratorContext;
/*
* Global variables
*/
extern char scandir_file_pattern[];
extern char scandir_file_pattern[];
/* check.c */
void output_check_banner(migratorContext *ctx, bool *live_check);
void check_old_cluster(migratorContext *ctx, bool live_check,
char **sequence_script_file_name);
void check_old_cluster(migratorContext *ctx, bool live_check,
char **sequence_script_file_name);
void check_new_cluster(migratorContext *ctx);
void report_clusters_compatible(migratorContext *ctx);
void issue_warnings(migratorContext *ctx,
char *sequence_script_file_name);
void output_completion_banner(migratorContext *ctx,
char *deletion_script_file_name);
void issue_warnings(migratorContext *ctx,
char *sequence_script_file_name);
void output_completion_banner(migratorContext *ctx,
char *deletion_script_file_name);
void check_cluster_versions(migratorContext *ctx);
void check_cluster_compatibility(migratorContext *ctx, bool live_check);
void create_script_for_old_cluster_deletion(migratorContext *ctx,
char **deletion_script_file_name);
void create_script_for_old_cluster_deletion(migratorContext *ctx,
char **deletion_script_file_name);
/* controldata.c */
void get_control_data(migratorContext *ctx, ClusterInfo *cluster, bool live_check);
void check_control_data(migratorContext *ctx, ControlData *oldctrl,
void check_control_data(migratorContext *ctx, ControlData *oldctrl,
ControlData *newctrl);
@ -258,8 +259,8 @@ void split_old_dump(migratorContext *ctx);
/* exec.c */
int exec_prog(migratorContext *ctx, bool throw_error,
const char *cmd,...);
int exec_prog(migratorContext *ctx, bool throw_error,
const char *cmd,...);
void verify_directories(migratorContext *ctx);
bool is_server_running(migratorContext *ctx, const char *datadir);
void rename_old_pg_control(migratorContext *ctx);
@ -279,31 +280,28 @@ typedef const char *(*pluginShutdown) (void *pluginData);
typedef struct
{
uint16 oldPageVersion; /* Page layout version of the old
* cluster */
uint16 newPageVersion; /* Page layout version of the new
* cluster */
uint16 oldPageVersion; /* Page layout version of the old cluster */
uint16 newPageVersion; /* Page layout version of the new cluster */
uint16 pluginVersion; /* API version of converter plugin */
void *pluginData; /* Plugin data (set by plugin) */
pluginStartup startup; /* Pointer to plugin's startup function */
pluginConvertFile convertFile; /* Pointer to plugin's file converter
void *pluginData; /* Plugin data (set by plugin) */
pluginStartup startup; /* Pointer to plugin's startup function */
pluginConvertFile convertFile; /* Pointer to plugin's file converter
* function */
pluginConvertPage convertPage; /* Pointer to plugin's page converter
pluginConvertPage convertPage; /* Pointer to plugin's page converter
* function */
pluginShutdown shutdown; /* Pointer to plugin's shutdown function */
} pageCnvCtx;
const char *setupPageConverter(migratorContext *ctx, pageCnvCtx **result);
#else
/* dummy */
typedef void *pageCnvCtx;
#endif
int dir_matching_filenames(const struct dirent *scan_ent);
int pg_scandir(migratorContext *ctx, const char *dirname,
struct dirent ***namelist,
int (*selector) (const struct dirent *));
int dir_matching_filenames(const struct dirent * scan_ent);
int pg_scandir(migratorContext *ctx, const char *dirname,
struct dirent *** namelist,
int (*selector) (const struct dirent *));
const char *copyAndUpdateFile(migratorContext *ctx,
pageCnvCtx *pageConverter, const char *src,
const char *dst, bool force);
@ -315,21 +313,21 @@ void check_hard_link(migratorContext *ctx);
/* function.c */
void install_support_functions(migratorContext *ctx);
void uninstall_support_functions(migratorContext *ctx);
void uninstall_support_functions(migratorContext *ctx);
void get_loadable_libraries(migratorContext *ctx);
void check_loadable_libraries(migratorContext *ctx);
/* info.c */
FileNameMap *gen_db_file_maps(migratorContext *ctx, DbInfo *old_db,
DbInfo *new_db, int *nmaps, const char *old_pgdata,
const char *new_pgdata);
void get_db_and_rel_infos(migratorContext *ctx, DbInfoArr *db_arr,
Cluster whichCluster);
DbInfo *new_db, int *nmaps, const char *old_pgdata,
const char *new_pgdata);
void get_db_and_rel_infos(migratorContext *ctx, DbInfoArr *db_arr,
Cluster whichCluster);
DbInfo *dbarr_lookup_db(DbInfoArr *db_arr, const char *db_name);
void dbarr_free(DbInfoArr *db_arr);
void print_maps(migratorContext *ctx, FileNameMap *maps, int n,
const char *dbName);
void print_maps(migratorContext *ctx, FileNameMap *maps, int n,
const char *dbName);
/* option.c */
@ -349,15 +347,15 @@ void init_tablespaces(migratorContext *ctx);
/* server.c */
PGconn *connectToServer(migratorContext *ctx, const char *db_name,
PGconn *connectToServer(migratorContext *ctx, const char *db_name,
Cluster whichCluster);
PGresult *executeQueryOrDie(migratorContext *ctx, PGconn *conn,
PGresult *executeQueryOrDie(migratorContext *ctx, PGconn *conn,
const char *fmt,...);
void start_postmaster(migratorContext *ctx, Cluster whichCluster, bool quiet);
void start_postmaster(migratorContext *ctx, Cluster whichCluster, bool quiet);
void stop_postmaster(migratorContext *ctx, bool fast, bool quiet);
uint32 get_major_server_version(migratorContext *ctx, char **verstr,
Cluster whichCluster);
uint32 get_major_server_version(migratorContext *ctx, char **verstr,
Cluster whichCluster);
void check_for_libpq_envvars(migratorContext *ctx);
@ -381,22 +379,22 @@ const char *getErrorText(int errNum);
/* version.c */
void new_9_0_populate_pg_largeobject_metadata(migratorContext *ctx,
bool check_mode, Cluster whichCluster);
void new_9_0_populate_pg_largeobject_metadata(migratorContext *ctx,
bool check_mode, Cluster whichCluster);
/* version_old_8_3.c */
void old_8_3_check_for_name_data_type_usage(migratorContext *ctx,
Cluster whichCluster);
void old_8_3_check_for_tsquery_usage(migratorContext *ctx,
Cluster whichCluster);
void old_8_3_check_for_isn_and_int8_passing_mismatch(migratorContext *ctx,
Cluster whichCluster);
void old_8_3_rebuild_tsvector_tables(migratorContext *ctx,
bool check_mode, Cluster whichCluster);
void old_8_3_invalidate_hash_gin_indexes(migratorContext *ctx,
bool check_mode, Cluster whichCluster);
void old_8_3_invalidate_bpchar_pattern_ops_indexes(migratorContext *ctx,
bool check_mode, Cluster whichCluster);
char *old_8_3_create_sequence_script(migratorContext *ctx,
Cluster whichCluster);
void old_8_3_check_for_name_data_type_usage(migratorContext *ctx,
Cluster whichCluster);
void old_8_3_check_for_tsquery_usage(migratorContext *ctx,
Cluster whichCluster);
void old_8_3_check_for_isn_and_int8_passing_mismatch(migratorContext *ctx,
Cluster whichCluster);
void old_8_3_rebuild_tsvector_tables(migratorContext *ctx,
bool check_mode, Cluster whichCluster);
void old_8_3_invalidate_hash_gin_indexes(migratorContext *ctx,
bool check_mode, Cluster whichCluster);
void old_8_3_invalidate_bpchar_pattern_ops_indexes(migratorContext *ctx,
bool check_mode, Cluster whichCluster);
char *old_8_3_create_sequence_script(migratorContext *ctx,
Cluster whichCluster);

View File

@ -4,7 +4,7 @@
* relfilenode functions
*
* Copyright (c) 2010, PostgreSQL Global Development Group
* $PostgreSQL: pgsql/contrib/pg_upgrade/relfilenode.c,v 1.7 2010/07/03 16:33:14 momjian Exp $
* $PostgreSQL: pgsql/contrib/pg_upgrade/relfilenode.c,v 1.8 2010/07/06 19:18:55 momjian Exp $
*/
#include "pg_upgrade.h"
@ -21,8 +21,8 @@ static void transfer_relfile(migratorContext *ctx, pageCnvCtx *pageConverter,
const char *newnspname, const char *newrelname);
/* used by scandir(), must be global */
char scandir_file_pattern[MAXPGPATH];
char scandir_file_pattern[MAXPGPATH];
/*
* transfer_all_new_dbs()
*

View File

@ -4,7 +4,7 @@
* database server functions
*
* Copyright (c) 2010, PostgreSQL Global Development Group
* $PostgreSQL: pgsql/contrib/pg_upgrade/server.c,v 1.7 2010/07/03 16:33:14 momjian Exp $
* $PostgreSQL: pgsql/contrib/pg_upgrade/server.c,v 1.8 2010/07/06 19:18:55 momjian Exp $
*/
#include "pg_upgrade.h"
@ -181,11 +181,11 @@ start_postmaster(migratorContext *ctx, Cluster whichCluster, bool quiet)
}
/*
* On Win32, we can't send both server output and pg_ctl output
* to the same file because we get the error:
* "The process cannot access the file because it is being used by another process."
* so we have to send pg_ctl output to 'nul'.
*/
* On Win32, we can't send both server output and pg_ctl output to the
* same file because we get the error: "The process cannot access the file
* because it is being used by another process." so we have to send pg_ctl
* output to 'nul'.
*/
snprintf(cmd, sizeof(cmd),
SYSTEMQUOTE "\"%s/pg_ctl\" -l \"%s\" -D \"%s\" "
"-o \"-p %d -c autovacuum=off "
@ -233,13 +233,13 @@ stop_postmaster(migratorContext *ctx, bool fast, bool quiet)
/* See comment in start_postmaster() about why win32 output is ignored. */
snprintf(cmd, sizeof(cmd),
SYSTEMQUOTE "\"%s/pg_ctl\" -l \"%s\" -D \"%s\" %s stop >> "
"\"%s\" 2>&1" SYSTEMQUOTE,
bindir, ctx->logfile, datadir, fast ? "-m fast" : "",
SYSTEMQUOTE "\"%s/pg_ctl\" -l \"%s\" -D \"%s\" %s stop >> "
"\"%s\" 2>&1" SYSTEMQUOTE,
bindir, ctx->logfile, datadir, fast ? "-m fast" : "",
#ifndef WIN32
ctx->logfile);
ctx->logfile);
#else
DEVNULL);
DEVNULL);
#endif
exec_prog(ctx, fast ? false : true, "%s", cmd);

View File

@ -4,14 +4,14 @@
* tablespace functions
*
* Copyright (c) 2010, PostgreSQL Global Development Group
* $PostgreSQL: pgsql/contrib/pg_upgrade/tablespace.c,v 1.5 2010/07/03 16:33:14 momjian Exp $
* $PostgreSQL: pgsql/contrib/pg_upgrade/tablespace.c,v 1.6 2010/07/06 19:18:55 momjian Exp $
*/
#include "pg_upgrade.h"
static void get_tablespace_paths(migratorContext *ctx);
static void set_tablespace_directory_suffix(migratorContext *ctx,
Cluster whichCluster);
Cluster whichCluster);
void
@ -52,7 +52,7 @@ get_tablespace_paths(migratorContext *ctx)
if ((ctx->num_tablespaces = PQntuples(res)) != 0)
ctx->tablespaces = (char **) pg_malloc(ctx,
ctx->num_tablespaces * sizeof(char *));
ctx->num_tablespaces * sizeof(char *));
else
ctx->tablespaces = NULL;

View File

@ -4,7 +4,7 @@
* utility functions
*
* Copyright (c) 2010, PostgreSQL Global Development Group
* $PostgreSQL: pgsql/contrib/pg_upgrade/util.c,v 1.4 2010/07/03 16:33:14 momjian Exp $
* $PostgreSQL: pgsql/contrib/pg_upgrade/util.c,v 1.5 2010/07/06 19:18:55 momjian Exp $
*/
#include "pg_upgrade.h"
@ -156,8 +156,8 @@ quote_identifier(migratorContext *ctx, const char *s)
int
get_user_info(migratorContext *ctx, char **user_name)
{
int user_id;
int user_id;
#ifndef WIN32
struct passwd *pw = getpwuid(geteuid());

View File

@ -5,7 +5,7 @@
* to control oid and relfilenode assignment
*
* Copyright (c) 2010, PostgreSQL Global Development Group
* $PostgreSQL: pgsql/contrib/pg_upgrade_support/pg_upgrade_support.c,v 1.4 2010/07/03 16:33:15 momjian Exp $
* $PostgreSQL: pgsql/contrib/pg_upgrade_support/pg_upgrade_support.c,v 1.5 2010/07/06 19:18:55 momjian Exp $
*/
#include "postgres.h"
@ -21,7 +21,7 @@
* not be compiling against PG 9.0.
*/
extern void EnumValuesCreate(Oid enumTypeOid, List *vals,
Oid binary_upgrade_next_pg_enum_oid);
Oid binary_upgrade_next_pg_enum_oid);
#ifdef PG_MODULE_MAGIC
PG_MODULE_MAGIC;
@ -34,13 +34,13 @@ extern PGDLLIMPORT Oid binary_upgrade_next_heap_relfilenode;
extern PGDLLIMPORT Oid binary_upgrade_next_toast_relfilenode;
extern PGDLLIMPORT Oid binary_upgrade_next_index_relfilenode;
Datum set_next_pg_type_oid(PG_FUNCTION_ARGS);
Datum set_next_pg_type_array_oid(PG_FUNCTION_ARGS);
Datum set_next_pg_type_toast_oid(PG_FUNCTION_ARGS);
Datum set_next_heap_relfilenode(PG_FUNCTION_ARGS);
Datum set_next_toast_relfilenode(PG_FUNCTION_ARGS);
Datum set_next_index_relfilenode(PG_FUNCTION_ARGS);
Datum add_pg_enum_label(PG_FUNCTION_ARGS);
Datum set_next_pg_type_oid(PG_FUNCTION_ARGS);
Datum set_next_pg_type_array_oid(PG_FUNCTION_ARGS);
Datum set_next_pg_type_toast_oid(PG_FUNCTION_ARGS);
Datum set_next_heap_relfilenode(PG_FUNCTION_ARGS);
Datum set_next_toast_relfilenode(PG_FUNCTION_ARGS);
Datum set_next_index_relfilenode(PG_FUNCTION_ARGS);
Datum add_pg_enum_label(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(set_next_pg_type_oid);
PG_FUNCTION_INFO_V1(set_next_pg_type_array_oid);
@ -115,11 +115,10 @@ add_pg_enum_label(PG_FUNCTION_ARGS)
{
Oid enumoid = PG_GETARG_OID(0);
Oid typoid = PG_GETARG_OID(1);
Name label = PG_GETARG_NAME(2);
Name label = PG_GETARG_NAME(2);
EnumValuesCreate(typoid, list_make1(makeString(NameStr(*label))),
enumoid);
PG_RETURN_VOID();
}

View File

@ -4,7 +4,7 @@
* A simple benchmark program for PostgreSQL
* Originally written by Tatsuo Ishii and enhanced by many contributors.
*
* $PostgreSQL: pgsql/contrib/pgbench/pgbench.c,v 1.98 2010/03/23 01:29:22 itagaki Exp $
* $PostgreSQL: pgsql/contrib/pgbench/pgbench.c,v 1.99 2010/07/06 19:18:55 momjian Exp $
* Copyright (c) 2000-2010, PostgreSQL Global Development Group
* ALL RIGHTS RESERVED;
*
@ -2161,7 +2161,7 @@ threadRun(void *arg)
TState *thread = (TState *) arg;
CState *state = thread->state;
TResult *result;
FILE *logfile = NULL; /* per-thread log file */
FILE *logfile = NULL; /* per-thread log file */
instr_time start,
end;
int nstate = thread->nstate;
@ -2273,7 +2273,7 @@ threadRun(void *arg)
goto done;
}
FD_SET (sock, &input_mask);
FD_SET(sock, &input_mask);
if (maxsock < sock)
maxsock = sock;
@ -2407,8 +2407,8 @@ pthread_create(pthread_t *thread,
/*
* Set a different random seed in each child process. Otherwise they all
* inherit the parent's state and generate the same "random" sequence.
* (In the threaded case, the different threads will obtain subsets of the
* inherit the parent's state and generate the same "random" sequence. (In
* the threaded case, the different threads will obtain subsets of the
* output of a single random() sequence, which should be okay for our
* purposes.)
*/
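
The re-wrapped comment explains why each forked pgbench child needs its own random seed: otherwise every child inherits the parent's PRNG state and produces the same sequence. A standalone sketch of the failure mode and one conventional fix (the seed formula here is illustrative, not necessarily what pgbench itself uses):

#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>

int
main(void)
{
    int     i;

    for (i = 0; i < 2; i++)
    {
        if (fork() == 0)
        {
            /* Without a per-child reseed, both children would emit the
             * same "random" numbers.  Mixing in the pid varies the seed;
             * the actual pgbench formula may differ. */
            srandom((unsigned int) (time(NULL) ^ getpid()));
            printf("child %d: %ld\n", (int) getpid(), random());
            _exit(0);
        }
    }
    while (wait(NULL) > 0)
        ;
    return 0;
}
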
@ -2484,7 +2484,7 @@ typedef struct win32_pthread
void *(*routine) (void *);
void *arg;
void *result;
} win32_pthread;
} win32_pthread;
static unsigned __stdcall
win32_pthread_run(void *arg)

View File

@ -33,7 +33,7 @@
*
* $From: sha2.c,v 1.1 2001/11/08 00:01:51 adg Exp adg $
*
* $PostgreSQL: pgsql/contrib/pgcrypto/sha2.c,v 1.12 2010/04/02 15:21:20 mha Exp $
* $PostgreSQL: pgsql/contrib/pgcrypto/sha2.c,v 1.13 2010/07/06 19:18:55 momjian Exp $
*/
#include "postgres.h"
@ -98,7 +98,7 @@
*
* NOTE: The naming of R and S appears backwards here (R is a SHIFT and
* S is a ROTATION) because the SHA-256/384/512 description document
* (see http://www.iwar.org.uk/comsec/resources/cipher/sha256-384-512.pdf)
* (see http://www.iwar.org.uk/comsec/resources/cipher/sha256-384-512.pdf)
* uses this same "backwards" definition.
*/
/* Shift-right (used in SHA-256, SHA-384, and SHA-512): */
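
The comment above notes that R denotes a shift and S a rotation, following the "backwards" naming of the SHA-256/384/512 description document. Illustrative 32-bit macros showing the difference (names and widths here are for demonstration only, not this file's actual macros):

#include <stdint.h>
#include <stdio.h>

/* R is a plain shift-right; S is a rotate-right. */
#define R(b, x)  ((x) >> (b))
#define S(b, x)  (((x) >> (b)) | ((x) << (32 - (b))))

int
main(void)
{
    uint32_t x = 0x80000001u;

    printf("R(1,x) = %08x\n", R(1, x));     /* 40000000: the low bit is lost */
    printf("S(1,x) = %08x\n", S(1, x));     /* c0000000: the low bit wraps around */
    return 0;
}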

View File

@ -1,5 +1,5 @@
/*
* $PostgreSQL: pgsql/contrib/xml2/xpath.c,v 1.29 2010/03/03 19:10:22 tgl Exp $
* $PostgreSQL: pgsql/contrib/xml2/xpath.c,v 1.30 2010/07/06 19:18:55 momjian Exp $
*
* Parser interface for DOM-based parser (libxml) rather than
* stream-based SAX-type parser
@ -631,8 +631,8 @@ xpath_table(PG_FUNCTION_ARGS)
}
/*
* Setup the parser. This should happen after we are done evaluating
* the query, in case it calls functions that set up libxml differently.
* Setup the parser. This should happen after we are done evaluating the
* query, in case it calls functions that set up libxml differently.
*/
pgxml_parser_init();

View File

@ -1,5 +1,5 @@
/*
* $PostgreSQL: pgsql/contrib/xml2/xslt_proc.c,v 1.20 2010/03/03 19:10:22 tgl Exp $
* $PostgreSQL: pgsql/contrib/xml2/xslt_proc.c,v 1.21 2010/07/06 19:18:55 momjian Exp $
*
* XSLT processing functions (requiring libxslt)
*
@ -28,8 +28,7 @@
#include <libxslt/xsltInternals.h>
#include <libxslt/transform.h>
#include <libxslt/xsltutils.h>
#endif /* USE_LIBXSLT */
#endif /* USE_LIBXSLT */
/* externally accessible functions */
@ -45,8 +44,7 @@ extern void pgxml_parser_init(void);
static void parse_params(const char **params, text *paramstr);
#define MAXPARAMS 20 /* must be even, see parse_params() */
#endif /* USE_LIBXSLT */
#endif /* USE_LIBXSLT */
PG_FUNCTION_INFO_V1(xslt_process);
@ -130,15 +128,13 @@ xslt_process(PG_FUNCTION_ARGS)
PG_RETURN_NULL();
PG_RETURN_TEXT_P(cstring_to_text_with_len((char *) resstr, reslen));
#else /* !USE_LIBXSLT */
#else /* !USE_LIBXSLT */
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("xslt_process() is not available without libxslt")));
PG_RETURN_NULL();
#endif /* USE_LIBXSLT */
#endif /* USE_LIBXSLT */
}
#ifdef USE_LIBXSLT
@ -191,4 +187,4 @@ parse_params(const char **params, text *paramstr)
params[i] = NULL;
}
#endif /* USE_LIBXSLT */
#endif /* USE_LIBXSLT */

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.291 2010/05/02 22:37:43 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.292 2010/07/06 19:18:55 momjian Exp $
*
*
* INTERFACE ROUTINES
@ -4126,10 +4126,10 @@ heap_xlog_clean(XLogRecPtr lsn, XLogRecord *record)
/*
* We're about to remove tuples. In Hot Standby mode, ensure that there's
* no queries running for which the removed tuples are still visible.
*
* Not all HEAP2_CLEAN records remove tuples with xids, so we only want
* to conflict on the records that cause MVCC failures for user queries.
* If latestRemovedXid is invalid, skip conflict processing.
*
* Not all HEAP2_CLEAN records remove tuples with xids, so we only want to
* conflict on the records that cause MVCC failures for user queries. If
* latestRemovedXid is invalid, skip conflict processing.
*/
if (InHotStandby && TransactionIdIsValid(xlrec->latestRemovedXid))
ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid,

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/heap/pruneheap.c,v 1.24 2010/04/22 02:15:45 sriggs Exp $
* $PostgreSQL: pgsql/src/backend/access/heap/pruneheap.c,v 1.25 2010/07/06 19:18:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -122,7 +122,8 @@ heap_page_prune_opt(Relation relation, Buffer buffer, TransactionId OldestXmin)
*/
if (PageIsFull(page) || PageGetHeapFreeSpace(page) < minfree)
{
TransactionId ignore = InvalidTransactionId; /* return value not needed */
TransactionId ignore = InvalidTransactionId; /* return value not
* needed */
/* OK to prune */
(void) heap_page_prune(relation, buffer, OldestXmin, true, &ignore);

View File

@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.122 2010/03/28 09:27:01 sriggs Exp $
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.123 2010/07/06 19:18:55 momjian Exp $
*
* NOTES
* Postgres btree pages look like ordinary relation pages. The opaque
@ -720,7 +720,7 @@ _bt_page_recyclable(Page page)
*/
void
_bt_delitems_vacuum(Relation rel, Buffer buf,
OffsetNumber *itemnos, int nitems, BlockNumber lastBlockVacuumed)
OffsetNumber *itemnos, int nitems, BlockNumber lastBlockVacuumed)
{
Page page = BufferGetPage(buf);
BTPageOpaque opaque;
@ -797,7 +797,7 @@ _bt_delitems_vacuum(Relation rel, Buffer buf,
void
_bt_delitems_delete(Relation rel, Buffer buf,
OffsetNumber *itemnos, int nitems, Relation heapRel)
OffsetNumber *itemnos, int nitems, Relation heapRel)
{
Page page = BufferGetPage(buf);
BTPageOpaque opaque;
@ -847,8 +847,8 @@ _bt_delitems_delete(Relation rel, Buffer buf,
rdata[0].next = &(rdata[1]);
/*
* We need the target-offsets array whether or not we store the
* to allow us to find the latestRemovedXid on a standby server.
* We need the target-offsets array whether or not we store the to
* allow us to find the latestRemovedXid on a standby server.
*/
rdata[1].data = (char *) itemnos;
rdata[1].len = nitems * sizeof(OffsetNumber);

View File

@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.68 2010/04/30 06:34:29 heikki Exp $
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.69 2010/07/06 19:18:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -568,23 +568,26 @@ static TransactionId
btree_xlog_delete_get_latestRemovedXid(XLogRecord *record)
{
xl_btree_delete *xlrec = (xl_btree_delete *) XLogRecGetData(record);
OffsetNumber *unused;
Buffer ibuffer, hbuffer;
Page ipage, hpage;
ItemId iitemid, hitemid;
IndexTuple itup;
OffsetNumber *unused;
Buffer ibuffer,
hbuffer;
Page ipage,
hpage;
ItemId iitemid,
hitemid;
IndexTuple itup;
HeapTupleHeader htuphdr;
BlockNumber hblkno;
OffsetNumber hoffnum;
TransactionId latestRemovedXid = InvalidTransactionId;
TransactionId htupxid = InvalidTransactionId;
int i;
BlockNumber hblkno;
OffsetNumber hoffnum;
TransactionId latestRemovedXid = InvalidTransactionId;
TransactionId htupxid = InvalidTransactionId;
int i;
/*
* If there's nothing running on the standby we don't need to derive
* a full latestRemovedXid value, so use a fast path out of here.
* That returns InvalidTransactionId, and so will conflict with
* users, but since we just worked out that's zero people, its OK.
* If there's nothing running on the standby we don't need to derive a
* full latestRemovedXid value, so use a fast path out of here. That
* returns InvalidTransactionId, and so will conflict with users, but
* since we just worked out that's zero people, its OK.
*/
if (CountDBBackends(InvalidOid) == 0)
return latestRemovedXid;
@ -598,8 +601,8 @@ btree_xlog_delete_get_latestRemovedXid(XLogRecord *record)
ipage = (Page) BufferGetPage(ibuffer);
/*
* Loop through the deleted index items to obtain the TransactionId
* from the heap items they point to.
* Loop through the deleted index items to obtain the TransactionId from
* the heap items they point to.
*/
unused = (OffsetNumber *) ((char *) xlrec + SizeOfBtreeDelete);
@ -624,8 +627,8 @@ btree_xlog_delete_get_latestRemovedXid(XLogRecord *record)
hpage = (Page) BufferGetPage(hbuffer);
/*
* Look up the heap tuple header that the index tuple points at
* by using the heap node supplied with the xlrec. We can't use
* Look up the heap tuple header that the index tuple points at by
* using the heap node supplied with the xlrec. We can't use
* heap_fetch, since it uses ReadBuffer rather than XLogReadBuffer.
* Note that we are not looking at tuple data here, just headers.
*/
@ -651,8 +654,8 @@ btree_xlog_delete_get_latestRemovedXid(XLogRecord *record)
htuphdr = (HeapTupleHeader) PageGetItem(hpage, hitemid);
/*
* Get the heap tuple's xmin/xmax and ratchet up the latestRemovedXid.
* No need to consider xvac values here.
* Get the heap tuple's xmin/xmax and ratchet up the
* latestRemovedXid. No need to consider xvac values here.
*/
htupxid = HeapTupleHeaderGetXmin(htuphdr);
if (TransactionIdFollows(htupxid, latestRemovedXid))
@ -667,7 +670,8 @@ btree_xlog_delete_get_latestRemovedXid(XLogRecord *record)
/*
* Conjecture: if hitemid is dead then it had xids before the xids
* marked on LP_NORMAL items. So we just ignore this item and move
* onto the next, for the purposes of calculating latestRemovedxids.
* onto the next, for the purposes of calculating
* latestRemovedxids.
*/
}
else
@ -679,13 +683,12 @@ btree_xlog_delete_get_latestRemovedXid(XLogRecord *record)
UnlockReleaseBuffer(ibuffer);
/*
* Note that if all heap tuples were LP_DEAD then we will be
* returning InvalidTransactionId here. That can happen if we are
* re-replaying this record type, though that will be before the
* consistency point and will not cause problems. It should
* happen very rarely after the consistency point, though note
* that we can't tell the difference between this and the fast
* path exit above. May need to change that in future.
* Note that if all heap tuples were LP_DEAD then we will be returning
* InvalidTransactionId here. That can happen if we are re-replaying this
* record type, though that will be before the consistency point and will
* not cause problems. It should happen very rarely after the consistency
* point, though note that we can't tell the difference between this and
* the fast path exit above. May need to change that in future.
*/
return latestRemovedXid;
}
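
btree_xlog_delete_get_latestRemovedXid(), shown above, loops over the deleted index items and ratchets latestRemovedXid up to the newest xmin/xmax it finds in the pointed-to heap tuples. A toy standalone version of that ratcheting loop, with a simplified stand-in for TransactionIdFollows() and made-up XID values:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t TransactionId;

/* Simplified stand-in for TransactionIdFollows(); the real routine also
 * special-cases permanent XIDs before doing this circular comparison. */
static int
xid_follows(TransactionId a, TransactionId b)
{
    return (int32_t) (a - b) > 0;
}

int
main(void)
{
    TransactionId xids[] = {100, 512, 90, 250};
    TransactionId latestRemovedXid = 0;
    int     i;

    /* Ratchet latestRemovedXid up to the newest xid seen, the same shape
     * as the xmin/xmax loop in btree_xlog_delete_get_latestRemovedXid(). */
    for (i = 0; i < 4; i++)
        if (xid_follows(xids[i], latestRemovedXid))
            latestRemovedXid = xids[i];

    printf("%u\n", (unsigned) latestRemovedXid);    /* 512 */
    return 0;
}
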
@ -954,6 +957,7 @@ btree_redo(XLogRecPtr lsn, XLogRecord *record)
switch (info)
{
case XLOG_BTREE_DELETE:
/*
* Btree delete records can conflict with standby queries. You
* might think that vacuum records would conflict as well, but
@ -972,6 +976,7 @@ btree_redo(XLogRecPtr lsn, XLogRecord *record)
break;
case XLOG_BTREE_REUSE_PAGE:
/*
* Btree reuse page records exist to provide a conflict point
* when we reuse pages in the index via the FSM. That's all it
@ -1143,7 +1148,7 @@ btree_desc(StringInfo buf, uint8 xl_info, char *rec)
xl_btree_delete *xlrec = (xl_btree_delete *) rec;
appendStringInfo(buf, "delete: index %u/%u/%u; iblk %u, heap %u/%u/%u;",
xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode,
xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode,
xlrec->block,
xlrec->hnode.spcNode, xlrec->hnode.dbNode, xlrec->hnode.relNode);
break;
@ -1176,7 +1181,7 @@ btree_desc(StringInfo buf, uint8 xl_info, char *rec)
appendStringInfo(buf, "reuse_page: rel %u/%u/%u; latestRemovedXid %u",
xlrec->node.spcNode, xlrec->node.dbNode,
xlrec->node.relNode, xlrec->latestRemovedXid);
xlrec->node.relNode, xlrec->latestRemovedXid);
break;
}
default:

View File

@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/transam/twophase.c,v 1.61 2010/04/28 00:09:05 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/transam/twophase.c,v 1.62 2010/07/06 19:18:55 momjian Exp $
*
* NOTES
* Each global transaction is associated with a global transaction
@ -1201,7 +1201,7 @@ StandbyTransactionIdIsPrepared(TransactionId xid)
Assert(TransactionIdIsValid(xid));
if (max_prepared_xacts <= 0)
return false; /* nothing to do */
return false; /* nothing to do */
/* Read and validate file */
buf = ReadTwoPhaseFile(xid, false);

View File

@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/transam/xact.c,v 1.292 2010/06/29 18:44:58 momjian Exp $
* $PostgreSQL: pgsql/src/backend/access/transam/xact.c,v 1.293 2010/07/06 19:18:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -1057,9 +1057,9 @@ RecordTransactionCommit(void)
* Asynchronous commit case:
*
* This enables possible committed transaction loss in the case of a
* postmaster crash because WAL buffers are left unwritten.
* Ideally we could issue the WAL write without the fsync, but
* some wal_sync_methods do not allow separate write/fsync.
* postmaster crash because WAL buffers are left unwritten. Ideally we
* could issue the WAL write without the fsync, but some
* wal_sync_methods do not allow separate write/fsync.
*
* Report the latest async commit LSN, so that the WAL writer knows to
* flush this commit.
@ -1354,12 +1354,12 @@ RecordTransactionAbort(bool isSubXact)
/*
* Report the latest async abort LSN, so that the WAL writer knows to
* flush this abort. There's nothing to be gained by delaying this,
* since WALWriter may as well do this when it can. This is important
* with streaming replication because if we don't flush WAL regularly
* we will find that large aborts leave us with a long backlog for
* when commits occur after the abort, increasing our window of data
* loss should problems occur at that point.
* flush this abort. There's nothing to be gained by delaying this, since
* WALWriter may as well do this when it can. This is important with
* streaming replication because if we don't flush WAL regularly we will
* find that large aborts leave us with a long backlog for when commits
* occur after the abort, increasing our window of data loss should
* problems occur at that point.
*/
if (!isSubXact)
XLogSetAsyncCommitLSN(XactLastRecEnd);

View File

@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.429 2010/07/03 22:15:45 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.430 2010/07/06 19:18:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -363,7 +363,7 @@ typedef struct XLogCtlData
uint32 ckptXidEpoch; /* nextXID & epoch of latest checkpoint */
TransactionId ckptXid;
XLogRecPtr asyncCommitLSN; /* LSN of newest async commit */
uint32 lastRemovedLog; /* latest removed/recycled XLOG segment */
uint32 lastRemovedLog; /* latest removed/recycled XLOG segment */
uint32 lastRemovedSeg;
/* Protected by WALWriteLock: */
@ -379,6 +379,7 @@ typedef struct XLogCtlData
int XLogCacheBlck; /* highest allocated xlog buffer index */
TimeLineID ThisTimeLineID;
TimeLineID RecoveryTargetTLI;
/*
* archiveCleanupCommand is read from recovery.conf but needs to be in
* shared memory so that the bgwriter process can access it.
@ -480,13 +481,13 @@ static uint32 readId = 0;
static uint32 readSeg = 0;
static uint32 readOff = 0;
static uint32 readLen = 0;
static int readSource = 0; /* XLOG_FROM_* code */
static int readSource = 0; /* XLOG_FROM_* code */
/*
* Keeps track of which sources we've tried to read the current WAL
* record from and failed.
*/
static int failedSources = 0; /* OR of XLOG_FROM_* codes */
static int failedSources = 0; /* OR of XLOG_FROM_* codes */
/*
* These variables track when we last obtained some WAL data to process,
@ -495,7 +496,7 @@ static int failedSources = 0; /* OR of XLOG_FROM_* codes */
* to process right now.)
*/
static TimestampTz XLogReceiptTime = 0;
static int XLogReceiptSource = 0; /* XLOG_FROM_* code */
static int XLogReceiptSource = 0; /* XLOG_FROM_* code */
/* Buffer for currently read page (XLOG_BLCKSZ bytes) */
static char *readBuf = NULL;
@ -574,7 +575,7 @@ static int XLogFileReadAnyTLI(uint32 log, uint32 seg, int emode,
int sources);
static bool XLogPageRead(XLogRecPtr *RecPtr, int emode, bool fetching_ckpt,
bool randAccess);
static int emode_for_corrupt_record(int emode, XLogRecPtr RecPtr);
static int emode_for_corrupt_record(int emode, XLogRecPtr RecPtr);
static void XLogFileClose(void);
static bool RestoreArchivedFile(char *path, const char *xlogfname,
const char *recovername, off_t expectedSize);
@ -2139,13 +2140,14 @@ XLogBackgroundFlush(void)
}
/*
* If already known flushed, we're done. Just need to check if we
* are holding an open file handle to a logfile that's no longer
* in use, preventing the file from being deleted.
* If already known flushed, we're done. Just need to check if we are
* holding an open file handle to a logfile that's no longer in use,
* preventing the file from being deleted.
*/
if (XLByteLE(WriteRqstPtr, LogwrtResult.Flush))
{
if (openLogFile >= 0) {
if (openLogFile >= 0)
{
if (!XLByteInPrevSeg(LogwrtResult.Write, openLogId, openLogSeg))
{
XLogFileClose();
@ -4348,8 +4350,8 @@ writeTimeLineHistory(TimeLineID newTLI, TimeLineID parentTLI,
XLogFileName(xlogfname, endTLI, endLogId, endLogSeg);
/*
* Write comment to history file to explain why and where timeline changed.
* Comment varies according to the recovery target used.
* Write comment to history file to explain why and where timeline
* changed. Comment varies according to the recovery target used.
*/
if (recoveryTarget == RECOVERY_TARGET_XID)
snprintf(buffer, sizeof(buffer),
@ -5029,8 +5031,8 @@ parseRecoveryCommandFileLine(char *cmdline, char **key_p, char **value_p)
*key_p = *value_p = NULL;
/*
* Allocate the buffer on first use. It's used to hold both the
* parameter name and value.
* Allocate the buffer on first use. It's used to hold both the parameter
* name and value.
*/
if (buf == NULL)
buf = malloc(MAXPGPATH + 1);
@ -5076,7 +5078,7 @@ parseRecoveryCommandFileLine(char *cmdline, char **key_p, char **value_p)
}
}
else if (*ptr == '\0')
return false; /* unterminated quoted string */
return false; /* unterminated quoted string */
else
*(bufp++) = *ptr;
@ -5604,8 +5606,8 @@ void
GetXLogReceiptTime(TimestampTz *rtime, bool *fromStream)
{
/*
* This must be executed in the startup process, since we don't export
* the relevant state to shared memory.
* This must be executed in the startup process, since we don't export the
* relevant state to shared memory.
*/
Assert(InRecovery);
@ -5637,8 +5639,8 @@ static void
CheckRequiredParameterValues(void)
{
/*
* For archive recovery, the WAL must be generated with at least
* 'archive' wal_level.
* For archive recovery, the WAL must be generated with at least 'archive'
* wal_level.
*/
if (InArchiveRecovery && ControlFile->wal_level == WAL_LEVEL_MINIMAL)
{
@ -5648,8 +5650,8 @@ CheckRequiredParameterValues(void)
}
/*
* For Hot Standby, the WAL must be generated with 'hot_standby' mode,
* and we must have at least as many backend slots as the primary.
* For Hot Standby, the WAL must be generated with 'hot_standby' mode, and
* we must have at least as many backend slots as the primary.
*/
if (InArchiveRecovery && EnableHotStandby)
{
@ -5785,8 +5787,9 @@ StartupXLOG(void)
ControlFile->checkPointCopy.ThisTimeLineID)));
/*
* Save the selected recovery target timeline ID and archive_cleanup_command
* in shared memory so that other processes can see them
* Save the selected recovery target timeline ID and
* archive_cleanup_command in shared memory so that other processes can
* see them
*/
XLogCtl->RecoveryTargetTLI = recoveryTargetTLI;
strncpy(XLogCtl->archiveCleanupCommand,
@ -5800,8 +5803,8 @@ StartupXLOG(void)
(errmsg("entering standby mode")));
else if (recoveryTarget == RECOVERY_TARGET_XID)
ereport(LOG,
(errmsg("starting point-in-time recovery to XID %u",
recoveryTargetXid)));
(errmsg("starting point-in-time recovery to XID %u",
recoveryTargetXid)));
else if (recoveryTarget == RECOVERY_TARGET_TIME)
ereport(LOG,
(errmsg("starting point-in-time recovery to %s",
@ -5940,6 +5943,7 @@ StartupXLOG(void)
if (InRecovery)
{
int rmid;
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
@ -6008,8 +6012,8 @@ StartupXLOG(void)
CheckRequiredParameterValues();
/*
* Initialize for Hot Standby, if enabled. We won't let backends
* in yet, not until we've reached the min recovery point specified in
* Initialize for Hot Standby, if enabled. We won't let backends in
* yet, not until we've reached the min recovery point specified in
* control file and we've established a recovery snapshot from a
* running-xacts WAL record.
*/
@ -6038,9 +6042,9 @@ StartupXLOG(void)
/*
* If we're beginning at a shutdown checkpoint, we know that
* nothing was running on the master at this point. So fake-up
* an empty running-xacts record and use that here and now.
* Recover additional standby state for prepared transactions.
* nothing was running on the master at this point. So fake-up an
* empty running-xacts record and use that here and now. Recover
* additional standby state for prepared transactions.
*/
if (wasShutdown)
{
@ -6048,10 +6052,10 @@ StartupXLOG(void)
TransactionId latestCompletedXid;
/*
* Construct a RunningTransactions snapshot representing a shut
* down server, with only prepared transactions still alive.
* We're never overflowed at this point because all subxids
* are listed with their parent prepared transactions.
* Construct a RunningTransactions snapshot representing a
* shut down server, with only prepared transactions still
* alive. We're never overflowed at this point because all
* subxids are listed with their parent prepared transactions.
*/
running.xcnt = nxids;
running.subxid_overflow = false;
@ -6081,12 +6085,12 @@ StartupXLOG(void)
* recoveryLastXTime.
*
* This is slightly confusing if we're starting from an online
* checkpoint; we've just read and replayed the chekpoint record,
* but we're going to start replay from its redo pointer, which
* precedes the location of the checkpoint record itself. So even
* though the last record we've replayed is indeed ReadRecPtr, we
* haven't replayed all the preceding records yet. That's OK for
* the current use of these variables.
* checkpoint; we've just read and replayed the chekpoint record, but
* we're going to start replay from its redo pointer, which precedes
* the location of the checkpoint record itself. So even though the
* last record we've replayed is indeed ReadRecPtr, we haven't
* replayed all the preceding records yet. That's OK for the current
* use of these variables.
*/
SpinLockAcquire(&xlogctl->info_lck);
xlogctl->replayEndRecPtr = ReadRecPtr;
@ -6098,12 +6102,11 @@ StartupXLOG(void)
XLogReceiptTime = GetCurrentTimestamp();
/*
* Let postmaster know we've started redo now, so that it can
* launch bgwriter to perform restartpoints. We don't bother
* during crash recovery as restartpoints can only be performed
* during archive recovery. And we'd like to keep crash recovery
* simple, to avoid introducing bugs that could affect you when
* recovering after crash.
* Let postmaster know we've started redo now, so that it can launch
* bgwriter to perform restartpoints. We don't bother during crash
* recovery as restartpoints can only be performed during archive
* recovery. And we'd like to keep crash recovery simple, to avoid
* introducing bugs that could affect you when recovering after crash.
*
* After this point, we can no longer assume that we're the only
* process in addition to postmaster! Also, fsync requests are
@ -6117,7 +6120,8 @@ StartupXLOG(void)
}
/*
* Allow read-only connections immediately if we're consistent already.
* Allow read-only connections immediately if we're consistent
* already.
*/
CheckRecoveryConsistency();
@ -6214,7 +6218,10 @@ StartupXLOG(void)
xlogctl->replayEndRecPtr = EndRecPtr;
SpinLockRelease(&xlogctl->info_lck);
/* If we are attempting to enter Hot Standby mode, process XIDs we see */
/*
* If we are attempting to enter Hot Standby mode, process
* XIDs we see
*/
if (standbyState >= STANDBY_INITIALIZED &&
TransactionIdIsValid(record->xl_xid))
RecordKnownAssignedTransactionIds(record->xl_xid);
@ -6544,7 +6551,7 @@ StartupXLOG(void)
static void
CheckRecoveryConsistency(void)
{
static bool backendsAllowed = false;
static bool backendsAllowed = false;
/*
* Have we passed our safe starting point?
@ -6560,9 +6567,9 @@ CheckRecoveryConsistency(void)
}
/*
* Have we got a valid starting snapshot that will allow
* queries to be run? If so, we can tell postmaster that the
* database is consistent now, enabling connections.
* Have we got a valid starting snapshot that will allow queries to be
* run? If so, we can tell postmaster that the database is consistent now,
* enabling connections.
*/
if (standbyState == STANDBY_SNAPSHOT_READY &&
!backendsAllowed &&
@ -7400,8 +7407,8 @@ CreateCheckPoint(int flags)
{
/*
* Calculate the last segment that we need to retain because of
* wal_keep_segments, by subtracting wal_keep_segments from the
* new checkpoint location.
* wal_keep_segments, by subtracting wal_keep_segments from the new
* checkpoint location.
*/
if (wal_keep_segments > 0)
{
@ -7555,7 +7562,7 @@ CreateRestartPoint(int flags)
CheckPoint lastCheckPoint;
uint32 _logId;
uint32 _logSeg;
TimestampTz xtime;
TimestampTz xtime;
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
@ -7589,10 +7596,10 @@ CreateRestartPoint(int flags)
* restartpoint, we can't perform a new restart point. We still update
* minRecoveryPoint in that case, so that if this is a shutdown restart
* point, we won't start up earlier than before. That's not strictly
* necessary, but when hot standby is enabled, it would be rather
* weird if the database opened up for read-only connections at a
* point-in-time before the last shutdown. Such time travel is still
* possible in case of immediate shutdown, though.
* necessary, but when hot standby is enabled, it would be rather weird if
* the database opened up for read-only connections at a point-in-time
* before the last shutdown. Such time travel is still possible in case of
* immediate shutdown, though.
*
* We don't explicitly advance minRecoveryPoint when we do create a
* restartpoint. It's assumed that flushing the buffers will do that as a
@ -7621,9 +7628,9 @@ CreateRestartPoint(int flags)
}
/*
* Update the shared RedoRecPtr so that the startup process can
* calculate the number of segments replayed since last restartpoint,
* and request a restartpoint if it exceeds checkpoint_segments.
* Update the shared RedoRecPtr so that the startup process can calculate
* the number of segments replayed since last restartpoint, and request a
* restartpoint if it exceeds checkpoint_segments.
*
* You need to hold WALInsertLock and info_lck to update it, although
* during recovery acquiring WALInsertLock is just pro forma, because
@ -7712,8 +7719,8 @@ CreateRestartPoint(int flags)
ereport((log_checkpoints ? LOG : DEBUG2),
(errmsg("recovery restart point at %X/%X",
lastCheckPoint.redo.xlogid, lastCheckPoint.redo.xrecoff),
xtime ? errdetail("last completed transaction was at log time %s",
timestamptz_to_str(xtime)) : 0));
xtime ? errdetail("last completed transaction was at log time %s",
timestamptz_to_str(xtime)) : 0));
LWLockRelease(CheckpointLock);
@ -7802,12 +7809,11 @@ XLogReportParameters(void)
max_locks_per_xact != ControlFile->max_locks_per_xact)
{
/*
* The change in number of backend slots doesn't need to be
* WAL-logged if archiving is not enabled, as you can't start
* archive recovery with wal_level=minimal anyway. We don't
* really care about the values in pg_control either if
* wal_level=minimal, but seems better to keep them up-to-date
* to avoid confusion.
* The change in number of backend slots doesn't need to be WAL-logged
* if archiving is not enabled, as you can't start archive recovery
* with wal_level=minimal anyway. We don't really care about the
* values in pg_control either if wal_level=minimal, but seems better
* to keep them up-to-date to avoid confusion.
*/
if (wal_level != ControlFile->wal_level || XLogIsNeeded())
{
@ -7874,9 +7880,9 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
SetTransactionIdLimit(checkPoint.oldestXid, checkPoint.oldestXidDB);
/*
* If we see a shutdown checkpoint while waiting for an
* end-of-backup record, the backup was cancelled and the
* end-of-backup record will never arrive.
* If we see a shutdown checkpoint while waiting for an end-of-backup
* record, the backup was cancelled and the end-of-backup record will
* never arrive.
*/
if (InArchiveRecovery &&
!XLogRecPtrIsInvalid(ControlFile->backupStartPoint))
@ -7884,10 +7890,10 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
(errmsg("online backup was cancelled, recovery cannot continue")));
/*
* If we see a shutdown checkpoint, we know that nothing was
* running on the master at this point. So fake-up an empty
* running-xacts record and use that here and now. Recover
* additional standby state for prepared transactions.
* If we see a shutdown checkpoint, we know that nothing was running
* on the master at this point. So fake-up an empty running-xacts
* record and use that here and now. Recover additional standby state
* for prepared transactions.
*/
if (standbyState >= STANDBY_INITIALIZED)
{
@ -7901,9 +7907,9 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
/*
* Construct a RunningTransactions snapshot representing a shut
* down server, with only prepared transactions still alive.
* We're never overflowed at this point because all subxids
* are listed with their parent prepared transactions.
* down server, with only prepared transactions still alive. We're
* never overflowed at this point because all subxids are listed
* with their parent prepared transactions.
*/
running.xcnt = nxids;
running.subxid_overflow = false;
@ -8021,13 +8027,14 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
ControlFile->max_prepared_xacts = xlrec.max_prepared_xacts;
ControlFile->max_locks_per_xact = xlrec.max_locks_per_xact;
ControlFile->wal_level = xlrec.wal_level;
/*
* Update minRecoveryPoint to ensure that if recovery is aborted,
* we recover back up to this point before allowing hot standby
* again. This is particularly important if wal_level was set to
* 'archive' before, and is now 'hot_standby', to ensure you don't
* run queries against the WAL preceding the wal_level change.
* Same applies to decreasing max_* settings.
* Update minRecoveryPoint to ensure that if recovery is aborted, we
* recover back up to this point before allowing hot standby again.
* This is particularly important if wal_level was set to 'archive'
* before, and is now 'hot_standby', to ensure you don't run queries
* against the WAL preceding the wal_level change. Same applies to
* decreasing max_* settings.
*/
minRecoveryPoint = ControlFile->minRecoveryPoint;
if ((minRecoveryPoint.xlogid != 0 || minRecoveryPoint.xrecoff != 0)
@ -8321,7 +8328,7 @@ pg_start_backup(PG_FUNCTION_ARGS)
if (!XLogIsNeeded())
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("WAL level not sufficient for making an online backup"),
errmsg("WAL level not sufficient for making an online backup"),
errhint("wal_level must be set to \"archive\" or \"hot_standby\" at server start.")));
backupidstr = text_to_cstring(backupid);
@ -8513,7 +8520,7 @@ pg_stop_backup(PG_FUNCTION_ARGS)
if (!XLogIsNeeded())
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("WAL level not sufficient for making an online backup"),
errmsg("WAL level not sufficient for making an online backup"),
errhint("wal_level must be set to \"archive\" or \"hot_standby\" at server start.")));
/*
@ -8623,17 +8630,17 @@ pg_stop_backup(PG_FUNCTION_ARGS)
/*
* If archiving is enabled, wait for all the required WAL files to be
* archived before returning. If archiving isn't enabled, the required
* WAL needs to be transported via streaming replication (hopefully
* with wal_keep_segments set high enough), or some more exotic
* mechanism like polling and copying files from pg_xlog with script.
* We have no knowledge of those mechanisms, so it's up to the user to
* ensure that he gets all the required WAL.
* archived before returning. If archiving isn't enabled, the required WAL
* needs to be transported via streaming replication (hopefully with
* wal_keep_segments set high enough), or some more exotic mechanism like
* polling and copying files from pg_xlog with script. We have no
* knowledge of those mechanisms, so it's up to the user to ensure that he
* gets all the required WAL.
*
* We wait until both the last WAL file filled during backup and the
* history file have been archived, and assume that the alphabetic
* sorting property of the WAL files ensures any earlier WAL files are
* safely archived as well.
* history file have been archived, and assume that the alphabetic sorting
* property of the WAL files ensures any earlier WAL files are safely
* archived as well.
*
* We wait forever, since archive_command is supposed to work and we
* assume the admin wanted his backup to work completely. If you don't
@ -8642,44 +8649,44 @@ pg_stop_backup(PG_FUNCTION_ARGS)
*/
if (XLogArchivingActive())
{
XLByteToPrevSeg(stoppoint, _logId, _logSeg);
XLogFileName(lastxlogfilename, ThisTimeLineID, _logId, _logSeg);
XLByteToPrevSeg(stoppoint, _logId, _logSeg);
XLogFileName(lastxlogfilename, ThisTimeLineID, _logId, _logSeg);
XLByteToSeg(startpoint, _logId, _logSeg);
BackupHistoryFileName(histfilename, ThisTimeLineID, _logId, _logSeg,
startpoint.xrecoff % XLogSegSize);
XLByteToSeg(startpoint, _logId, _logSeg);
BackupHistoryFileName(histfilename, ThisTimeLineID, _logId, _logSeg,
startpoint.xrecoff % XLogSegSize);
seconds_before_warning = 60;
waits = 0;
seconds_before_warning = 60;
waits = 0;
while (XLogArchiveIsBusy(lastxlogfilename) ||
XLogArchiveIsBusy(histfilename))
{
CHECK_FOR_INTERRUPTS();
if (!reported_waiting && waits > 5)
while (XLogArchiveIsBusy(lastxlogfilename) ||
XLogArchiveIsBusy(histfilename))
{
ereport(NOTICE,
(errmsg("pg_stop_backup cleanup done, waiting for required WAL segments to be archived")));
reported_waiting = true;
CHECK_FOR_INTERRUPTS();
if (!reported_waiting && waits > 5)
{
ereport(NOTICE,
(errmsg("pg_stop_backup cleanup done, waiting for required WAL segments to be archived")));
reported_waiting = true;
}
pg_usleep(1000000L);
if (++waits >= seconds_before_warning)
{
seconds_before_warning *= 2; /* This wraps in >10 years... */
ereport(WARNING,
(errmsg("pg_stop_backup still waiting for all required WAL segments to be archived (%d seconds elapsed)",
waits),
errhint("Check that your archive_command is executing properly. "
"pg_stop_backup can be cancelled safely, "
"but the database backup will not be usable without all the WAL segments.")));
}
}
pg_usleep(1000000L);
if (++waits >= seconds_before_warning)
{
seconds_before_warning *= 2; /* This wraps in >10 years... */
ereport(WARNING,
(errmsg("pg_stop_backup still waiting for all required WAL segments to be archived (%d seconds elapsed)",
waits),
errhint("Check that your archive_command is executing properly. "
"pg_stop_backup can be cancelled safely, "
"but the database backup will not be usable without all the WAL segments.")));
}
}
ereport(NOTICE,
(errmsg("pg_stop_backup complete, all required WAL segments have been archived")));
ereport(NOTICE,
(errmsg("pg_stop_backup complete, all required WAL segments have been archived")));
}
else
ereport(NOTICE,
@ -8939,7 +8946,7 @@ pg_xlogfile_name(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("recovery is in progress"),
errhint("pg_xlogfile_name() cannot be executed during recovery.")));
errhint("pg_xlogfile_name() cannot be executed during recovery.")));
locationstr = text_to_cstring(location);
@ -9277,8 +9284,8 @@ XLogPageRead(XLogRecPtr *RecPtr, int emode, bool fetching_ckpt,
if (readFile >= 0 && !XLByteInSeg(*RecPtr, readId, readSeg))
{
/*
* Signal bgwriter to start a restartpoint if we've replayed too
* much xlog since the last one.
* Signal bgwriter to start a restartpoint if we've replayed too much
* xlog since the last one.
*/
if (StandbyMode && bgwriterLaunched)
{
@ -9313,17 +9320,17 @@ retry:
{
if (WalRcvInProgress())
{
bool havedata;
bool havedata;
/*
* If we find an invalid record in the WAL streamed from
* master, something is seriously wrong. There's little
* chance that the problem will just go away, but PANIC
* is not good for availability either, especially in
* hot standby mode. Disconnect, and retry from
* archive/pg_xlog again. The WAL in the archive should
* be identical to what was streamed, so it's unlikely
* that it helps, but one can hope...
* chance that the problem will just go away, but PANIC is
* not good for availability either, especially in hot
* standby mode. Disconnect, and retry from
* archive/pg_xlog again. The WAL in the archive should be
* identical to what was streamed, so it's unlikely that
* it helps, but one can hope...
*/
if (failedSources & XLOG_FROM_STREAM)
{
@ -9338,8 +9345,8 @@ retry:
* WAL from walreceiver and observe that we had already
* processed everything before the most recent "chunk"
* that it flushed to disk. In steady state where we are
* keeping up with the incoming data, XLogReceiptTime
* will be updated on each cycle. When we are behind,
* keeping up with the incoming data, XLogReceiptTime will
* be updated on each cycle. When we are behind,
* XLogReceiptTime will not advance, so the grace time
* alloted to conflicting queries will decrease.
*/
@ -9399,8 +9406,8 @@ retry:
}
else
{
int sources;
pg_time_t now;
int sources;
pg_time_t now;
/*
* Until walreceiver manages to reconnect, poll the
@ -9442,21 +9449,21 @@ retry:
/*
* If primary_conninfo is set, launch walreceiver to
* try to stream the missing WAL, before retrying
* to restore from archive/pg_xlog.
* try to stream the missing WAL, before retrying to
* restore from archive/pg_xlog.
*
* If fetching_ckpt is TRUE, RecPtr points to the
* initial checkpoint location. In that case, we use
* RedoStartLSN as the streaming start position instead
* of RecPtr, so that when we later jump backwards to
* start redo at RedoStartLSN, we will have the logs
* streamed already.
* RedoStartLSN as the streaming start position
* instead of RecPtr, so that when we later jump
* backwards to start redo at RedoStartLSN, we will
* have the logs streamed already.
*/
if (PrimaryConnInfo)
{
RequestXLogStreaming(
fetching_ckpt ? RedoStartLSN : *RecPtr,
PrimaryConnInfo);
fetching_ckpt ? RedoStartLSN : *RecPtr,
PrimaryConnInfo);
continue;
}
}
@ -9474,10 +9481,10 @@ retry:
failedSources |= sources;
/*
* Check to see if the trigger file exists. Note that
* we do this only after failure, so when you create
* the trigger file, we still finish replaying as much
* as we can from archive and pg_xlog before failover.
* Check to see if the trigger file exists. Note that we
* do this only after failure, so when you create the
* trigger file, we still finish replaying as much as we
* can from archive and pg_xlog before failover.
*/
if (CheckForStandbyTrigger())
goto triggered;
@ -9495,7 +9502,7 @@ retry:
/* In archive or crash recovery. */
if (readFile < 0)
{
int sources;
int sources;
/* Reset curFileTLI if random fetch. */
if (randAccess)
@ -9515,8 +9522,8 @@ retry:
}
/*
* At this point, we have the right segment open and if we're streaming
* we know the requested record is in it.
* At this point, we have the right segment open and if we're streaming we
* know the requested record is in it.
*/
Assert(readFile != -1);
@ -9619,13 +9626,13 @@ triggered:
* in the current WAL page, previously read by XLogPageRead().
*
* 'emode' is the error mode that would be used to report a file-not-found
* or legitimate end-of-WAL situation. Generally, we use it as-is, but if
* or legitimate end-of-WAL situation. Generally, we use it as-is, but if
* we're retrying the exact same record that we've tried previously, only
* complain the first time to keep the noise down. However, we only do when
* complain the first time to keep the noise down. However, we only do when
* reading from pg_xlog, because we don't expect any invalid records in archive
* or in records streamed from master. Files in the archive should be complete,
* and we should never hit the end of WAL because we stop and wait for more WAL
* to arrive before replaying it.
* to arrive before replaying it.
*
* NOTE: This function remembers the RecPtr value it was last called with,
* to suppress repeated messages about the same record. Only call this when


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/catalog/aclchk.c,v 1.167 2010/06/13 17:43:12 rhaas Exp $
* $PostgreSQL: pgsql/src/backend/catalog/aclchk.c,v 1.168 2010/07/06 19:18:55 momjian Exp $
*
* NOTES
* See acl.h.
@ -305,7 +305,7 @@ restrict_and_check_grant(bool is_grant, AclMode avail_goptions, bool all_privs,
if (is_grant)
{
if (this_privileges == 0)
{
{
if (objkind == ACL_KIND_COLUMN && colname)
ereport(WARNING,
(errcode(ERRCODE_WARNING_PRIVILEGE_NOT_GRANTED),
@ -356,8 +356,8 @@ restrict_and_check_grant(bool is_grant, AclMode avail_goptions, bool all_privs,
else
ereport(WARNING,
(errcode(ERRCODE_WARNING_PRIVILEGE_NOT_REVOKED),
errmsg("not all privileges could be revoked for \"%s\"",
objname)));
errmsg("not all privileges could be revoked for \"%s\"",
objname)));
}
}
@ -1089,7 +1089,7 @@ SetDefaultACL(InternalDefaultACL *iacls)
/*
* The default for a global entry is the hard-wired default ACL for the
* particular object type. The default for non-global entries is an empty
* particular object type. The default for non-global entries is an empty
* ACL. This must be so because global entries replace the hard-wired
* defaults, while others are added on.
*/
@ -1188,8 +1188,8 @@ SetDefaultACL(InternalDefaultACL *iacls)
/*
* If the result is the same as the default value, we do not need an
* explicit pg_default_acl entry, and should in fact remove the entry
* if it exists. Must sort both arrays to compare properly.
* explicit pg_default_acl entry, and should in fact remove the entry if
* it exists. Must sort both arrays to compare properly.
*/
aclitemsort(new_acl);
aclitemsort(def_acl);
@ -1256,7 +1256,7 @@ SetDefaultACL(InternalDefaultACL *iacls)
if (OidIsValid(iacls->nspid))
{
ObjectAddress myself,
referenced;
referenced;
myself.classId = DefaultAclRelationId;
myself.objectId = HeapTupleGetOid(newtuple);
@ -3202,8 +3202,8 @@ aclcheck_error_col(AclResult aclerr, AclObjectKind objectkind,
case ACLCHECK_NO_PRIV:
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("permission denied for column \"%s\" of relation \"%s\"",
colname, objectname)));
errmsg("permission denied for column \"%s\" of relation \"%s\"",
colname, objectname)));
break;
case ACLCHECK_NOT_OWNER:
/* relation msg is OK since columns don't have separate owners */


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/catalog/pg_proc.c,v 1.175 2010/05/11 04:52:28 itagaki Exp $
* $PostgreSQL: pgsql/src/backend/catalog/pg_proc.c,v 1.176 2010/07/06 19:18:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -629,7 +629,7 @@ ProcedureCreate(const char *procedureName,
/* Set per-function configuration parameters */
set_items = (ArrayType *) DatumGetPointer(proconfig);
if (set_items) /* Need a new GUC nesting level */
if (set_items) /* Need a new GUC nesting level */
{
save_nestlevel = NewGUCNestLevel();
ProcessGUCArray(set_items,
@ -638,7 +638,7 @@ ProcedureCreate(const char *procedureName,
GUC_ACTION_SAVE);
}
else
save_nestlevel = 0; /* keep compiler quiet */
save_nestlevel = 0; /* keep compiler quiet */
OidFunctionCall1(languageValidator, ObjectIdGetDatum(retval));


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/catalog/pg_shdepend.c,v 1.42 2010/07/03 13:53:13 rhaas Exp $
* $PostgreSQL: pgsql/src/backend/catalog/pg_shdepend.c,v 1.43 2010/07/06 19:18:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -391,9 +391,9 @@ getOidListDiff(Oid *list1, int *nlist1, Oid *list2, int *nlist2)
* and then insert or delete from pg_shdepend as appropiate.
*
* Note that we can't just insert all referenced roles blindly during GRANT,
* because we would end up with duplicate registered dependencies. We could
* because we would end up with duplicate registered dependencies. We could
* check for existence of the tuples before inserting, but that seems to be
* more expensive than what we are doing here. Likewise we can't just delete
* more expensive than what we are doing here. Likewise we can't just delete
* blindly during REVOKE, because the user may still have other privileges.
* It is also possible that REVOKE actually adds dependencies, due to
* instantiation of a formerly implicit default ACL (although at present,
@ -401,7 +401,7 @@ getOidListDiff(Oid *list1, int *nlist1, Oid *list2, int *nlist2)
*
* NOTE: Both input arrays must be sorted and de-duped. (Typically they
* are extracted from an ACL array by aclmembers(), which takes care of
* both requirements.) The arrays are pfreed before return.
* both requirements.) The arrays are pfreed before return.
*/
void
updateAclDependencies(Oid classId, Oid objectId, int32 objsubId,
@ -413,8 +413,8 @@ updateAclDependencies(Oid classId, Oid objectId, int32 objsubId,
int i;
/*
* Remove entries that are common to both lists; those represent
* existing dependencies we don't need to change.
* Remove entries that are common to both lists; those represent existing
* dependencies we don't need to change.
*
* OK to overwrite the inputs since we'll pfree them anyway.
*/
@ -460,7 +460,7 @@ updateAclDependencies(Oid classId, Oid objectId, int32 objsubId,
continue;
shdepDropDependency(sdepRel, classId, objectId, objsubId,
false, /* exact match on objsubId */
false, /* exact match on objsubId */
AuthIdRelationId, roleid,
SHARED_DEPENDENCY_ACL);
}


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.197 2010/06/01 00:33:23 momjian Exp $
* $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.198 2010/07/06 19:18:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -785,14 +785,14 @@ CheckMutability(Expr *expr)
{
/*
* First run the expression through the planner. This has a couple of
* important consequences. First, function default arguments will get
* important consequences. First, function default arguments will get
* inserted, which may affect volatility (consider "default now()").
* Second, inline-able functions will get inlined, which may allow us to
* conclude that the function is really less volatile than it's marked.
* As an example, polymorphic functions must be marked with the most
* volatile behavior that they have for any input type, but once we
* inline the function we may be able to conclude that it's not so
* volatile for the particular input type we're dealing with.
* conclude that the function is really less volatile than it's marked. As
* an example, polymorphic functions must be marked with the most volatile
* behavior that they have for any input type, but once we inline the
* function we may be able to conclude that it's not so volatile for the
* particular input type we're dealing with.
*
* We assume here that expression_planner() won't scribble on its input.
*/


@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/opclasscmds.c,v 1.67 2010/07/03 13:53:13 rhaas Exp $
* $PostgreSQL: pgsql/src/backend/commands/opclasscmds.c,v 1.68 2010/07/06 19:18:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -1954,8 +1954,8 @@ AlterOpClassOwner(List *name, const char *access_method, Oid newOwnerId)
void
AlterOpClassOwner_oid(Oid opclassOid, Oid newOwnerId)
{
HeapTuple tup;
Relation rel;
HeapTuple tup;
Relation rel;
rel = heap_open(OperatorClassRelationId, RowExclusiveLock);
@ -2097,8 +2097,8 @@ AlterOpFamilyOwner(List *name, const char *access_method, Oid newOwnerId)
void
AlterOpFamilyOwner_oid(Oid opfamilyOid, Oid newOwnerId)
{
HeapTuple tup;
Relation rel;
HeapTuple tup;
Relation rel;
rel = heap_open(OperatorFamilyRelationId, RowExclusiveLock);


@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/operatorcmds.c,v 1.46 2010/06/22 11:36:16 rhaas Exp $
* $PostgreSQL: pgsql/src/backend/commands/operatorcmds.c,v 1.47 2010/07/06 19:18:56 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@ -89,9 +89,9 @@ DefineOperator(List *names, List *parameters)
oprNamespace = QualifiedNameGetCreationNamespace(names, &oprName);
/*
* The SQL standard committee has decided that => should be used for
* named parameters; therefore, a future release of PostgreSQL may
* disallow it as the name of a user-defined operator.
* The SQL standard committee has decided that => should be used for named
* parameters; therefore, a future release of PostgreSQL may disallow it
* as the name of a user-defined operator.
*/
if (strcmp(oprName, "=>") == 0)
ereport(WARNING,


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.331 2010/07/01 14:10:21 rhaas Exp $
* $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.332 2010/07/06 19:18:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -1959,8 +1959,8 @@ renameatt(Oid myrelid,
/*
* Renaming the columns of sequences or toast tables doesn't actually
* break anything from the system's point of view, since internal
* references are by attnum. But it doesn't seem right to allow users
* to change names that are hardcoded into the system, hence the following
* references are by attnum. But it doesn't seem right to allow users to
* change names that are hardcoded into the system, hence the following
* restriction.
*/
relkind = RelationGetForm(targetrelation)->relkind;
@ -1970,8 +1970,8 @@ renameatt(Oid myrelid,
relkind != RELKIND_INDEX)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is not a table, view, composite type or index",
RelationGetRelationName(targetrelation))));
errmsg("\"%s\" is not a table, view, composite type or index",
RelationGetRelationName(targetrelation))));
/*
* permissions checking. only the owner of a class can change its schema.
@ -7049,9 +7049,9 @@ copy_relation_data(SMgrRelation src, SMgrRelation dst,
for (blkno = 0; blkno < nblocks; blkno++)
{
/* If we got a cancel signal during the copy of the data, quit */
CHECK_FOR_INTERRUPTS();
/* If we got a cancel signal during the copy of the data, quit */
CHECK_FOR_INTERRUPTS();
smgrread(src, forkNum, blkno, buf);
/* XLOG stuff */


@ -40,7 +40,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/tablespace.c,v 1.75 2010/07/02 02:44:32 momjian Exp $
* $PostgreSQL: pgsql/src/backend/commands/tablespace.c,v 1.76 2010/07/06 19:18:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -554,7 +554,7 @@ create_tablespace_directories(const char *location, const Oid tablespaceoid)
(errcode(ERRCODE_UNDEFINED_FILE),
errmsg("directory \"%s\" does not exist", location),
InRecovery ? errhint("Create directory \"%s\" for this tablespace before "
"restarting the server.", location) : 0));
"restarting the server.", location) : 0));
else
ereport(ERROR,
(errcode_for_file_access(),


@ -29,7 +29,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.135 2010/04/22 02:15:45 sriggs Exp $
* $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.136 2010/07/06 19:18:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -399,9 +399,11 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
vacrelstats);
/* Remove tuples from heap */
lazy_vacuum_heap(onerel, vacrelstats);
/*
* Forget the now-vacuumed tuples, and press on, but be careful
* not to reset latestRemovedXid since we want that value to be valid.
* not to reset latestRemovedXid since we want that value to be
* valid.
*/
vacrelstats->num_dead_tuples = 0;
vacrelstats->num_index_scans++;
@ -491,7 +493,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
* We count tuples removed by the pruning step as removed by VACUUM.
*/
tups_vacuumed += heap_page_prune(onerel, buf, OldestXmin, false,
&vacrelstats->latestRemovedXid);
&vacrelstats->latestRemovedXid);
/*
* Now scan the page to collect vacuumable items and check for tuples
* requiring freezing.
@ -682,9 +685,11 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
{
/* Remove tuples from heap */
lazy_vacuum_page(onerel, blkno, buf, 0, vacrelstats);
/*
* Forget the now-vacuumed tuples, and press on, but be careful
* not to reset latestRemovedXid since we want that value to be valid.
* not to reset latestRemovedXid since we want that value to be
* valid.
*/
vacrelstats->num_dead_tuples = 0;
vacuumed_pages++;


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.172 2010/05/29 02:32:08 momjian Exp $
* $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.173 2010/07/06 19:18:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -1310,8 +1310,8 @@ retry:
/*
* We should have found our tuple in the index, unless we exited the loop
* early because of conflict. Complain if not. If we ever implement
* '<>' index opclasses, this check will fail and will have to be removed.
* early because of conflict. Complain if not. If we ever implement '<>'
* index opclasses, this check will fail and will have to be removed.
*/
if (!found_self && !conflict)
ereport(ERROR,


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/functions.c,v 1.143 2010/03/19 22:54:40 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/functions.c,v 1.144 2010/07/06 19:18:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -939,9 +939,9 @@ sql_exec_error_callback(void *arg)
else
{
/*
* Assume we failed during init_sql_fcache(). (It's possible that
* the function actually has an empty body, but in that case we may
* as well report all errors as being "during startup".)
* Assume we failed during init_sql_fcache(). (It's possible that the
* function actually has an empty body, but in that case we may as
* well report all errors as being "during startup".)
*/
errcontext("SQL function \"%s\" during startup", fcache->fname);
}


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeMergejoin.c,v 1.102 2010/05/28 01:14:03 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeMergejoin.c,v 1.103 2010/07/06 19:18:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -252,7 +252,7 @@ MJExamineQuals(List *mergeclauses,
* input, since we assume mergejoin operators are strict. If the NULL
* is in the first join column, and that column sorts nulls last, then
* we can further conclude that no following tuple can match anything
* either, since they must all have nulls in the first column. However,
* either, since they must all have nulls in the first column. However,
* that case is only interesting if we're not in FillOuter mode, else
* we have to visit all the tuples anyway.
*
@ -748,6 +748,7 @@ ExecMergeJoin(MergeJoinState *node)
switch (MJEvalInnerValues(node, innerTupleSlot))
{
case MJEVAL_MATCHABLE:
/*
* OK, we have the initial tuples. Begin by skipping
* non-matching tuples.
@ -922,6 +923,7 @@ ExecMergeJoin(MergeJoinState *node)
switch (MJEvalInnerValues(node, innerTupleSlot))
{
case MJEVAL_MATCHABLE:
/*
* Test the new inner tuple to see if it matches
* outer.
@ -944,6 +946,7 @@ ExecMergeJoin(MergeJoinState *node)
}
break;
case MJEVAL_NONMATCHABLE:
/*
* It contains a NULL and hence can't match any outer
* tuple, so we can skip the comparison and assume the
@ -952,10 +955,11 @@ ExecMergeJoin(MergeJoinState *node)
node->mj_JoinState = EXEC_MJ_NEXTOUTER;
break;
case MJEVAL_ENDOFJOIN:
/*
* No more inner tuples. However, this might be
* only effective and not physical end of inner plan,
* so force mj_InnerTupleSlot to null to make sure we
* No more inner tuples. However, this might be only
* effective and not physical end of inner plan, so
* force mj_InnerTupleSlot to null to make sure we
* don't fetch more inner tuples. (We need this hack
* because we are not transiting to a state where the
* inner plan is assumed to be exhausted.)
@ -1152,9 +1156,11 @@ ExecMergeJoin(MergeJoinState *node)
node->mj_JoinState = EXEC_MJ_SKIP_TEST;
break;
case MJEVAL_NONMATCHABLE:
/*
* current inner can't possibly match any outer;
* better to advance the inner scan than the outer.
* better to advance the inner scan than the
* outer.
*/
node->mj_JoinState = EXEC_MJ_SKIPINNER_ADVANCE;
break;
@ -1337,6 +1343,7 @@ ExecMergeJoin(MergeJoinState *node)
node->mj_JoinState = EXEC_MJ_SKIP_TEST;
break;
case MJEVAL_NONMATCHABLE:
/*
* current inner can't possibly match any outer;
* better to advance the inner scan than the outer.


@ -9,7 +9,7 @@
* Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $PostgreSQL: pgsql/src/backend/lib/stringinfo.c,v 1.53 2010/05/08 16:39:49 tgl Exp $
* $PostgreSQL: pgsql/src/backend/lib/stringinfo.c,v 1.54 2010/07/06 19:18:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -226,8 +226,8 @@ appendBinaryStringInfo(StringInfo str, const char *data, int datalen)
/*
* Keep a trailing null in place, even though it's probably useless for
* binary data. (Some callers are dealing with text but call this
* because their input isn't null-terminated.)
* binary data. (Some callers are dealing with text but call this because
* their input isn't null-terminated.)
*/
str->data[str->len] = '\0';
}


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/libpq/auth.c,v 1.202 2010/06/29 04:12:47 petere Exp $
* $PostgreSQL: pgsql/src/backend/libpq/auth.c,v 1.203 2010/07/06 19:18:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -179,7 +179,7 @@ static int pg_GSS_recvauth(Port *port);
*----------------------------------------------------------------
*/
#ifdef ENABLE_SSPI
typedef SECURITY_STATUS
typedef SECURITY_STATUS
(WINAPI * QUERY_SECURITY_CONTEXT_TOKEN_FN) (
PCtxtHandle, void **);
static int pg_SSPI_recvauth(Port *port);
@ -233,8 +233,8 @@ static void
auth_failed(Port *port, int status)
{
const char *errstr;
int errcode_return = ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION;
int errcode_return = ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION;
/*
* If we failed due to EOF from client, just quit; there's no point in
* trying to send a message to the client, and not much point in logging
@ -369,13 +369,13 @@ ClientAuthentication(Port *port)
/*
* An explicit "reject" entry in pg_hba.conf. This report exposes
* the fact that there's an explicit reject entry, which is perhaps
* not so desirable from a security standpoint; but the message
* for an implicit reject could confuse the DBA a lot when the
* true situation is a match to an explicit reject. And we don't
* want to change the message for an implicit reject. As noted
* below, the additional information shown here doesn't expose
* anything not known to an attacker.
* the fact that there's an explicit reject entry, which is
* perhaps not so desirable from a security standpoint; but the
* message for an implicit reject could confuse the DBA a lot when
* the true situation is a match to an explicit reject. And we
* don't want to change the message for an implicit reject. As
* noted below, the additional information shown here doesn't
* expose anything not known to an attacker.
*/
{
char hostinfo[NI_MAXHOST];
@ -389,32 +389,32 @@ ClientAuthentication(Port *port)
{
#ifdef USE_SSL
ereport(FATAL,
(errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
errmsg("pg_hba.conf rejects replication connection for host \"%s\", user \"%s\", %s",
hostinfo, port->user_name,
port->ssl ? _("SSL on") : _("SSL off"))));
(errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
errmsg("pg_hba.conf rejects replication connection for host \"%s\", user \"%s\", %s",
hostinfo, port->user_name,
port->ssl ? _("SSL on") : _("SSL off"))));
#else
ereport(FATAL,
(errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
errmsg("pg_hba.conf rejects replication connection for host \"%s\", user \"%s\"",
hostinfo, port->user_name)));
(errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
errmsg("pg_hba.conf rejects replication connection for host \"%s\", user \"%s\"",
hostinfo, port->user_name)));
#endif
}
else
{
#ifdef USE_SSL
ereport(FATAL,
(errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
errmsg("pg_hba.conf rejects connection for host \"%s\", user \"%s\", database \"%s\", %s",
hostinfo, port->user_name,
port->database_name,
port->ssl ? _("SSL on") : _("SSL off"))));
(errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
errmsg("pg_hba.conf rejects connection for host \"%s\", user \"%s\", database \"%s\", %s",
hostinfo, port->user_name,
port->database_name,
port->ssl ? _("SSL on") : _("SSL off"))));
#else
ereport(FATAL,
(errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
errmsg("pg_hba.conf rejects connection for host \"%s\", user \"%s\", database \"%s\"",
hostinfo, port->user_name,
port->database_name)));
(errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
errmsg("pg_hba.conf rejects connection for host \"%s\", user \"%s\", database \"%s\"",
hostinfo, port->user_name,
port->database_name)));
#endif
}
break;
@ -442,32 +442,32 @@ ClientAuthentication(Port *port)
{
#ifdef USE_SSL
ereport(FATAL,
(errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
errmsg("no pg_hba.conf entry for replication connection from host \"%s\", user \"%s\", %s",
hostinfo, port->user_name,
port->ssl ? _("SSL on") : _("SSL off"))));
(errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
errmsg("no pg_hba.conf entry for replication connection from host \"%s\", user \"%s\", %s",
hostinfo, port->user_name,
port->ssl ? _("SSL on") : _("SSL off"))));
#else
ereport(FATAL,
(errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
errmsg("no pg_hba.conf entry for replication connection from host \"%s\", user \"%s\"",
hostinfo, port->user_name)));
(errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
errmsg("no pg_hba.conf entry for replication connection from host \"%s\", user \"%s\"",
hostinfo, port->user_name)));
#endif
}
else
{
#ifdef USE_SSL
ereport(FATAL,
(errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
errmsg("no pg_hba.conf entry for host \"%s\", user \"%s\", database \"%s\", %s",
hostinfo, port->user_name,
port->database_name,
port->ssl ? _("SSL on") : _("SSL off"))));
(errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
errmsg("no pg_hba.conf entry for host \"%s\", user \"%s\", database \"%s\", %s",
hostinfo, port->user_name,
port->database_name,
port->ssl ? _("SSL on") : _("SSL off"))));
#else
ereport(FATAL,
(errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
errmsg("no pg_hba.conf entry for host \"%s\", user \"%s\", database \"%s\"",
hostinfo, port->user_name,
port->database_name)));
(errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
errmsg("no pg_hba.conf entry for host \"%s\", user \"%s\", database \"%s\"",
hostinfo, port->user_name,
port->database_name)));
#endif
}
break;
@ -2781,7 +2781,7 @@ CheckRADIUSAuth(Port *port)
timeout.tv_sec = RADIUS_TIMEOUT;
timeout.tv_usec = 0;
FD_ZERO(&fdset);
FD_SET (sock, &fdset);
FD_SET(sock, &fdset);
while (true)
{
@ -2904,8 +2904,8 @@ CheckRADIUSAuth(Port *port)
else
{
ereport(LOG,
(errmsg("RADIUS response has invalid code (%i) for user \"%s\"",
receivepacket->code, port->user_name)));
(errmsg("RADIUS response has invalid code (%i) for user \"%s\"",
receivepacket->code, port->user_name)));
return STATUS_ERROR;
}
}


@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/libpq/be-secure.c,v 1.101 2010/05/26 16:15:57 tgl Exp $
* $PostgreSQL: pgsql/src/backend/libpq/be-secure.c,v 1.102 2010/07/06 19:18:56 momjian Exp $
*
* Since the server static private key ($DataDir/server.key)
* will normally be stored unencrypted so that the database
@ -500,7 +500,7 @@ err:
* to verify that the DBA-generated DH parameters file contains
* what we expect it to contain.
*/
static DH *
static DH *
load_dh_file(int keylength)
{
FILE *fp;
@ -558,7 +558,7 @@ load_dh_file(int keylength)
* To prevent problems if the DH parameters files don't even
* exist, we can load DH parameters hardcoded into this file.
*/
static DH *
static DH *
load_dh_buffer(const char *buffer, size_t len)
{
BIO *bio;
@ -590,7 +590,7 @@ load_dh_buffer(const char *buffer, size_t len)
* the OpenSSL library can efficiently generate random keys from
* the information provided.
*/
static DH *
static DH *
tmp_dh_cb(SSL *s, int is_export, int keylength)
{
DH *r = NULL;
@ -720,6 +720,7 @@ static void
initialize_SSL(void)
{
struct stat buf;
STACK_OF(X509_NAME) *root_cert_list = NULL;
if (!SSL_context)
@ -809,7 +810,7 @@ initialize_SSL(void)
ROOT_CERT_FILE)));
}
else if (SSL_CTX_load_verify_locations(SSL_context, ROOT_CERT_FILE, NULL) != 1 ||
(root_cert_list = SSL_load_client_CA_file(ROOT_CERT_FILE)) == NULL)
(root_cert_list = SSL_load_client_CA_file(ROOT_CERT_FILE)) == NULL)
{
/*
* File was there, but we could not load it. This means the file is
@ -867,7 +868,7 @@ initialize_SSL(void)
ssl_loaded_verify_locations = true;
}
/*
/*
* Tell OpenSSL to send the list of root certs we trust to clients in
* CertificateRequests. This lets a client with a keystore select the
* appropriate client certificate to send to us.


@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/libpq/hba.c,v 1.208 2010/06/03 19:29:38 petere Exp $
* $PostgreSQL: pgsql/src/backend/libpq/hba.c,v 1.209 2010/07/06 19:18:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -711,7 +711,7 @@ parse_hba_line(List *line, int line_num, HbaLine *parsedline)
ereport(LOG,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
errmsg("hostssl not supported on this platform"),
errhint("Compile with --with-openssl to use SSL connections."),
errhint("Compile with --with-openssl to use SSL connections."),
errcontext("line %d of configuration file \"%s\"",
line_num, HbaFileName)));
return false;
@ -891,8 +891,8 @@ parse_hba_line(List *line, int line_num, HbaLine *parsedline)
ereport(LOG,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
errmsg("IP address and mask do not match"),
errcontext("line %d of configuration file \"%s\"",
line_num, HbaFileName)));
errcontext("line %d of configuration file \"%s\"",
line_num, HbaFileName)));
return false;
}
}
@ -1011,14 +1011,15 @@ parse_hba_line(List *line, int line_num, HbaLine *parsedline)
{
ereport(LOG,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
errmsg("gssapi authentication is not supported on local sockets"),
errmsg("gssapi authentication is not supported on local sockets"),
errcontext("line %d of configuration file \"%s\"",
line_num, HbaFileName)));
return false;
}
/*
* SSPI authentication can never be enabled on ctLocal connections, because
* it's only supported on Windows, where ctLocal isn't supported.
* SSPI authentication can never be enabled on ctLocal connections,
* because it's only supported on Windows, where ctLocal isn't supported.
*/
@ -1248,8 +1249,8 @@ parse_hba_line(List *line, int line_num, HbaLine *parsedline)
{
ereport(LOG,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
errmsg("unrecognized authentication option name: \"%s\"",
token),
errmsg("unrecognized authentication option name: \"%s\"",
token),
errcontext("line %d of configuration file \"%s\"",
line_num, HbaFileName)));
return false;
@ -1633,8 +1634,8 @@ parse_ident_usermap(List *line, int line_number, const char *usermap_name,
pg_regerror(r, &re, errstr, sizeof(errstr));
ereport(LOG,
(errcode(ERRCODE_INVALID_REGULAR_EXPRESSION),
errmsg("regular expression match for \"%s\" failed: %s",
file_ident_user + 1, errstr)));
errmsg("regular expression match for \"%s\" failed: %s",
file_ident_user + 1, errstr)));
*error_p = true;
}


@ -59,7 +59,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.217 2010/04/19 00:55:25 rhaas Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.218 2010/07/06 19:18:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -1854,8 +1854,8 @@ cost_mergejoin(MergePath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
cpu_operator_cost * inner_path_rows * rescanratio;
/*
* Prefer materializing if it looks cheaper, unless the user has asked
* to suppress materialization.
* Prefer materializing if it looks cheaper, unless the user has asked to
* suppress materialization.
*/
if (enable_material && mat_inner_cost < bare_inner_cost)
path->materialize_inner = true;
@ -1872,9 +1872,9 @@ cost_mergejoin(MergePath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
* selected as the input of a mergejoin, and they don't support
* mark/restore at present.
*
* We don't test the value of enable_material here, because materialization
* is required for correctness in this case, and turning it off does not
* entitle us to deliver an invalid plan.
* We don't test the value of enable_material here, because
* materialization is required for correctness in this case, and turning
* it off does not entitle us to deliver an invalid plan.
*/
else if (innersortkeys == NIL &&
!ExecSupportsMarkRestore(inner_path->pathtype))
@ -1887,8 +1887,9 @@ cost_mergejoin(MergePath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
* We don't try to adjust the cost estimates for this consideration,
* though.
*
* Since materialization is a performance optimization in this case, rather
* than necessary for correctness, we skip it if enable_material is off.
* Since materialization is a performance optimization in this case,
* rather than necessary for correctness, we skip it if enable_material is
* off.
*/
else if (enable_material && innersortkeys != NIL &&
relation_byte_size(inner_path_rows, inner_path->parent->width) >


@ -16,7 +16,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/plan/analyzejoins.c,v 1.2 2010/05/23 16:34:38 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/plan/analyzejoins.c,v 1.3 2010/07/06 19:18:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -37,7 +37,7 @@ static List *remove_rel_from_joinlist(List *joinlist, int relid, int *nremoved);
* Check for relations that don't actually need to be joined at all,
* and remove them from the query.
*
* We are passed the current joinlist and return the updated list. Other
* We are passed the current joinlist and return the updated list. Other
* data structures that have to be updated are accessible via "root".
*/
List *
@ -46,15 +46,15 @@ remove_useless_joins(PlannerInfo *root, List *joinlist)
ListCell *lc;
/*
* We are only interested in relations that are left-joined to, so we
* can scan the join_info_list to find them easily.
* We are only interested in relations that are left-joined to, so we can
* scan the join_info_list to find them easily.
*/
restart:
foreach(lc, root->join_info_list)
{
SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(lc);
int innerrelid;
int nremoved;
int innerrelid;
int nremoved;
/* Skip if not removable */
if (!join_is_removable(root, sjinfo))
@ -85,7 +85,7 @@ restart:
* Restart the scan. This is necessary to ensure we find all
* removable joins independently of ordering of the join_info_list
* (note that removal of attr_needed bits may make a join appear
* removable that did not before). Also, since we just deleted the
* removable that did not before). Also, since we just deleted the
* current list cell, we'd have to have some kluge to continue the
* list scan anyway.
*/
@ -328,7 +328,7 @@ remove_rel_from_query(PlannerInfo *root, int relid)
if (otherrel == NULL)
continue;
Assert(otherrel->relid == rti); /* sanity check on array */
Assert(otherrel->relid == rti); /* sanity check on array */
/* no point in processing target rel itself */
if (otherrel == rel)
@ -346,10 +346,10 @@ remove_rel_from_query(PlannerInfo *root, int relid)
/*
* Likewise remove references from SpecialJoinInfo data structures.
*
* This is relevant in case the outer join we're deleting is nested
* inside other outer joins: the upper joins' relid sets have to be
* adjusted. The RHS of the target outer join will be made empty here,
* but that's OK since caller will delete that SpecialJoinInfo entirely.
* This is relevant in case the outer join we're deleting is nested inside
* other outer joins: the upper joins' relid sets have to be adjusted.
* The RHS of the target outer join will be made empty here, but that's OK
* since caller will delete that SpecialJoinInfo entirely.
*/
foreach(l, root->join_info_list)
{
@ -374,7 +374,7 @@ remove_rel_from_query(PlannerInfo *root, int relid)
PlaceHolderInfo *phinfo = (PlaceHolderInfo *) lfirst(l);
phinfo->ph_eval_at = bms_del_member(phinfo->ph_eval_at, relid);
if (bms_is_empty(phinfo->ph_eval_at)) /* oops, belay that */
if (bms_is_empty(phinfo->ph_eval_at)) /* oops, belay that */
phinfo->ph_eval_at = bms_add_member(phinfo->ph_eval_at, relid);
phinfo->ph_needed = bms_del_member(phinfo->ph_needed, relid);
@ -412,7 +412,7 @@ remove_rel_from_joinlist(List *joinlist, int relid, int *nremoved)
else if (IsA(jlnode, List))
{
/* Recurse to handle subproblem */
List *sublist;
List *sublist;
sublist = remove_rel_from_joinlist((List *) jlnode,
relid, nremoved);
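
The restart-the-scan-after-each-removal pattern that the comment above explains can be shown with a generic linked list, independent of the planner's data structures. A rough sketch, not the analyzejoins.c code:

    #include <stdbool.h>
    #include <stddef.h>

    struct node
    {
        struct node *next;
        bool        removable;
    };

    /* Unlink every removable node; restart after each removal, since removing
     * one node can make another one removable (unlinked nodes are not freed
     * in this sketch). */
    static void
    remove_removable(struct node **head)
    {
    restart:
        for (struct node **prev = head; *prev != NULL; prev = &(*prev)->next)
        {
            if ((*prev)->removable)
            {
                *prev = (*prev)->next;
                goto restart;
            }
        }
    }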

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/plan/planagg.c,v 1.52 2010/05/10 16:25:46 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/plan/planagg.c,v 1.53 2010/07/06 19:18:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -654,8 +654,8 @@ attach_notnull_index_qual(MinMaxAggInfo *info, IndexScan *iplan)
RowCompareExpr *rc = (RowCompareExpr *) qual;
/*
* Examine just the first column of the rowcompare, which is
* what determines its placement in the overall qual list.
* Examine just the first column of the rowcompare, which is what
* determines its placement in the overall qual list.
*/
leftop = (Expr *) linitial(rc->largs);

View File

@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/plan/planmain.c,v 1.118 2010/03/28 22:59:33 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/plan/planmain.c,v 1.119 2010/07/06 19:18:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -224,7 +224,7 @@ query_planner(PlannerInfo *root, List *tlist,
fix_placeholder_eval_levels(root);
/*
* Remove any useless outer joins. Ideally this would be done during
* Remove any useless outer joins. Ideally this would be done during
* jointree preprocessing, but the necessary information isn't available
* until we've built baserel data structures and classified qual clauses.
*/

View File

@ -16,7 +16,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/prep/prepjointree.c,v 1.72 2010/06/21 00:14:48 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/prep/prepjointree.c,v 1.73 2010/07/06 19:18:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -1315,7 +1315,7 @@ pullup_replace_vars_callback(Var *var,
&colnames, &fields);
/* Adjust the generated per-field Vars, but don't insert PHVs */
rcon->need_phvs = false;
context->sublevels_up = 0; /* to match the expandRTE output */
context->sublevels_up = 0; /* to match the expandRTE output */
fields = (List *) replace_rte_variables_mutator((Node *) fields,
context);
rcon->need_phvs = save_need_phvs;

View File

@ -22,7 +22,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/prep/prepunion.c,v 1.182 2010/05/11 15:31:37 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/prep/prepunion.c,v 1.183 2010/07/06 19:18:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -1268,8 +1268,8 @@ expand_inherited_rtentry(PlannerInfo *root, RangeTblEntry *rte, Index rti)
* if this is the parent table, leave copyObject's result alone.
*
* Note: we need to do this even though the executor won't run any
* permissions checks on the child RTE. The modifiedCols bitmap
* may be examined for trigger-firing purposes.
* permissions checks on the child RTE. The modifiedCols bitmap may
* be examined for trigger-firing purposes.
*/
if (childOID != parentOID)
{

View File

@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/optimizer/util/placeholder.c,v 1.7 2010/03/28 22:59:33 tgl Exp $
* $PostgreSQL: pgsql/src/backend/optimizer/util/placeholder.c,v 1.8 2010/07/06 19:18:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -177,7 +177,7 @@ fix_placeholder_eval_levels(PlannerInfo *root)
* If any placeholder can be computed at a base rel and is needed above it,
* add it to that rel's targetlist. We have to do this separately from
* fix_placeholder_eval_levels() because join removal happens in between,
* and can change the ph_eval_at sets. There is essentially the same logic
* and can change the ph_eval_at sets. There is essentially the same logic
* in add_placeholders_to_joinrel, but we can't do that part until joinrels
* are formed.
*/

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/parser/parse_expr.c,v 1.255 2010/06/30 18:10:23 heikki Exp $
* $PostgreSQL: pgsql/src/backend/parser/parse_expr.c,v 1.256 2010/07/06 19:18:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -1226,60 +1226,61 @@ transformFuncCall(ParseState *pstate, FuncCall *fn)
/* ... and hand off to ParseFuncOrColumn */
result = ParseFuncOrColumn(pstate,
fn->funcname,
targs,
fn->agg_order,
fn->agg_star,
fn->agg_distinct,
fn->func_variadic,
fn->over,
false,
fn->location);
fn->funcname,
targs,
fn->agg_order,
fn->agg_star,
fn->agg_distinct,
fn->func_variadic,
fn->over,
false,
fn->location);
/*
* pg_get_expr() is a system function that exposes the expression
* deparsing functionality in ruleutils.c to users. Very handy, but
* it was later realized that the functions in ruleutils.c don't check
* the input rigorously, assuming it to come from system catalogs and
* to therefore be valid. That makes it easy for a user to crash the
* backend by passing a maliciously crafted string representation of
* an expression to pg_get_expr().
* deparsing functionality in ruleutils.c to users. Very handy, but it was
* later realized that the functions in ruleutils.c don't check the input
* rigorously, assuming it to come from system catalogs and to therefore
* be valid. That makes it easy for a user to crash the backend by passing
* a maliciously crafted string representation of an expression to
* pg_get_expr().
*
* There's a lot of code in ruleutils.c, so it's not feasible to add
* water-proof input checking after the fact. Even if we did it once,
* it would need to be taken into account in any future patches too.
* water-proof input checking after the fact. Even if we did it once, it
* would need to be taken into account in any future patches too.
*
* Instead, we restrict pg_get_expr() to only allow input from system
* catalogs. This is a hack, but it's the most robust and easiest
* to backpatch way of plugging the vulnerability.
*
* This is transparent to the typical usage pattern of
* "pg_get_expr(systemcolumn, ...)", but will break
* "pg_get_expr('foo', ...)", even if 'foo' is a valid expression fetched
* earlier from a system catalog. Hopefully there aren't many clients
* doing that out there.
* "pg_get_expr(systemcolumn, ...)", but will break "pg_get_expr('foo',
* ...)", even if 'foo' is a valid expression fetched earlier from a
* system catalog. Hopefully there aren't many clients doing that out
* there.
*/
if (result && IsA(result, FuncExpr) && !superuser())
if (result && IsA(result, FuncExpr) &&!superuser())
{
FuncExpr *fe = (FuncExpr *) result;
FuncExpr *fe = (FuncExpr *) result;
if (fe->funcid == F_PG_GET_EXPR || fe->funcid == F_PG_GET_EXPR_EXT)
{
Expr *arg = linitial(fe->args);
bool allowed = false;
Expr *arg = linitial(fe->args);
bool allowed = false;
/*
* Check that the argument came directly from one of the
* allowed system catalog columns
* Check that the argument came directly from one of the allowed
* system catalog columns
*/
if (IsA(arg, Var))
{
Var *var = (Var *) arg;
Var *var = (Var *) arg;
RangeTblEntry *rte;
rte = GetRTEByRangeTablePosn(pstate,
var->varno, var->varlevelsup);
switch(rte->relid)
switch (rte->relid)
{
case IndexRelationId:
if (var->varattno == Anum_pg_index_indexprs ||

View File

@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/parser/scansup.c,v 1.41 2010/05/09 02:15:59 tgl Exp $
* $PostgreSQL: pgsql/src/backend/parser/scansup.c,v 1.42 2010/07/06 19:18:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -181,7 +181,7 @@ truncate_identifier(char *ident, int len, bool warn)
* We avoid using %.*s here because it can misbehave if the data
* is not valid in what libc thinks is the prevailing encoding.
*/
char buf[NAMEDATALEN];
char buf[NAMEDATALEN];
memcpy(buf, ident, len);
buf[len] = '\0';

View File

@ -10,7 +10,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/port/sysv_shmem.c,v 1.56 2010/05/01 22:46:30 tgl Exp $
* $PostgreSQL: pgsql/src/backend/port/sysv_shmem.c,v 1.57 2010/07/06 19:18:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -93,17 +93,17 @@ InternalIpcMemoryCreate(IpcMemoryKey memKey, Size size)
return NULL;
/*
* Some BSD-derived kernels are known to return EINVAL, not EEXIST,
* if there is an existing segment but it's smaller than "size"
* (this is a result of poorly-thought-out ordering of error tests).
* To distinguish between collision and invalid size in such cases,
* we make a second try with size = 0. These kernels do not test
* size against SHMMIN in the preexisting-segment case, so we will
* not get EINVAL a second time if there is such a segment.
* Some BSD-derived kernels are known to return EINVAL, not EEXIST, if
* there is an existing segment but it's smaller than "size" (this is
* a result of poorly-thought-out ordering of error tests). To
* distinguish between collision and invalid size in such cases, we
* make a second try with size = 0. These kernels do not test size
* against SHMMIN in the preexisting-segment case, so we will not get
* EINVAL a second time if there is such a segment.
*/
if (errno == EINVAL)
{
int save_errno = errno;
int save_errno = errno;
shmid = shmget(memKey, 0, IPC_CREAT | IPC_EXCL | IPCProtection);
@ -122,9 +122,9 @@ InternalIpcMemoryCreate(IpcMemoryKey memKey, Size size)
{
/*
* On most platforms we cannot get here because SHMMIN is
* greater than zero. However, if we do succeed in creating
* a zero-size segment, free it and then fall through to
* report the original error.
* greater than zero. However, if we do succeed in creating a
* zero-size segment, free it and then fall through to report
* the original error.
*/
if (shmctl(shmid, IPC_RMID, NULL) < 0)
elog(LOG, "shmctl(%d, %d, 0) failed: %m",
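
A self-contained sketch of the EINVAL disambiguation described above, with the permission bits and error reporting simplified (the real code uses IPCProtection and ereport):

    #include <errno.h>
    #include <sys/ipc.h>
    #include <sys/shm.h>

    static int
    try_create_segment(key_t key, size_t size)
    {
        int     shmid = shmget(key, size, IPC_CREAT | IPC_EXCL | 0600);

        if (shmid >= 0)
            return shmid;               /* created a fresh segment */

        if (errno == EINVAL)
        {
            int     save_errno = errno;

            /* Second try with size 0: on the kernels in question this fails
             * (EEXIST) only if a segment with this key already exists. */
            shmid = shmget(key, 0, IPC_CREAT | IPC_EXCL | 0600);
            if (shmid < 0)
                return -1;              /* collision with an existing segment */

            /* No pre-existing segment after all: free the stray zero-size
             * segment and report the original invalid-size error. */
            shmctl(shmid, IPC_RMID, NULL);
            errno = save_errno;
        }
        return -1;
    }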

View File

@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/port/win32/socket.c,v 1.26 2010/02/26 02:00:53 momjian Exp $
* $PostgreSQL: pgsql/src/backend/port/win32/socket.c,v 1.27 2010/07/06 19:18:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -457,7 +457,7 @@ pgwin32_select(int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds, c
r = WSASend(writefds->fd_array[i], &buf, 1, &sent, 0, NULL, NULL);
if (r == 0) /* Completed - means things are fine! */
FD_SET (writefds->fd_array[i], &outwritefds);
FD_SET(writefds->fd_array[i], &outwritefds);
else
{ /* Not completed */
@ -467,7 +467,7 @@ pgwin32_select(int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds, c
* Not completed, and not just "would block", so an error
* occurred
*/
FD_SET (writefds->fd_array[i], &outwritefds);
FD_SET(writefds->fd_array[i], &outwritefds);
}
}
if (outwritefds.fd_count > 0)
@ -554,7 +554,7 @@ pgwin32_select(int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds, c
(resEvents.lNetworkEvents & FD_ACCEPT) ||
(resEvents.lNetworkEvents & FD_CLOSE))
{
FD_SET (sockets[i], &outreadfds);
FD_SET(sockets[i], &outreadfds);
nummatches++;
}
@ -565,7 +565,7 @@ pgwin32_select(int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds, c
if ((resEvents.lNetworkEvents & FD_WRITE) ||
(resEvents.lNetworkEvents & FD_CLOSE))
{
FD_SET (sockets[i], &outwritefds);
FD_SET(sockets[i], &outwritefds);
nummatches++;
}

View File

@ -11,7 +11,7 @@
* Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/port/win32/timer.c,v 1.18 2010/01/02 16:57:50 momjian Exp $
* $PostgreSQL: pgsql/src/backend/port/win32/timer.c,v 1.19 2010/07/06 19:18:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -27,7 +27,7 @@ typedef struct timerCA
struct itimerval value;
HANDLE event;
CRITICAL_SECTION crit_sec;
} timerCA;
} timerCA;
static timerCA timerCommArea;
static HANDLE timerThreadHandle = INVALID_HANDLE_VALUE;

View File

@ -13,7 +13,7 @@
*
* Copyright (c) 2001-2010, PostgreSQL Global Development Group
*
* $PostgreSQL: pgsql/src/backend/postmaster/pgstat.c,v 1.203 2010/03/24 16:07:10 tgl Exp $
* $PostgreSQL: pgsql/src/backend/postmaster/pgstat.c,v 1.204 2010/07/06 19:18:57 momjian Exp $
* ----------
*/
#include "postgres.h"
@ -431,7 +431,7 @@ retry1:
for (;;) /* need a loop to handle EINTR */
{
FD_ZERO(&rset);
FD_SET (pgStatSock, &rset);
FD_SET(pgStatSock, &rset);
tv.tv_sec = 0;
tv.tv_usec = 500000;
@ -2891,7 +2891,7 @@ PgstatCollectorMain(int argc, char *argv[])
got_data = (input_fd.revents != 0);
#else /* !HAVE_POLL */
FD_SET (pgStatSock, &rfds);
FD_SET(pgStatSock, &rfds);
/*
* timeout struct is modified by select() on some operating systems,
@ -3288,10 +3288,10 @@ pgstat_write_statsfile(bool permanent)
last_statwrite = globalStats.stats_timestamp;
/*
* If there is clock skew between backends and the collector, we
* could receive a stats request time that's in the future. If so,
* complain and reset last_statrequest. Resetting ensures that no
* inquiry message can cause more than one stats file write to occur.
* If there is clock skew between backends and the collector, we could
* receive a stats request time that's in the future. If so, complain
* and reset last_statrequest. Resetting ensures that no inquiry
* message can cause more than one stats file write to occur.
*/
if (last_statrequest > last_statwrite)
{
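
The "need a loop to handle EINTR" pattern referenced earlier in this file, as a standalone sketch; the fd_set and the timeout are rebuilt on every iteration because, as the later comment notes, select() may modify the timeout struct on some operating systems:

    #include <errno.h>
    #include <sys/select.h>

    /* Wait up to 500 ms for readable data on sock; returns >0 if readable,
     * 0 on timeout, -1 on error.  Retries transparently on EINTR. */
    static int
    wait_for_data(int sock)
    {
        for (;;)
        {
            fd_set          rset;
            struct timeval  tv;
            int             rc;

            FD_ZERO(&rset);
            FD_SET(sock, &rset);
            tv.tv_sec = 0;
            tv.tv_usec = 500000;

            rc = select(sock + 1, &rset, NULL, NULL, &tv);
            if (rc >= 0 || errno != EINTR)
                return rc;
            /* interrupted by a signal: rebuild the mask and timeout, try again */
        }
    }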

View File

@ -37,7 +37,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/postmaster/postmaster.c,v 1.613 2010/06/24 16:40:45 rhaas Exp $
* $PostgreSQL: pgsql/src/backend/postmaster/postmaster.c,v 1.614 2010/07/06 19:18:57 momjian Exp $
*
* NOTES
*
@ -238,7 +238,7 @@ static bool RecoveryError = false; /* T if WAL recovery failed */
*
* When the startup process is ready to start archive recovery, it signals the
* postmaster, and we switch to PM_RECOVERY state. The background writer is
* launched, while the startup process continues applying WAL. If Hot Standby
* launched, while the startup process continues applying WAL. If Hot Standby
* is enabled, then, after reaching a consistent point in WAL redo, startup
* process signals us again, and we switch to PM_HOT_STANDBY state and
* begin accepting connections to perform read-only queries. When archive
@ -287,7 +287,7 @@ typedef enum
static PMState pmState = PM_INIT;
static bool ReachedNormalRunning = false; /* T if we've reached PM_RUN */
static bool ReachedNormalRunning = false; /* T if we've reached PM_RUN */
bool ClientAuthInProgress = false; /* T during new-client
* authentication */
@ -385,7 +385,7 @@ typedef struct
HANDLE waitHandle;
HANDLE procHandle;
DWORD procId;
} win32_deadchild_waitinfo;
} win32_deadchild_waitinfo;
HANDLE PostmasterHandle;
#endif
@ -400,7 +400,7 @@ typedef struct
SOCKET origsocket; /* Original socket value, or PGINVALID_SOCKET
* if not a socket */
WSAPROTOCOL_INFO wsainfo;
} InheritableSocket;
} InheritableSocket;
#else
typedef int InheritableSocket;
#endif
@ -447,15 +447,15 @@ typedef struct
char my_exec_path[MAXPGPATH];
char pkglib_path[MAXPGPATH];
char ExtraOptions[MAXPGPATH];
} BackendParameters;
} BackendParameters;
static void read_backend_variables(char *id, Port *port);
static void restore_backend_variables(BackendParameters *param, Port *port);
static void restore_backend_variables(BackendParameters * param, Port *port);
#ifndef WIN32
static bool save_backend_variables(BackendParameters *param, Port *port);
static bool save_backend_variables(BackendParameters * param, Port *port);
#else
static bool save_backend_variables(BackendParameters *param, Port *port,
static bool save_backend_variables(BackendParameters * param, Port *port,
HANDLE childProcess, pid_t childPid);
#endif
@ -1522,7 +1522,7 @@ initMasks(fd_set *rmask)
if (fd == PGINVALID_SOCKET)
break;
FD_SET (fd, rmask);
FD_SET(fd, rmask);
if (fd > maxsock)
maxsock = fd;
@ -2180,6 +2180,7 @@ pmdie(SIGNAL_ARGS)
/* and the walwriter too */
if (WalWriterPID != 0)
signal_child(WalWriterPID, SIGTERM);
/*
* If we're in recovery, we can't kill the startup process
* right away, because at present doing so does not release
@ -3033,8 +3034,8 @@ PostmasterStateMachine(void)
* Terminate backup mode to avoid recovery after a clean fast
* shutdown. Since a backup can only be taken during normal
* running (and not, for example, while running under Hot Standby)
* it only makes sense to do this if we reached normal running.
* If we're still in recovery, the backup file is one we're
* it only makes sense to do this if we reached normal running. If
* we're still in recovery, the backup file is one we're
* recovering *from*, and we must keep it around so that recovery
* restarts from the right place.
*/
@ -3390,13 +3391,13 @@ BackendInitialize(Port *port)
{
if (remote_port[0])
ereport(LOG,
(errmsg("connection received: host=%s port=%s",
remote_host,
remote_port)));
(errmsg("connection received: host=%s port=%s",
remote_host,
remote_port)));
else
ereport(LOG,
(errmsg("connection received: host=%s",
remote_host)));
(errmsg("connection received: host=%s",
remote_host)));
}
/*
@ -4601,19 +4602,19 @@ extern pgsocket pgStatSock;
#define read_inheritable_socket(dest, src) (*(dest) = *(src))
#else
static bool write_duplicated_handle(HANDLE *dest, HANDLE src, HANDLE child);
static bool write_inheritable_socket(InheritableSocket *dest, SOCKET src,
static bool write_inheritable_socket(InheritableSocket * dest, SOCKET src,
pid_t childPid);
static void read_inheritable_socket(SOCKET *dest, InheritableSocket *src);
static void read_inheritable_socket(SOCKET * dest, InheritableSocket * src);
#endif
/* Save critical backend variables into the BackendParameters struct */
#ifndef WIN32
static bool
save_backend_variables(BackendParameters *param, Port *port)
save_backend_variables(BackendParameters * param, Port *port)
#else
static bool
save_backend_variables(BackendParameters *param, Port *port,
save_backend_variables(BackendParameters * param, Port *port,
HANDLE childProcess, pid_t childPid)
#endif
{
@ -4705,7 +4706,7 @@ write_duplicated_handle(HANDLE *dest, HANDLE src, HANDLE childProcess)
* straight socket inheritance.
*/
static bool
write_inheritable_socket(InheritableSocket *dest, SOCKET src, pid_t childpid)
write_inheritable_socket(InheritableSocket * dest, SOCKET src, pid_t childpid)
{
dest->origsocket = src;
if (src != 0 && src != PGINVALID_SOCKET)
@ -4726,7 +4727,7 @@ write_inheritable_socket(InheritableSocket *dest, SOCKET src, pid_t childpid)
* Read a duplicate socket structure back, and get the socket descriptor.
*/
static void
read_inheritable_socket(SOCKET *dest, InheritableSocket *src)
read_inheritable_socket(SOCKET * dest, InheritableSocket * src)
{
SOCKET s;
@ -4831,7 +4832,7 @@ read_backend_variables(char *id, Port *port)
/* Restore critical backend variables from the BackendParameters struct */
static void
restore_backend_variables(BackendParameters *param, Port *port)
restore_backend_variables(BackendParameters * param, Port *port)
{
memcpy(port, &param->port, sizeof(Port));
read_inheritable_socket(&port->sock, &param->portsocket);

View File

@ -18,7 +18,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/postmaster/syslogger.c,v 1.57 2010/04/16 09:51:49 heikki Exp $
* $PostgreSQL: pgsql/src/backend/postmaster/syslogger.c,v 1.58 2010/07/06 19:18:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -372,7 +372,7 @@ SysLoggerMain(int argc, char *argv[])
* Wait for some data, timing out after 1 second
*/
FD_ZERO(&rfds);
FD_SET (syslogPipe[0], &rfds);
FD_SET(syslogPipe[0], &rfds);
timeout.tv_sec = 1;
timeout.tv_usec = 0;
@ -425,9 +425,9 @@ SysLoggerMain(int argc, char *argv[])
* detect pipe EOF. The main thread just wakes up once a second to
* check for SIGHUP and rotation conditions.
*
* Server code isn't generally thread-safe, so we ensure that only
* one of the threads is active at a time by entering the critical
* section whenever we're not sleeping.
* Server code isn't generally thread-safe, so we ensure that only one
* of the threads is active at a time by entering the critical section
* whenever we're not sleeping.
*/
LeaveCriticalSection(&sysloggerSection);

View File

@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c,v 1.11 2010/06/11 10:13:09 heikki Exp $
* $PostgreSQL: pgsql/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c,v 1.12 2010/07/06 19:18:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -86,9 +86,9 @@ libpqrcv_connect(char *conninfo, XLogRecPtr startpoint)
char cmd[64];
/*
* Connect using deliberately undocumented parameter: replication.
* The database name is ignored by the server in replication mode, but
* specify "replication" for .pgpass lookup.
* Connect using deliberately undocumented parameter: replication. The
* database name is ignored by the server in replication mode, but specify
* "replication" for .pgpass lookup.
*/
snprintf(conninfo_repl, sizeof(conninfo_repl),
"%s dbname=replication replication=true",
@ -168,7 +168,7 @@ libpqrcv_connect(char *conninfo, XLogRecPtr startpoint)
justconnected = true;
ereport(LOG,
(errmsg("streaming replication successfully connected to primary")));
(errmsg("streaming replication successfully connected to primary")));
return true;
}
@ -209,7 +209,7 @@ libpq_select(int timeout_ms)
struct timeval *ptr_timeout;
FD_ZERO(&input_mask);
FD_SET (PQsocket(streamConn), &input_mask);
FD_SET(PQsocket(streamConn), &input_mask);
if (timeout_ms < 0)
ptr_timeout = NULL;
@ -253,19 +253,18 @@ libpq_select(int timeout_ms)
static PGresult *
libpqrcv_PQexec(const char *query)
{
PGresult *result = NULL;
PGresult *lastResult = NULL;
PGresult *result = NULL;
PGresult *lastResult = NULL;
/*
* PQexec() silently discards any prior query results on the
* connection. This is not required for walreceiver since it's
* expected that walsender won't generate any such junk results.
* PQexec() silently discards any prior query results on the connection.
* This is not required for walreceiver since it's expected that walsender
* won't generate any such junk results.
*/
/*
* Submit a query. Since we don't use non-blocking mode, this also
* can block. But its risk is relatively small, so we ignore that
* for now.
* Submit a query. Since we don't use non-blocking mode, this also can
* block. But its risk is relatively small, so we ignore that for now.
*/
if (!PQsendQuery(streamConn, query))
return NULL;
@ -273,16 +272,16 @@ libpqrcv_PQexec(const char *query)
for (;;)
{
/*
* Receive data until PQgetResult is ready to get the result
* without blocking.
* Receive data until PQgetResult is ready to get the result without
* blocking.
*/
while (PQisBusy(streamConn))
{
/*
* We don't need to break down the sleep into smaller increments,
* and check for interrupts after each nap, since we can just
* elog(FATAL) within SIGTERM signal handler if the signal
* arrives in the middle of establishment of replication connection.
* elog(FATAL) within SIGTERM signal handler if the signal arrives
* in the middle of establishment of replication connection.
*/
if (!libpq_select(-1))
continue; /* interrupted */
@ -291,10 +290,9 @@ libpqrcv_PQexec(const char *query)
}
/*
* Emulate the PQexec()'s behavior of returning the last result
* when there are many.
* Since walsender will never generate multiple results, we skip
* the concatenation of error messages.
* Emulate the PQexec()'s behavior of returning the last result when
* there are many. Since walsender will never generate multiple
* results, we skip the concatenation of error messages.
*/
result = PQgetResult(streamConn);
if (result == NULL)
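
The PQexec() emulation sketched out by the comments above can be written against the documented asynchronous libpq API roughly as follows; error handling is pared down, and unlike the walreceiver code the wait here is a plain blocking select():

    #include <errno.h>
    #include <sys/select.h>
    #include <libpq-fe.h>

    static PGresult *
    exec_keep_last(PGconn *conn, const char *query)
    {
        PGresult   *last = NULL;

        if (!PQsendQuery(conn, query))
            return NULL;

        for (;;)
        {
            PGresult   *res;

            /* Receive data until PQgetResult can return without blocking. */
            while (PQisBusy(conn))
            {
                fd_set      mask;

                FD_ZERO(&mask);
                FD_SET(PQsocket(conn), &mask);
                if (select(PQsocket(conn) + 1, &mask, NULL, NULL, NULL) < 0)
                {
                    if (errno == EINTR)
                        continue;       /* interrupted: just wait again */
                    return last;
                }
                if (!PQconsumeInput(conn))
                    return last;        /* connection trouble: give up */
            }

            res = PQgetResult(conn);
            if (res == NULL)
                break;                  /* no more results for this query */
            if (last != NULL)
                PQclear(last);
            last = res;                 /* like PQexec(), keep only the last result */
        }
        return last;
    }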

View File

@ -29,7 +29,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/replication/walreceiver.c,v 1.15 2010/07/03 20:43:57 tgl Exp $
* $PostgreSQL: pgsql/src/backend/replication/walreceiver.c,v 1.16 2010/07/06 19:18:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -159,6 +159,7 @@ WalReceiverMain(void)
{
char conninfo[MAXCONNINFO];
XLogRecPtr startpoint;
/* use volatile pointer to prevent code rearrangement */
volatile WalRcvData *walrcv = WalRcv;

View File

@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/replication/walreceiverfuncs.c,v 1.6 2010/07/03 20:43:57 tgl Exp $
* $PostgreSQL: pgsql/src/backend/replication/walreceiverfuncs.c,v 1.7 2010/07/06 19:18:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -211,7 +211,7 @@ RequestXLogStreaming(XLogRecPtr recptr, const char *conninfo)
* Returns the last+1 byte position that walreceiver has written.
*
* Optionally, returns the previous chunk start, that is the first byte
* written in the most recent walreceiver flush cycle. Callers not
* written in the most recent walreceiver flush cycle. Callers not
* interested in that value may pass NULL for latestChunkStart.
*/
XLogRecPtr

View File

@ -28,7 +28,7 @@
* Portions Copyright (c) 2010-2010, PostgreSQL Global Development Group
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/replication/walsender.c,v 1.27 2010/06/17 16:41:25 tgl Exp $
* $PostgreSQL: pgsql/src/backend/replication/walsender.c,v 1.28 2010/07/06 19:18:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -66,7 +66,8 @@ bool am_walsender = false; /* Am I a walsender process ? */
int max_wal_senders = 0; /* the maximum number of concurrent walsenders */
int WalSndDelay = 200; /* max sleep time between some actions */
#define NAPTIME_PER_CYCLE 100000L /* max sleep time between cycles (100ms) */
#define NAPTIME_PER_CYCLE 100000L /* max sleep time between cycles
* (100ms) */
/*
* These variables are used similarly to openLogFile/Id/Seg/Off,
@ -266,10 +267,10 @@ WalSndHandshake(void)
* NOTE: This only checks the current value of
* wal_level. Even if the current setting is not
* 'minimal', there can be old WAL in the pg_xlog
* directory that was created with 'minimal'.
* So this is not bulletproof, the purpose is
* just to give a user-friendly error message that
* hints how to configure the system correctly.
* directory that was created with 'minimal'. So this
* is not bulletproof, the purpose is just to give a
* user-friendly error message that hints how to
* configure the system correctly.
*/
if (wal_level == WAL_LEVEL_MINIMAL)
ereport(FATAL,
@ -378,7 +379,7 @@ WalSndLoop(void)
/* Loop forever, unless we get an error */
for (;;)
{
long remain; /* remaining time (us) */
long remain; /* remaining time (us) */
/*
* Emergency bailout if postmaster has died. This is to avoid the
@ -422,8 +423,8 @@ WalSndLoop(void)
*
* On some platforms, signals won't interrupt the sleep. To ensure we
* respond reasonably promptly when someone signals us, break down the
* sleep into NAPTIME_PER_CYCLE increments, and check for
* interrupts after each nap.
* sleep into NAPTIME_PER_CYCLE increments, and check for interrupts
* after each nap.
*/
if (caughtup)
{
@ -503,8 +504,8 @@ InitWalSnd(void)
ereport(FATAL,
(errcode(ERRCODE_TOO_MANY_CONNECTIONS),
errmsg("number of requested standby connections "
"exceeds max_wal_senders (currently %d)",
max_wal_senders)));
"exceeds max_wal_senders (currently %d)",
max_wal_senders)));
/* Arrange to clean up at walsender exit */
on_shmem_exit(WalSndKill, 0);
@ -563,13 +564,14 @@ XLogRead(char *buf, XLogRecPtr recptr, Size nbytes)
if (sendFile < 0)
{
/*
* If the file is not found, assume it's because the
* standby asked for a too old WAL segment that has already
* been removed or recycled.
* If the file is not found, assume it's because the standby
* asked for a too old WAL segment that has already been
* removed or recycled.
*/
if (errno == ENOENT)
{
char filename[MAXFNAMELEN];
char filename[MAXFNAMELEN];
XLogFileName(filename, ThisTimeLineID, sendId, sendSeg);
ereport(ERROR,
(errcode_for_file_access(),
@ -619,10 +621,10 @@ XLogRead(char *buf, XLogRecPtr recptr, Size nbytes)
}
/*
* After reading into the buffer, check that what we read was valid.
* We do this after reading, because even though the segment was present
* when we opened it, it might get recycled or removed while we read it.
* The read() succeeds in that case, but the data we tried to read might
* After reading into the buffer, check that what we read was valid. We do
* this after reading, because even though the segment was present when we
* opened it, it might get recycled or removed while we read it. The
* read() succeeds in that case, but the data we tried to read might
* already have been overwritten with new WAL records.
*/
XLogGetLastRemoved(&lastRemovedLog, &lastRemovedSeg);
@ -630,7 +632,8 @@ XLogRead(char *buf, XLogRecPtr recptr, Size nbytes)
if (log < lastRemovedLog ||
(log == lastRemovedLog && seg <= lastRemovedSeg))
{
char filename[MAXFNAMELEN];
char filename[MAXFNAMELEN];
XLogFileName(filename, ThisTimeLineID, log, seg);
ereport(ERROR,
(errcode_for_file_access(),
@ -662,8 +665,8 @@ XLogSend(char *msgbuf, bool *caughtup)
WalDataMessageHeader msghdr;
/*
* Attempt to send all data that's already been written out and fsync'd
* to disk. We cannot go further than what's been written out given the
* Attempt to send all data that's already been written out and fsync'd to
* disk. We cannot go further than what's been written out given the
* current implementation of XLogRead(). And in any case it's unsafe to
* send WAL that is not securely down to disk on the master: if the master
* subsequently crashes and restarts, slaves must not have applied any WAL
@ -683,19 +686,18 @@ XLogSend(char *msgbuf, bool *caughtup)
* MAX_SEND_SIZE bytes to send, send everything. Otherwise send
* MAX_SEND_SIZE bytes, but round back to logfile or page boundary.
*
* The rounding is not only for performance reasons. Walreceiver
* relies on the fact that we never split a WAL record across two
* messages. Since a long WAL record is split at page boundary into
* continuation records, page boundary is always a safe cut-off point.
* We also assume that SendRqstPtr never points to the middle of a WAL
* record.
* The rounding is not only for performance reasons. Walreceiver relies on
* the fact that we never split a WAL record across two messages. Since a
* long WAL record is split at page boundary into continuation records,
* page boundary is always a safe cut-off point. We also assume that
* SendRqstPtr never points to the middle of a WAL record.
*/
startptr = sentPtr;
if (startptr.xrecoff >= XLogFileSize)
{
/*
* crossing a logid boundary, skip the non-existent last log
* segment in previous logical log file.
* crossing a logid boundary, skip the non-existent last log segment
* in previous logical log file.
*/
startptr.xlogid += 1;
startptr.xrecoff = 0;
@ -739,8 +741,8 @@ XLogSend(char *msgbuf, bool *caughtup)
XLogRead(msgbuf + 1 + sizeof(WalDataMessageHeader), startptr, nbytes);
/*
* We fill the message header last so that the send timestamp is taken
* as late as possible.
* We fill the message header last so that the send timestamp is taken as
* late as possible.
*/
msghdr.dataStart = startptr;
msghdr.walEnd = SendRqstPtr;
@ -931,4 +933,5 @@ GetOldestWALSendPointer(void)
}
return oldest;
}
#endif
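
The incremental-sleep idea described in the WalSndLoop comment above (nap in NAPTIME_PER_CYCLE slices so that signals are noticed promptly) in a generic, self-contained form; the flag and the plain usleep() call are stand-ins for the backend's signal handling and pg_usleep():

    #include <signal.h>
    #include <unistd.h>

    #define NAP_USECS 100000L       /* 100 ms per nap, as with NAPTIME_PER_CYCLE */

    static volatile sig_atomic_t wakeup_requested = 0;     /* set by a signal handler */

    /* Sleep for roughly remain_usecs, but check for interrupts after each nap. */
    static void
    interruptible_sleep(long remain_usecs)
    {
        while (remain_usecs > 0 && !wakeup_requested)
        {
            long    nap = remain_usecs > NAP_USECS ? NAP_USECS : remain_usecs;

            usleep((useconds_t) nap);
            remain_usecs -= nap;
        }
    }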

View File

@ -11,7 +11,7 @@
* as a service.
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/file/copydir.c,v 1.1 2010/07/02 17:03:30 rhaas Exp $
* $PostgreSQL: pgsql/src/backend/storage/file/copydir.c,v 1.2 2010/07/06 19:18:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -70,8 +70,8 @@ copydir(char *fromdir, char *todir, bool recurse)
{
struct stat fst;
/* If we got a cancel signal during the copy of the directory, quit */
CHECK_FOR_INTERRUPTS();
/* If we got a cancel signal during the copy of the directory, quit */
CHECK_FOR_INTERRUPTS();
if (strcmp(xlde->d_name, ".") == 0 ||
strcmp(xlde->d_name, "..") == 0)
@ -176,8 +176,8 @@ copy_file(char *fromfile, char *tofile)
*/
for (offset = 0;; offset += nbytes)
{
/* If we got a cancel signal during the copy of the file, quit */
CHECK_FOR_INTERRUPTS();
/* If we got a cancel signal during the copy of the file, quit */
CHECK_FOR_INTERRUPTS();
nbytes = read(srcfd, buffer, COPY_BUF_SIZE);
if (nbytes < 0)
@ -226,12 +226,12 @@ static void
fsync_fname(char *fname, bool isdir)
{
int fd;
int returncode;
int returncode;
/*
* Some OSs require directories to be opened read-only whereas
* other systems don't allow us to fsync files opened read-only; so
* we need both cases here
* Some OSs require directories to be opened read-only whereas other
* systems don't allow us to fsync files opened read-only; so we need both
* cases here
*/
if (!isdir)
fd = BasicOpenFile(fname,
@ -243,8 +243,8 @@ fsync_fname(char *fname, bool isdir)
S_IRUSR | S_IWUSR);
/*
* Some OSs don't allow us to open directories at all
* (Windows returns EACCES)
* Some OSs don't allow us to open directories at all (Windows returns
* EACCES)
*/
if (fd < 0 && isdir && (errno == EISDIR || errno == EACCES))
return;
@ -255,7 +255,7 @@ fsync_fname(char *fname, bool isdir)
errmsg("could not open file \"%s\": %m", fname)));
returncode = pg_fsync(fd);
/* Some OSs don't allow us to fsync directories at all */
if (returncode != 0 && isdir && errno == EBADF)
{
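
The fsync_fname() rules spelled out in the comments above (files opened read-write, directories read-only, with certain open and fsync errors tolerated for directories) condensed into a standalone sketch with plain errno-based reporting instead of ereport:

    #include <errno.h>
    #include <fcntl.h>
    #include <unistd.h>

    static int
    fsync_path(const char *path, int isdir)
    {
        /* Some OSs require directories to be opened read-only, whereas files
         * opened read-only cannot be fsync'd on other systems. */
        int     fd = open(path, isdir ? O_RDONLY : O_RDWR);

        if (fd < 0)
        {
            if (isdir && (errno == EISDIR || errno == EACCES))
                return 0;       /* some OSs won't let us open a directory at all */
            return -1;
        }

        if (fsync(fd) != 0 && !(isdir && errno == EBADF))
        {
            close(fd);
            return -1;          /* a real fsync failure */
        }

        close(fd);              /* EBADF on a directory: fsync of dirs unsupported, ignore */
        return 0;
    }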

View File

@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/ipc/ipc.c,v 1.107 2010/03/20 00:58:09 tgl Exp $
* $PostgreSQL: pgsql/src/backend/storage/ipc/ipc.c,v 1.108 2010/07/06 19:18:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -165,13 +165,13 @@ proc_exit_prepare(int code)
CritSectionCount = 0;
/*
* Also clear the error context stack, to prevent error callbacks
* from being invoked by any elog/ereport calls made during proc_exit.
* Whatever context they might want to offer is probably not relevant,
* and in any case they are likely to fail outright after we've done
* things like aborting any open transaction. (In normal exit scenarios
* the context stack should be empty anyway, but it might not be in the
* case of elog(FATAL) for example.)
* Also clear the error context stack, to prevent error callbacks from
* being invoked by any elog/ereport calls made during proc_exit. Whatever
* context they might want to offer is probably not relevant, and in any
* case they are likely to fail outright after we've done things like
* aborting any open transaction. (In normal exit scenarios the context
* stack should be empty anyway, but it might not be in the case of
* elog(FATAL) for example.)
*/
error_context_stack = NULL;
/* For the same reason, reset debug_query_string before it's clobbered */

View File

@ -19,11 +19,11 @@
*
* During hot standby, we also keep a list of XIDs representing transactions
* that are known to be running in the master (or more precisely, were running
* as of the current point in the WAL stream). This list is kept in the
* as of the current point in the WAL stream). This list is kept in the
* KnownAssignedXids array, and is updated by watching the sequence of
* arriving XIDs. This is necessary because if we leave those XIDs out of
* snapshots taken for standby queries, then they will appear to be already
* complete, leading to MVCC failures. Note that in hot standby, the PGPROC
* complete, leading to MVCC failures. Note that in hot standby, the PGPROC
* array represents standby processes, which by definition are not running
* transactions that have XIDs.
*
@ -37,7 +37,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/ipc/procarray.c,v 1.71 2010/07/03 21:23:58 tgl Exp $
* $PostgreSQL: pgsql/src/backend/storage/ipc/procarray.c,v 1.72 2010/07/06 19:18:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -71,7 +71,7 @@ typedef struct ProcArrayStruct
int numKnownAssignedXids; /* current # of valid entries */
int tailKnownAssignedXids; /* index of oldest valid element */
int headKnownAssignedXids; /* index of newest element, + 1 */
slock_t known_assigned_xids_lck; /* protects head/tail pointers */
slock_t known_assigned_xids_lck; /* protects head/tail pointers */
/*
* Highest subxid that has been removed from KnownAssignedXids array to
@ -145,17 +145,17 @@ static void DisplayXidCache(void);
/* Primitives for KnownAssignedXids array handling for standby */
static void KnownAssignedXidsCompress(bool force);
static void KnownAssignedXidsAdd(TransactionId from_xid, TransactionId to_xid,
bool exclusive_lock);
bool exclusive_lock);
static bool KnownAssignedXidsSearch(TransactionId xid, bool remove);
static bool KnownAssignedXidExists(TransactionId xid);
static void KnownAssignedXidsRemove(TransactionId xid);
static void KnownAssignedXidsRemoveTree(TransactionId xid, int nsubxids,
TransactionId *subxids);
TransactionId *subxids);
static void KnownAssignedXidsRemovePreceding(TransactionId xid);
static int KnownAssignedXidsGet(TransactionId *xarray, TransactionId xmax);
static int KnownAssignedXidsGet(TransactionId *xarray, TransactionId xmax);
static int KnownAssignedXidsGetAndSetXmin(TransactionId *xarray,
TransactionId *xmin,
TransactionId xmax);
TransactionId *xmin,
TransactionId xmax);
static void KnownAssignedXidsDisplay(int trace_level);
/*
@ -181,9 +181,9 @@ ProcArrayShmemSize(void)
* since we may at times copy the whole of the data structures around. We
* refer to this size as TOTAL_MAX_CACHED_SUBXIDS.
*
* Ideally we'd only create this structure if we were actually doing
* hot standby in the current run, but we don't know that yet at the
* time shared memory is being set up.
* Ideally we'd only create this structure if we were actually doing hot
* standby in the current run, but we don't know that yet at the time
* shared memory is being set up.
*/
#define TOTAL_MAX_CACHED_SUBXIDS \
((PGPROC_MAX_CACHED_SUBXIDS + 1) * PROCARRAY_MAXPROCS)
@ -465,9 +465,9 @@ void
ProcArrayApplyRecoveryInfo(RunningTransactions running)
{
TransactionId *xids;
int nxids;
int nxids;
TransactionId nextXid;
int i;
int i;
Assert(standbyState >= STANDBY_INITIALIZED);
Assert(TransactionIdIsValid(running->nextXid));
@ -510,8 +510,8 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running)
else
elog(trace_recovery(DEBUG2),
"recovery snapshot waiting for %u oldest active xid on standby is %u",
standbySnapshotPendingXmin,
running->oldestRunningXid);
standbySnapshotPendingXmin,
running->oldestRunningXid);
return;
}
@ -523,8 +523,8 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running)
/*
* Remove all xids except xids later than the snapshot. We don't know
* exactly which ones those are until precisely now, so that is why we
* allow xids to be added only to remove most of them again here.
* exactly which ones those are until precisely now, so that is why we allow
* xids to be added only to remove most of them again here.
*/
ExpireOldKnownAssignedTransactionIds(running->nextXid);
StandbyReleaseOldLocks(running->nextXid);
@ -536,41 +536,40 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running)
/*
* Combine the running xact data with already known xids, if any exist.
* KnownAssignedXids is sorted so we cannot just add new xids, we have
* to combine them first, sort them and then re-add to KnownAssignedXids.
* KnownAssignedXids is sorted so we cannot just add new xids, we have to
* combine them first, sort them and then re-add to KnownAssignedXids.
*
* Some of the new xids are top-level xids and some are subtransactions. We
* don't call SubtransSetParent because it doesn't matter yet. If we aren't
* overflowed then all xids will fit in snapshot and so we don't need
* subtrans. If we later overflow, an xid assignment record will add xids
* to subtrans. If RunningXacts is overflowed then we don't have enough
* information to correctly update subtrans anyway.
* Some of the new xids are top-level xids and some are subtransactions.
* We don't call SubtransSetParent because it doesn't matter yet. If we
* aren't overflowed then all xids will fit in snapshot and so we don't
* need subtrans. If we later overflow, an xid assignment record will add
* xids to subtrans. If RunningXacts is overflowed then we don't have
* enough information to correctly update subtrans anyway.
*/
/*
* Allocate a temporary array so we can combine xids. The total
* of both arrays should never normally exceed TOTAL_MAX_CACHED_SUBXIDS.
* Allocate a temporary array so we can combine xids. The total of both
* arrays should never normally exceed TOTAL_MAX_CACHED_SUBXIDS.
*/
xids = palloc(sizeof(TransactionId) * TOTAL_MAX_CACHED_SUBXIDS);
/*
* Get the remaining KnownAssignedXids. In most cases there won't
* be any at all since this exists only to catch a theoretical
* race condition.
* Get the remaining KnownAssignedXids. In most cases there won't be any
* at all since this exists only to catch a theoretical race condition.
*/
nxids = KnownAssignedXidsGet(xids, InvalidTransactionId);
if (nxids > 0)
KnownAssignedXidsDisplay(trace_recovery(DEBUG3));
/*
* Now we have a copy of any KnownAssignedXids, we can zero the
* array before re-inserting the combined snapshot.
* Now we have a copy of any KnownAssignedXids, we can zero the array
* before re-inserting the combined snapshot.
*/
KnownAssignedXidsRemovePreceding(InvalidTransactionId);
/*
* Add to the temp array any xids which have not already completed,
* taking care not to overflow in extreme cases.
* Add to the temp array any xids which have not already completed, taking
* care not to overflow in extreme cases.
*/
for (i = 0; i < running->xcnt; i++)
{
@ -597,7 +596,8 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running)
if (nxids > 0)
{
/*
* Sort the array so that we can add them safely into KnownAssignedXids.
* Sort the array so that we can add them safely into
* KnownAssignedXids.
*/
qsort(xids, nxids, sizeof(TransactionId), xidComparator);
@ -622,23 +622,21 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running)
pfree(xids);
/*
* Now we've got the running xids, we need to set the global values
* that are used to track snapshots as they evolve further
* Now we've got the running xids, we need to set the global values that
* are used to track snapshots as they evolve further
*
* * latestCompletedXid which will be the xmax for snapshots
* * lastOverflowedXid which shows whether snapshots overflow
* * nextXid
* * latestCompletedXid which will be the xmax for snapshots *
* lastOverflowedXid which shows whether snapshots overflow * nextXid
*
* If the snapshot overflowed, then we still initialise with what we know,
* but the recovery snapshot isn't fully valid yet because we know there
* are some subxids missing.
* We don't know the specific subxids that are missing, so conservatively
* assume the last one is latestObservedXid. If no missing subxids,
* try to clear lastOverflowedXid.
* are some subxids missing. We don't know the specific subxids that are
* missing, so conservatively assume the last one is latestObservedXid.
* If no missing subxids, try to clear lastOverflowedXid.
*
* If the snapshot didn't overflow it's still possible that an overflow
* occurred in the gap between taking snapshot and logging record, so
* we also need to check if lastOverflowedXid is already ahead of us.
* occurred in the gap between taking snapshot and logging record, so we
* also need to check if lastOverflowedXid is already ahead of us.
*/
if (running->subxid_overflow)
{
@ -650,7 +648,7 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running)
procArray->lastOverflowedXid = latestObservedXid;
}
else if (TransactionIdFollows(procArray->lastOverflowedXid,
latestObservedXid))
latestObservedXid))
{
standbyState = STANDBY_SNAPSHOT_PENDING;
@ -662,7 +660,7 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running)
standbySnapshotPendingXmin = InvalidTransactionId;
if (TransactionIdFollows(running->oldestRunningXid,
procArray->lastOverflowedXid))
procArray->lastOverflowedXid))
procArray->lastOverflowedXid = InvalidTransactionId;
}
@ -933,10 +931,10 @@ TransactionIdIsInProgress(TransactionId xid)
/*
* If the KnownAssignedXids overflowed, we have to check pg_subtrans
* too. Fetch all xids from KnownAssignedXids that are lower than xid,
* since if xid is a subtransaction its parent will always have a
* lower value. Note we will collect both main and subXIDs here,
* but there's no help for it.
* too. Fetch all xids from KnownAssignedXids that are lower than
* xid, since if xid is a subtransaction its parent will always have a
* lower value. Note we will collect both main and subXIDs here, but
* there's no help for it.
*/
if (TransactionIdPrecedesOrEquals(xid, procArray->lastOverflowedXid))
nxids = KnownAssignedXidsGet(xids, xid);
@ -1117,15 +1115,16 @@ GetOldestXmin(bool allDbs, bool ignoreVacuum)
LWLockRelease(ProcArrayLock);
/*
* Compute the cutoff XID, being careful not to generate a "permanent" XID.
* Compute the cutoff XID, being careful not to generate a "permanent"
* XID.
*
* vacuum_defer_cleanup_age provides some additional "slop" for the
* benefit of hot standby queries on slave servers. This is quick and
* dirty, and perhaps not all that useful unless the master has a
* predictable transaction rate, but it's what we've got. Note that
* we are assuming vacuum_defer_cleanup_age isn't large enough to cause
* wraparound --- so guc.c should limit it to no more than the xidStopLimit
* threshold in varsup.c.
* predictable transaction rate, but it's what we've got. Note that we
* are assuming vacuum_defer_cleanup_age isn't large enough to cause
* wraparound --- so guc.c should limit it to no more than the
* xidStopLimit threshold in varsup.c.
*/
result -= vacuum_defer_cleanup_age;
if (!TransactionIdIsNormal(result))
@ -1229,8 +1228,8 @@ GetSnapshotData(Snapshot snapshot)
/*
* If we're in recovery then snapshot data comes from a different place,
* so decide which route we take before grab the lock. It is possible
* for recovery to end before we finish taking snapshot, and for newly
* so decide which route we take before grab the lock. It is possible for
* recovery to end before we finish taking snapshot, and for newly
* assigned transaction ids to be added to the procarray. Xmax cannot
* change while we hold ProcArrayLock, so those newly added transaction
* ids would be filtered away, so we need not be concerned about them.
@ -1240,8 +1239,8 @@ GetSnapshotData(Snapshot snapshot)
if (!snapshot->takenDuringRecovery)
{
/*
* Spin over procArray checking xid, xmin, and subxids. The goal is to
* gather all active xids, find the lowest xmin, and try to record
* Spin over procArray checking xid, xmin, and subxids. The goal is
* to gather all active xids, find the lowest xmin, and try to record
* subxids. During recovery no xids will be assigned, so all normal
* backends can be ignored, nor are there any VACUUMs running. All
* prepared transaction xids are held in KnownAssignedXids, so these
@ -1257,7 +1256,7 @@ GetSnapshotData(Snapshot snapshot)
continue;
/* Update globalxmin to be the smallest valid xmin */
xid = proc->xmin; /* fetch just once */
xid = proc->xmin; /* fetch just once */
if (TransactionIdIsNormal(xid) &&
TransactionIdPrecedes(xid, globalxmin))
globalxmin = xid;
@ -1266,13 +1265,13 @@ GetSnapshotData(Snapshot snapshot)
xid = proc->xid;
/*
* If the transaction has been assigned an xid < xmax we add it to the
* snapshot, and update xmin if necessary. There's no need to store
* XIDs >= xmax, since we'll treat them as running anyway. We don't
* bother to examine their subxids either.
* If the transaction has been assigned an xid < xmax we add it to
* the snapshot, and update xmin if necessary. There's no need to
* store XIDs >= xmax, since we'll treat them as running anyway.
* We don't bother to examine their subxids either.
*
* We don't include our own XID (if any) in the snapshot, but we must
* include it into xmin.
* We don't include our own XID (if any) in the snapshot, but we
* must include it into xmin.
*/
if (TransactionIdIsNormal(xid))
{
@ -1285,16 +1284,17 @@ GetSnapshotData(Snapshot snapshot)
}
/*
* Save subtransaction XIDs if possible (if we've already overflowed,
* there's no point). Note that the subxact XIDs must be later than
* their parent, so no need to check them against xmin. We could
* filter against xmax, but it seems better not to do that much work
* while holding the ProcArrayLock.
* Save subtransaction XIDs if possible (if we've already
* overflowed, there's no point). Note that the subxact XIDs must
* be later than their parent, so no need to check them against
* xmin. We could filter against xmax, but it seems better not to
* do that much work while holding the ProcArrayLock.
*
* The other backend can add more subxids concurrently, but cannot
* remove any. Hence it's important to fetch nxids just once. Should
* be safe to use memcpy, though. (We needn't worry about missing any
* xids added concurrently, because they must postdate xmax.)
* remove any. Hence it's important to fetch nxids just once.
* Should be safe to use memcpy, though. (We needn't worry about
* missing any xids added concurrently, because they must postdate
* xmax.)
*
* Again, our own XIDs are not included in the snapshot.
*/
@ -1805,7 +1805,7 @@ GetCurrentVirtualXIDs(TransactionId limitXmin, bool excludeXmin0,
* us then the conflict assessment made here would never include the snapshot
* that is being derived. So we take LW_SHARED on the ProcArray and allow
* concurrent snapshots when limitXmin is valid. We might think about adding
* Assert(limitXmin < lowest(KnownAssignedXids))
* Assert(limitXmin < lowest(KnownAssignedXids))
* but that would not be true in the case of FATAL errors lagging in array,
* but we already know those are bogus anyway, so we skip that test.
*
@ -2273,7 +2273,7 @@ DisplayXidCache(void)
* treated as running by standby transactions, even though they are not in
* the standby server's PGPROC array.
*
* We record all XIDs that we know have been assigned. That includes all the
* We record all XIDs that we know have been assigned. That includes all the
* XIDs seen in WAL records, plus all unobserved XIDs that we can deduce have
* been assigned. We can deduce the existence of unobserved XIDs because we
* know XIDs are assigned in sequence, with no gaps. The KnownAssignedXids
@ -2282,7 +2282,7 @@ DisplayXidCache(void)
*
* During hot standby we do not fret too much about the distinction between
* top-level XIDs and subtransaction XIDs. We store both together in the
* KnownAssignedXids list. In backends, this is copied into snapshots in
* KnownAssignedXids list. In backends, this is copied into snapshots in
* GetSnapshotData(), taking advantage of the fact that XidInMVCCSnapshot()
* doesn't care about the distinction either. Subtransaction XIDs are
* effectively treated as top-level XIDs and in the typical case pg_subtrans
@ -2338,7 +2338,7 @@ RecordKnownAssignedTransactionIds(TransactionId xid)
Assert(TransactionIdIsValid(xid));
elog(trace_recovery(DEBUG4), "record known xact %u latestObservedXid %u",
xid, latestObservedXid);
xid, latestObservedXid);
/*
* When a newly observed xid arrives, it is frequently the case that it is
@ -2350,9 +2350,9 @@ RecordKnownAssignedTransactionIds(TransactionId xid)
TransactionId next_expected_xid;
/*
* Extend clog and subtrans like we do in GetNewTransactionId()
* during normal operation using individual extend steps.
* Typical case requires almost no activity.
* Extend clog and subtrans like we do in GetNewTransactionId() during
* normal operation using individual extend steps. Typical case
* requires almost no activity.
*/
next_expected_xid = latestObservedXid;
TransactionIdAdvance(next_expected_xid);
@ -2391,7 +2391,7 @@ RecordKnownAssignedTransactionIds(TransactionId xid)
*/
void
ExpireTreeKnownAssignedTransactionIds(TransactionId xid, int nsubxids,
TransactionId *subxids, TransactionId max_xid)
TransactionId *subxids, TransactionId max_xid)
{
Assert(standbyState >= STANDBY_INITIALIZED);
@ -2485,14 +2485,14 @@ ExpireOldKnownAssignedTransactionIds(TransactionId xid)
* must hold shared ProcArrayLock to examine the array. To remove XIDs from
* the array, the startup process must hold ProcArrayLock exclusively, for
* the usual transactional reasons (compare commit/abort of a transaction
* during normal running). Compressing unused entries out of the array
* during normal running). Compressing unused entries out of the array
* likewise requires exclusive lock. To add XIDs to the array, we just insert
* them into slots to the right of the head pointer and then advance the head
* pointer. This wouldn't require any lock at all, except that on machines
* with weak memory ordering we need to be careful that other processors
* see the array element changes before they see the head pointer change.
* We handle this by using a spinlock to protect reads and writes of the
* head/tail pointers. (We could dispense with the spinlock if we were to
* head/tail pointers. (We could dispense with the spinlock if we were to
* create suitable memory access barrier primitives and use those instead.)
* The spinlock must be taken to read or write the head/tail pointers unless
* the caller holds ProcArrayLock exclusively.
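
The ordering requirement described above (array slots must become visible before the advanced head pointer does) can also be expressed with explicit barriers, which is the alternative the parenthetical remark mentions. A simplified single-writer analogue using C11 atomics, not the spinlock-based procarray.c code, and with no wraparound or tail handling:

    #include <stdatomic.h>

    #define MAX_XIDS 1024

    static unsigned int     xid_array[MAX_XIDS];
    static atomic_uint      xid_count;      /* count of valid entries; one writer */

    /* Writer: fill the new slots first, then publish them with a release store,
     * so a reader that sees the new count also sees the new entries.
     * (Assumes count + n <= MAX_XIDS for the sake of the sketch.) */
    static void
    publish_xids(const unsigned int *xids, unsigned int n)
    {
        unsigned int    count = atomic_load_explicit(&xid_count, memory_order_relaxed);

        for (unsigned int i = 0; i < n; i++)
            xid_array[count + i] = xids[i];

        atomic_store_explicit(&xid_count, count + n, memory_order_release);
    }

    /* Reader: the acquire load pairs with the release store above. */
    static unsigned int
    snapshot_xids(unsigned int *dst)
    {
        unsigned int    count = atomic_load_explicit(&xid_count, memory_order_acquire);

        for (unsigned int i = 0; i < count; i++)
            dst[i] = xid_array[i];
        return count;
    }
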
@ -2534,9 +2534,10 @@ KnownAssignedXidsCompress(bool force)
{
/* use volatile pointer to prevent code rearrangement */
volatile ProcArrayStruct *pArray = procArray;
int head, tail;
int compress_index;
int i;
int head,
tail;
int compress_index;
int i;
/* no spinlock required since we hold ProcArrayLock exclusively */
head = pArray->headKnownAssignedXids;
@ -2545,16 +2546,16 @@ KnownAssignedXidsCompress(bool force)
if (!force)
{
/*
* If we can choose how much to compress, use a heuristic to
* avoid compressing too often or not often enough.
* If we can choose how much to compress, use a heuristic to avoid
* compressing too often or not often enough.
*
* Heuristic is if we have a large enough current spread and
* less than 50% of the elements are currently in use, then
* compress. This should ensure we compress fairly infrequently.
* We could compress less often though the virtual array would
* spread out more and snapshots would become more expensive.
* Heuristic is if we have a large enough current spread and less than
* 50% of the elements are currently in use, then compress. This
* should ensure we compress fairly infrequently. We could compress
* less often though the virtual array would spread out more and
* snapshots would become more expensive.
*/
int nelements = head - tail;
int nelements = head - tail;
if (nelements < 4 * PROCARRAY_MAXPROCS ||
nelements < 2 * pArray->numKnownAssignedXids)
@ -2562,8 +2563,8 @@ KnownAssignedXidsCompress(bool force)
}
/*
* We compress the array by reading the valid values from tail
* to head, re-aligning data to 0th element.
* We compress the array by reading the valid values from tail to head,
* re-aligning data to 0th element.
*/
compress_index = 0;
for (i = tail; i < head; i++)
@ -2588,7 +2589,7 @@ KnownAssignedXidsCompress(bool force)
* If exclusive_lock is true then caller already holds ProcArrayLock in
* exclusive mode, so we need no extra locking here. Else caller holds no
* lock, so we need to be sure we maintain sufficient interlocks against
* concurrent readers. (Only the startup process ever calls this, so no need
* concurrent readers. (Only the startup process ever calls this, so no need
* to worry about concurrent writers.)
*/
static void
@ -2597,17 +2598,18 @@ KnownAssignedXidsAdd(TransactionId from_xid, TransactionId to_xid,
{
/* use volatile pointer to prevent code rearrangement */
volatile ProcArrayStruct *pArray = procArray;
TransactionId next_xid;
int head, tail;
TransactionId next_xid;
int head,
tail;
int nxids;
int i;
Assert(TransactionIdPrecedesOrEquals(from_xid, to_xid));
/*
* Calculate how many array slots we'll need. Normally this is cheap;
* in the unusual case where the XIDs cross the wrap point, we do it the
* hard way.
* Calculate how many array slots we'll need. Normally this is cheap; in
* the unusual case where the XIDs cross the wrap point, we do it the hard
* way.
*/
if (to_xid >= from_xid)
nxids = to_xid - from_xid + 1;
@ -2623,8 +2625,8 @@ KnownAssignedXidsAdd(TransactionId from_xid, TransactionId to_xid,
}
/*
* Since only the startup process modifies the head/tail pointers,
* we don't need a lock to read them here.
* Since only the startup process modifies the head/tail pointers, we
* don't need a lock to read them here.
*/
head = pArray->headKnownAssignedXids;
tail = pArray->tailKnownAssignedXids;
@ -2633,9 +2635,9 @@ KnownAssignedXidsAdd(TransactionId from_xid, TransactionId to_xid,
Assert(tail >= 0 && tail < pArray->maxKnownAssignedXids);
/*
* Verify that insertions occur in TransactionId sequence. Note that
* even if the last existing element is marked invalid, it must still
* have a correctly sequenced XID value.
* Verify that insertions occur in TransactionId sequence. Note that even
* if the last existing element is marked invalid, it must still have a
* correctly sequenced XID value.
*/
if (head > tail &&
TransactionIdFollowsOrEquals(KnownAssignedXids[head - 1], from_xid))
@ -2687,8 +2689,8 @@ KnownAssignedXidsAdd(TransactionId from_xid, TransactionId to_xid,
* ensure that other processors see the above array updates before they
* see the head pointer change.
*
* If we're holding ProcArrayLock exclusively, there's no need to take
* the spinlock.
* If we're holding ProcArrayLock exclusively, there's no need to take the
* spinlock.
*/
if (exclusive_lock)
pArray->headKnownAssignedXids = head;
@ -2714,10 +2716,11 @@ KnownAssignedXidsSearch(TransactionId xid, bool remove)
{
/* use volatile pointer to prevent code rearrangement */
volatile ProcArrayStruct *pArray = procArray;
int first, last;
int head;
int tail;
int result_index = -1;
int first,
last;
int head;
int tail;
int result_index = -1;
if (remove)
{
@ -2735,15 +2738,15 @@ KnownAssignedXidsSearch(TransactionId xid, bool remove)
}
/*
* Standard binary search. Note we can ignore the KnownAssignedXidsValid
* Standard binary search. Note we can ignore the KnownAssignedXidsValid
* array here, since even invalid entries will contain sorted XIDs.
*/
first = tail;
last = head - 1;
while (first <= last)
{
int mid_index;
TransactionId mid_xid;
int mid_index;
TransactionId mid_xid;
mid_index = (first + last) / 2;
mid_xid = KnownAssignedXids[mid_index];
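
A self-contained version of the search loop begun above, using ordinary uint32 comparisons instead of the backend's wraparound-aware TransactionId macros, so it is illustrative only:

#include <stdint.h>
#include <stdio.h>

/* find xid within the sorted slice [tail, head) of xids, or return -1 */
static int
search_sorted_xids(const uint32_t *xids, int tail, int head, uint32_t xid)
{
	int			first = tail;
	int			last = head - 1;

	while (first <= last)
	{
		int			mid_index = (first + last) / 2;
		uint32_t	mid_xid = xids[mid_index];

		if (xid == mid_xid)
			return mid_index;
		else if (xid < mid_xid)
			last = mid_index - 1;
		else
			first = mid_index + 1;
	}
	return -1;
}

int
main(void)
{
	uint32_t	xids[] = {100, 101, 103, 107, 110};

	printf("%d\n", search_sorted_xids(xids, 0, 5, 107));	/* 3 */
	printf("%d\n", search_sorted_xids(xids, 0, 5, 104));	/* -1 */
	return 0;
}
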
@ -2825,12 +2828,12 @@ KnownAssignedXidsRemove(TransactionId xid)
/*
* Note: we cannot consider it an error to remove an XID that's not
* present. We intentionally remove subxact IDs while processing
* XLOG_XACT_ASSIGNMENT, to avoid array overflow. Then those XIDs
* will be removed again when the top-level xact commits or aborts.
* XLOG_XACT_ASSIGNMENT, to avoid array overflow. Then those XIDs will be
* removed again when the top-level xact commits or aborts.
*
* It might be possible to track such XIDs to distinguish this case
* from actual errors, but it would be complicated and probably not
* worth it. So, just ignore the search result.
* It might be possible to track such XIDs to distinguish this case from
* actual errors, but it would be complicated and probably not worth it.
* So, just ignore the search result.
*/
(void) KnownAssignedXidsSearch(xid, true);
}
@ -2845,7 +2848,7 @@ static void
KnownAssignedXidsRemoveTree(TransactionId xid, int nsubxids,
TransactionId *subxids)
{
int i;
int i;
if (TransactionIdIsValid(xid))
KnownAssignedXidsRemove(xid);
@ -2868,8 +2871,10 @@ KnownAssignedXidsRemovePreceding(TransactionId removeXid)
{
/* use volatile pointer to prevent code rearrangement */
volatile ProcArrayStruct *pArray = procArray;
int count = 0;
int head, tail, i;
int count = 0;
int head,
tail,
i;
if (!TransactionIdIsValid(removeXid))
{
@ -2882,8 +2887,8 @@ KnownAssignedXidsRemovePreceding(TransactionId removeXid)
elog(trace_recovery(DEBUG4), "prune KnownAssignedXids to %u", removeXid);
/*
* Mark entries invalid starting at the tail. Since array is sorted,
* we can stop as soon as we reach a entry >= removeXid.
* Mark entries invalid starting at the tail. Since array is sorted, we
* can stop as soon as we reach a entry >= removeXid.
*/
tail = pArray->tailKnownAssignedXids;
head = pArray->headKnownAssignedXids;
@ -2892,7 +2897,7 @@ KnownAssignedXidsRemovePreceding(TransactionId removeXid)
{
if (KnownAssignedXidsValid[i])
{
TransactionId knownXid = KnownAssignedXids[i];
TransactionId knownXid = KnownAssignedXids[i];
if (TransactionIdFollowsOrEquals(knownXid, removeXid))
break;
@ -2961,15 +2966,16 @@ KnownAssignedXidsGetAndSetXmin(TransactionId *xarray, TransactionId *xmin,
/* use volatile pointer to prevent code rearrangement */
volatile ProcArrayStruct *pArray = procArray;
int count = 0;
int head, tail;
int head,
tail;
int i;
/*
* Fetch head just once, since it may change while we loop.
* We can stop once we reach the initially seen head, since
* we are certain that an xid cannot enter and then leave the
* array while we hold ProcArrayLock. We might miss newly-added
* xids, but they should be >= xmax so irrelevant anyway.
* Fetch head just once, since it may change while we loop. We can stop
* once we reach the initially seen head, since we are certain that an xid
* cannot enter and then leave the array while we hold ProcArrayLock. We
* might miss newly-added xids, but they should be >= xmax so irrelevant
* anyway.
*
* Must take spinlock to ensure we see up-to-date array contents.
*/
@ -3024,9 +3030,11 @@ KnownAssignedXidsDisplay(int trace_level)
{
/* use volatile pointer to prevent code rearrangement */
volatile ProcArrayStruct *pArray = procArray;
StringInfoData buf;
int head, tail, i;
int nxids = 0;
StringInfoData buf;
int head,
tail,
i;
int nxids = 0;
tail = pArray->tailKnownAssignedXids;
head = pArray->headKnownAssignedXids;
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/ipc/shmem.c,v 1.104 2010/04/28 16:54:16 tgl Exp $
* $PostgreSQL: pgsql/src/backend/storage/ipc/shmem.c,v 1.105 2010/07/06 19:18:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -303,7 +303,7 @@ ShmemInitHash(const char *name, /* table string name for shmem index */
* already in the shmem index (hence, already initialized).
*
* Note: before Postgres 9.0, this function returned NULL for some failure
* cases. Now, it always throws error instead, so callers need not check
* cases. Now, it always throws error instead, so callers need not check
* for NULL.
*/
void *
@ -362,8 +362,8 @@ ShmemInitStruct(const char *name, Size size, bool *foundPtr)
LWLockRelease(ShmemIndexLock);
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("could not create ShmemIndex entry for data structure \"%s\"",
name)));
errmsg("could not create ShmemIndex entry for data structure \"%s\"",
name)));
}
if (*foundPtr)
@ -377,11 +377,11 @@ ShmemInitStruct(const char *name, Size size, bool *foundPtr)
{
LWLockRelease(ShmemIndexLock);
ereport(ERROR,
(errmsg("ShmemIndex entry size is wrong for data structure"
" \"%s\": expected %lu, actual %lu",
name,
(unsigned long) size,
(unsigned long) result->size)));
(errmsg("ShmemIndex entry size is wrong for data structure"
" \"%s\": expected %lu, actual %lu",
name,
(unsigned long) size,
(unsigned long) result->size)));
}
structPtr = result->location;
}
@ -11,7 +11,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/ipc/standby.c,v 1.26 2010/07/03 20:43:58 tgl Exp $
* $PostgreSQL: pgsql/src/backend/storage/ipc/standby.c,v 1.27 2010/07/06 19:18:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -125,12 +125,12 @@ ShutdownRecoveryTransactionEnvironment(void)
static TimestampTz
GetStandbyLimitTime(void)
{
TimestampTz rtime;
TimestampTz rtime;
bool fromStream;
/*
* The cutoff time is the last WAL data receipt time plus the appropriate
* delay variable. Delay of -1 means wait forever.
* delay variable. Delay of -1 means wait forever.
*/
GetXLogReceiptTime(&rtime, &fromStream);
if (fromStream)
@ -158,7 +158,7 @@ static int standbyWait_us = STANDBY_INITIAL_WAIT_US;
static bool
WaitExceedsMaxStandbyDelay(void)
{
TimestampTz ltime;
TimestampTz ltime;
/* Are we past the limit time? */
ltime = GetStandbyLimitTime();
@ -171,8 +171,8 @@ WaitExceedsMaxStandbyDelay(void)
pg_usleep(standbyWait_us);
/*
* Progressively increase the sleep times, but not to more than 1s,
* since pg_usleep isn't interruptable on some platforms.
* Progressively increase the sleep times, but not to more than 1s, since
* pg_usleep isn't interruptable on some platforms.
*/
standbyWait_us *= 2;
if (standbyWait_us > 1000000)
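
The backoff described by this comment and the surrounding lines, modelled as a standalone helper; the 1 ms starting value and the helper's name are assumptions for the sketch, only the doubling and the one-second cap come from the text above.

#include <stdio.h>

static int	standby_wait_us = 1000;		/* assumed starting interval: 1 ms */

/* return the next sleep interval, doubling it but clamping at one second */
static int
next_wait_interval(void)
{
	int			this_wait = standby_wait_us;

	standby_wait_us *= 2;
	if (standby_wait_us > 1000000)
		standby_wait_us = 1000000;	/* pg_usleep may not be interruptible */
	return this_wait;
}

int
main(void)
{
	int			i;

	for (i = 0; i < 12; i++)
		printf("sleep %d us\n", next_wait_interval());
	return 0;
}
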
@ -411,8 +411,8 @@ void
ResolveRecoveryConflictWithBufferPin(void)
{
bool sig_alarm_enabled = false;
TimestampTz ltime;
TimestampTz now;
TimestampTz ltime;
TimestampTz now;
Assert(InHotStandby);
@ -814,10 +814,10 @@ standby_desc(StringInfo buf, uint8 xl_info, char *rec)
* up from a checkpoint and are immediately at our starting point, we
* unconditionally move to STANDBY_INITIALIZED. After this point we
* must do 4 things:
* * move shared nextXid forwards as we see new xids
* * extend the clog and subtrans with each new xid
* * keep track of uncommitted known assigned xids
* * keep track of uncommitted AccessExclusiveLocks
* * move shared nextXid forwards as we see new xids
* * extend the clog and subtrans with each new xid
* * keep track of uncommitted known assigned xids
* * keep track of uncommitted AccessExclusiveLocks
*
* When we see a commit/abort we must remove known assigned xids and locks
* from the completing transaction. Attempted removals that cannot locate
@ -841,11 +841,11 @@ LogStandbySnapshot(TransactionId *oldestActiveXid, TransactionId *nextXid)
/*
* Get details of any AccessExclusiveLocks being held at the moment.
*
* XXX GetRunningTransactionLocks() currently holds a lock on all partitions
* though it is possible to further optimise the locking. By reference
* counting locks and storing the value on the ProcArray entry for each backend
* we can easily tell if any locks need recording without trying to acquire
* the partition locks and scanning the lock table.
* XXX GetRunningTransactionLocks() currently holds a lock on all
* partitions though it is possible to further optimise the locking. By
* reference counting locks and storing the value on the ProcArray entry
* for each backend we can easily tell if any locks need recording without
* trying to acquire the partition locks and scanning the lock table.
*/
locks = GetRunningTransactionLocks(&nlocks);
if (nlocks > 0)
@ -856,10 +856,12 @@ LogStandbySnapshot(TransactionId *oldestActiveXid, TransactionId *nextXid)
* record we write, because standby will open up when it sees this.
*/
running = GetRunningTransactionData();
/*
* The gap between GetRunningTransactionData() and LogCurrentRunningXacts()
* is what most of the fuss is about here, so artifically extending this
* interval is a great way to test the little used parts of the code.
* The gap between GetRunningTransactionData() and
* LogCurrentRunningXacts() is what most of the fuss is about here, so
* artifically extending this interval is a great way to test the little
* used parts of the code.
*/
LogCurrentRunningXacts(running);
@ -910,20 +912,20 @@ LogCurrentRunningXacts(RunningTransactions CurrRunningXacts)
if (CurrRunningXacts->subxid_overflow)
elog(trace_recovery(DEBUG2),
"snapshot of %u running transactions overflowed (lsn %X/%X oldest xid %u latest complete %u next xid %u)",
CurrRunningXacts->xcnt,
recptr.xlogid, recptr.xrecoff,
CurrRunningXacts->oldestRunningXid,
CurrRunningXacts->latestCompletedXid,
CurrRunningXacts->nextXid);
"snapshot of %u running transactions overflowed (lsn %X/%X oldest xid %u latest complete %u next xid %u)",
CurrRunningXacts->xcnt,
recptr.xlogid, recptr.xrecoff,
CurrRunningXacts->oldestRunningXid,
CurrRunningXacts->latestCompletedXid,
CurrRunningXacts->nextXid);
else
elog(trace_recovery(DEBUG2),
"snapshot of %u running transaction ids (lsn %X/%X oldest xid %u latest complete %u next xid %u)",
CurrRunningXacts->xcnt,
recptr.xlogid, recptr.xrecoff,
CurrRunningXacts->oldestRunningXid,
CurrRunningXacts->latestCompletedXid,
CurrRunningXacts->nextXid);
"snapshot of %u running transaction ids (lsn %X/%X oldest xid %u latest complete %u next xid %u)",
CurrRunningXacts->xcnt,
recptr.xlogid, recptr.xrecoff,
CurrRunningXacts->oldestRunningXid,
CurrRunningXacts->latestCompletedXid,
CurrRunningXacts->nextXid);
}
/*
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/lmgr/proc.c,v 1.220 2010/07/03 20:43:58 tgl Exp $
* $PostgreSQL: pgsql/src/backend/storage/lmgr/proc.c,v 1.221 2010/07/06 19:18:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -1664,6 +1664,7 @@ enable_standby_sig_alarm(TimestampTz now, TimestampTz fin_time, bool deadlock_on
long secs;
int usecs;
struct itimerval timeval;
TimestampDifference(now, statement_fin_time,
&secs, &usecs);
if (secs == 0 && usecs == 0)
@ -1715,15 +1716,15 @@ static bool
CheckStandbyTimeout(void)
{
TimestampTz now;
bool reschedule = false;
bool reschedule = false;
standby_timeout_active = false;
now = GetCurrentTimestamp();
/*
* Reschedule the timer if its not time to wake yet, or if we
* have both timers set and the first one has just been reached.
* Reschedule the timer if its not time to wake yet, or if we have both
* timers set and the first one has just been reached.
*/
if (now >= statement_fin_time)
{
@ -1731,9 +1732,8 @@ CheckStandbyTimeout(void)
{
/*
* We're still waiting when we reach deadlock timeout, so send out
* a request to have other backends check themselves for
* deadlock. Then continue waiting until statement_fin_time,
* if that's set.
* a request to have other backends check themselves for deadlock.
* Then continue waiting until statement_fin_time, if that's set.
*/
SendRecoveryConflictWithBufferPin(PROCSIG_RECOVERY_CONFLICT_STARTUP_DEADLOCK);
deadlock_timeout_active = false;
@ -1764,6 +1764,7 @@ CheckStandbyTimeout(void)
long secs;
int usecs;
struct itimerval timeval;
TimestampDifference(now, statement_fin_time,
&secs, &usecs);
if (secs == 0 && usecs == 0)
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/tcop/fastpath.c,v 1.104 2010/06/30 18:10:23 heikki Exp $
* $PostgreSQL: pgsql/src/backend/tcop/fastpath.c,v 1.105 2010/07/06 19:18:57 momjian Exp $
*
* NOTES
* This cruft is the server side of PQfn.
@ -356,7 +356,7 @@ HandleFunctionRequest(StringInfo msgBuf)
if ((fid == F_PG_GET_EXPR || fid == F_PG_GET_EXPR_EXT) && !superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("argument to pg_get_expr() must come from system catalogs")));
errmsg("argument to pg_get_expr() must come from system catalogs")));
/*
* Prepare function call info block and insert arguments.
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/tcop/postgres.c,v 1.594 2010/05/12 19:45:02 sriggs Exp $
* $PostgreSQL: pgsql/src/backend/tcop/postgres.c,v 1.595 2010/07/06 19:18:57 momjian Exp $
*
* NOTES
* this is the "main" module of the postgres backend and
@ -2840,8 +2840,8 @@ RecoveryConflictInterrupt(ProcSignalReason reason)
/*
* All conflicts apart from database cause dynamic errors where the
* command or transaction can be retried at a later point with some
* potential for success. No need to reset this, since
* non-retryable conflict errors are currently FATAL.
* potential for success. No need to reset this, since non-retryable
* conflict errors are currently FATAL.
*/
if (reason == PROCSIG_RECOVERY_CONFLICT_DATABASE)
RecoveryConflictRetryable = false;
@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/tsearch/ts_typanalyze.c,v 1.9 2010/05/30 21:59:02 tgl Exp $
* $PostgreSQL: pgsql/src/backend/tsearch/ts_typanalyze.c,v 1.10 2010/07/06 19:18:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -115,13 +115,13 @@ ts_typanalyze(PG_FUNCTION_ARGS)
* language's frequency table, where K is the target number of entries in
* the MCELEM array plus an arbitrary constant, meant to reflect the fact
* that the most common words in any language would usually be stopwords
* so we will not actually see them in the input. We assume that the
* so we will not actually see them in the input. We assume that the
* distribution of word frequencies (including the stopwords) follows Zipf's
* law with an exponent of 1.
*
* Assuming Zipfian distribution, the frequency of the K'th word is equal
* to 1/(K * H(W)) where H(n) is 1/2 + 1/3 + ... + 1/n and W is the number of
* words in the language. Putting W as one million, we get roughly 0.07/K.
* words in the language. Putting W as one million, we get roughly 0.07/K.
* Assuming top 10 words are stopwords gives s = 0.07/(K + 10). We set
* epsilon = s/10, which gives bucket width w = (K + 10)/0.007 and
* maximum expected hashtable size of about 1000 * (K + 10).
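
Plugging numbers into the formulas above (and into the cutoff used further down, which becomes 9 * lexeme_no / bucket_width once epsilon = s/10 is substituted), a small throwaway calculation; the statistics target of 100 and the lexeme count are just assumed example values:

#include <stdio.h>

int
main(void)
{
	int			statistics_target = 100;	/* example attstattarget */
	int			K = statistics_target * 10; /* wanted MCELEM entries */
	double		s = 0.07 / (K + 10);
	double		epsilon = s / 10.0;
	double		bucket_width = 1.0 / epsilon;	/* (K + 10) / 0.007 */
	double		lexeme_no = 1e6;	/* pretend we saw a million lexemes */

	printf("s = %g, epsilon = %g\n", s, epsilon);
	printf("bucket width = %.0f lexemes\n", bucket_width);
	printf("max hashtable entries ~ %.0f\n", 1000.0 * (K + 10));
	printf("cutoff count = %.0f (i.e. 9 * N / bucket_width)\n",
		   9.0 * lexeme_no / bucket_width);
	return 0;
}
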
@ -162,10 +162,10 @@ compute_tsvector_stats(VacAttrStats *stats,
TrackItem *item;
/*
* We want statistics_target * 10 lexemes in the MCELEM array. This
* We want statistics_target * 10 lexemes in the MCELEM array. This
* multiplier is pretty arbitrary, but is meant to reflect the fact that
* the number of individual lexeme values tracked in pg_statistic ought
* to be more than the number of values for a simple scalar column.
* the number of individual lexeme values tracked in pg_statistic ought to
* be more than the number of values for a simple scalar column.
*/
num_mcelem = stats->attr->attstattarget * 10;
@ -300,7 +300,7 @@ compute_tsvector_stats(VacAttrStats *stats,
/*
* Construct an array of the interesting hashtable items, that is,
* those meeting the cutoff frequency (s - epsilon)*N. Also identify
* those meeting the cutoff frequency (s - epsilon)*N. Also identify
* the minimum and maximum frequencies among these items.
*
* Since epsilon = s/10 and bucket_width = 1/epsilon, the cutoff
@ -308,7 +308,7 @@ compute_tsvector_stats(VacAttrStats *stats,
*/
cutoff_freq = 9 * lexeme_no / bucket_width;
i = hash_get_num_entries(lexemes_tab); /* surely enough space */
i = hash_get_num_entries(lexemes_tab); /* surely enough space */
sort_table = (TrackItem **) palloc(sizeof(TrackItem *) * i);
hash_seq_init(&scan_status, lexemes_tab);
@ -332,9 +332,9 @@ compute_tsvector_stats(VacAttrStats *stats,
num_mcelem, bucket_width, lexeme_no, i, track_len);
/*
* If we obtained more lexemes than we really want, get rid of
* those with least frequencies. The easiest way is to qsort the
* array into descending frequency order and truncate the array.
* If we obtained more lexemes than we really want, get rid of those
* with least frequencies. The easiest way is to qsort the array into
* descending frequency order and truncate the array.
*/
if (num_mcelem < track_len)
{
@ -383,8 +383,8 @@ compute_tsvector_stats(VacAttrStats *stats,
mcelem_freqs = (float4 *) palloc((num_mcelem + 2) * sizeof(float4));
/*
* See comments above about use of nonnull_cnt as the divisor
* for the final frequency estimates.
* See comments above about use of nonnull_cnt as the divisor for
* the final frequency estimates.
*/
for (i = 0; i < num_mcelem; i++)
{
@ -1,7 +1,7 @@
/* -----------------------------------------------------------------------
* formatting.c
*
* $PostgreSQL: pgsql/src/backend/utils/adt/formatting.c,v 1.170 2010/04/07 21:41:53 tgl Exp $
* $PostgreSQL: pgsql/src/backend/utils/adt/formatting.c,v 1.171 2010/07/06 19:18:58 momjian Exp $
*
*
* Portions Copyright (c) 1999-2010, PostgreSQL Global Development Group
@ -2658,12 +2658,13 @@ DCH_from_char(FormatNode *node, char *in, TmFromChar *out)
s += SKIP_THth(n->suffix);
break;
case DCH_Q:
/*
* We ignore 'Q' when converting to date because it is
* unclear which date in the quarter to use, and some
* people specify both quarter and month, so if it was
* honored it might conflict with the supplied month.
* That is also why we don't throw an error.
* We ignore 'Q' when converting to date because it is unclear
* which date in the quarter to use, and some people specify
* both quarter and month, so if it was honored it might
* conflict with the supplied month. That is also why we don't
* throw an error.
*
* We still parse the source string for an integer, but it
* isn't stored anywhere in 'out'.
@ -19,7 +19,7 @@
* Copyright (c) 1996-2010, PostgreSQL Global Development Group
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/utils/adt/like_match.c,v 1.29 2010/05/28 18:18:19 tgl Exp $
* $PostgreSQL: pgsql/src/backend/utils/adt/like_match.c,v 1.30 2010/07/06 19:18:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -116,10 +116,10 @@ MatchText(char *t, int tlen, char *p, int plen)
* If there are wildcards immediately following the %, we can skip
* over them first, using the idea that any sequence of N _'s and
* one or more %'s is equivalent to N _'s and one % (ie, it will
* match any sequence of at least N text characters). In this
* way we will always run the recursive search loop using a
* pattern fragment that begins with a literal character-to-match,
* thereby not recursing more than we have to.
* match any sequence of at least N text characters). In this way
* we will always run the recursive search loop using a pattern
* fragment that begins with a literal character-to-match, thereby
* not recursing more than we have to.
*/
NextByte(p, plen);
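
As a standalone illustration of the equivalence described above (a run of '%' and '_' behaves like N underscores plus a single '%'), a toy byte-oriented scanner with invented names, ignoring escapes and multibyte characters:

#include <stdio.h>

static const char *
collapse_wildcards(const char *p, int *n_underscores)
{
	int			n = 0;

	while (*p == '%' || *p == '_')
	{
		if (*p == '_')
			n++;				/* each '_' must consume one character */
		p++;					/* any number of '%' adds nothing more */
	}
	*n_underscores = n;
	return p;					/* first byte that must match literally */
}

int
main(void)
{
	int			n;
	const char *rest = collapse_wildcards("%_%__%abc", &n);

	printf("equivalent to %d '_' plus one '%%', then \"%s\"\n", n, rest);
	return 0;
}
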
@ -173,7 +173,7 @@ MatchText(char *t, int tlen, char *p, int plen)
int matched = MatchText(t, tlen, p, plen);
if (matched != LIKE_FALSE)
return matched; /* TRUE or ABORT */
return matched; /* TRUE or ABORT */
}
NextChar(t, tlen);
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/utils/adt/oid.c,v 1.77 2010/06/13 17:43:13 rhaas Exp $
* $PostgreSQL: pgsql/src/backend/utils/adt/oid.c,v 1.78 2010/07/06 19:18:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -314,15 +314,17 @@ oidparse(Node *node)
case T_Integer:
return intVal(node);
case T_Float:
/*
* Values too large for int4 will be represented as Float constants
* by the lexer. Accept these if they are valid OID strings.
* Values too large for int4 will be represented as Float
* constants by the lexer. Accept these if they are valid OID
* strings.
*/
return oidin_subr(strVal(node), NULL);
default:
elog(ERROR, "unrecognized node type: %d", (int) nodeTag(node));
}
return InvalidOid; /* keep compiler quiet */
return InvalidOid; /* keep compiler quiet */
}
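
A simplified stand-in for the "valid OID string" acceptance path above, using strtoul with format and 32-bit range checks; the real oidin_subr() reports errors with ereport() rather than returning 0, so this is only a sketch.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint32_t
parse_oid_string(const char *s)
{
	char	   *end;
	unsigned long val;

	errno = 0;
	val = strtoul(s, &end, 10);
	if (errno != 0 || end == s || *end != '\0' || val > UINT32_MAX)
		return 0;				/* stand-in for InvalidOid */
	return (uint32_t) val;
}

int
main(void)
{
	/* too large for int4, so the lexer would hand it over as a string */
	printf("%u\n", (unsigned) parse_oid_string("3000000000"));
	printf("%u\n", (unsigned) parse_oid_string("12abc"));	/* 0: not a valid OID string */
	return 0;
}
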
@ -4,7 +4,7 @@
*
* Portions Copyright (c) 2002-2010, PostgreSQL Global Development Group
*
* $PostgreSQL: pgsql/src/backend/utils/adt/pg_locale.c,v 1.56 2010/04/26 14:17:52 momjian Exp $
* $PostgreSQL: pgsql/src/backend/utils/adt/pg_locale.c,v 1.57 2010/07/06 19:18:58 momjian Exp $
*
*-----------------------------------------------------------------------
*/
@ -44,7 +44,7 @@
*
* FYI, The Open Group locale standard is defined here:
*
* http://www.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap07.html
* http://www.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap07.html
*----------
*/
@ -398,13 +398,13 @@ free_struct_lconv(struct lconv * s)
static char *
db_encoding_strdup(int encoding, const char *str)
{
char *pstr;
char *mstr;
char *pstr;
char *mstr;
/* convert the string to the database encoding */
pstr = (char *) pg_do_encoding_conversion(
(unsigned char *) str, strlen(str),
encoding, GetDatabaseEncoding());
(unsigned char *) str, strlen(str),
encoding, GetDatabaseEncoding());
mstr = strdup(pstr);
if (pstr != str)
pfree(pstr);
@ -428,6 +428,7 @@ PGLC_localeconv(void)
char *grouping;
char *thousands_sep;
int encoding;
#ifdef WIN32
char *save_lc_ctype;
#endif
@ -448,27 +449,27 @@ PGLC_localeconv(void)
save_lc_numeric = pstrdup(save_lc_numeric);
#ifdef WIN32
/*
* Ideally, monetary and numeric local symbols could be returned in
* any server encoding. Unfortunately, the WIN32 API does not allow
* setlocale() to return values in a codepage/CTYPE that uses more
* than two bytes per character, like UTF-8:
*
* http://msdn.microsoft.com/en-us/library/x99tb11d.aspx
*
* Evidently, LC_CTYPE allows us to control the encoding used
* for strings returned by localeconv(). The Open Group
* standard, mentioned at the top of this C file, doesn't
* explicitly state this.
*
* Therefore, we set LC_CTYPE to match LC_NUMERIC or LC_MONETARY
* (which cannot be UTF8), call localeconv(), and then convert from
* the numeric/monitary LC_CTYPE to the server encoding. One
* example use of this is for the Euro symbol.
*
* Perhaps someday we will use GetLocaleInfoW() which returns values
* in UTF16 and convert from that.
*/
/*
* Ideally, monetary and numeric local symbols could be returned in any
* server encoding. Unfortunately, the WIN32 API does not allow
* setlocale() to return values in a codepage/CTYPE that uses more than
* two bytes per character, like UTF-8:
*
* http://msdn.microsoft.com/en-us/library/x99tb11d.aspx
*
* Evidently, LC_CTYPE allows us to control the encoding used for strings
* returned by localeconv(). The Open Group standard, mentioned at the
* top of this C file, doesn't explicitly state this.
*
* Therefore, we set LC_CTYPE to match LC_NUMERIC or LC_MONETARY (which
* cannot be UTF8), call localeconv(), and then convert from the
* numeric/monitary LC_CTYPE to the server encoding. One example use of
* this is for the Euro symbol.
*
* Perhaps someday we will use GetLocaleInfoW() which returns values in
* UTF16 and convert from that.
*/
/* save user's value of ctype locale */
save_lc_ctype = setlocale(LC_CTYPE, NULL);
@ -567,6 +568,7 @@ strftime_win32(char *dst, size_t dstlen, const wchar_t *format, const struct tm
len = wcsftime(wbuf, MAX_L10N_DATA, format, tm);
if (len == 0)
/*
* strftime call failed - return 0 with the contents of dst
* unspecified
@ -595,7 +597,6 @@ strftime_win32(char *dst, size_t dstlen, const wchar_t *format, const struct tm
/* redefine strftime() */
#define strftime(a,b,c,d) strftime_win32(a,b,L##c,d)
#endif /* WIN32 */
@ -611,6 +612,7 @@ cache_locale_time(void)
char buf[MAX_L10N_DATA];
char *ptr;
int i;
#ifdef WIN32
char *save_lc_ctype;
#endif
@ -627,13 +629,14 @@ cache_locale_time(void)
save_lc_time = pstrdup(save_lc_time);
#ifdef WIN32
/*
* On WIN32, there is no way to get locale-specific time values in a
* specified locale, like we do for monetary/numeric. We can only get
* CP_ACP (see strftime_win32) or UTF16. Therefore, we get UTF16 and
* convert it to the database locale. However, wcsftime() internally
* uses LC_CTYPE, so we set it here. See the WIN32 comment near the
* top of PGLC_localeconv().
* convert it to the database locale. However, wcsftime() internally uses
* LC_CTYPE, so we set it here. See the WIN32 comment near the top of
* PGLC_localeconv().
*/
/* save user's value of ctype locale */
@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $PostgreSQL: pgsql/src/backend/utils/adt/xml.c,v 1.97 2010/03/03 17:29:45 tgl Exp $
* $PostgreSQL: pgsql/src/backend/utils/adt/xml.c,v 1.98 2010/07/06 19:18:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -846,7 +846,7 @@ xml_is_document(xmltype *arg)
* pg_xml_init --- set up for use of libxml
*
* This should be called by each function that is about to use libxml
* facilities. It has two responsibilities: verify compatibility with the
* facilities. It has two responsibilities: verify compatibility with the
* loaded libxml version (done on first call in a session) and establish
* or re-establish our libxml error handler. The latter needs to be done
* anytime we might have passed control to add-on modules (eg libperl) which
@ -1121,7 +1121,7 @@ static bool
print_xml_decl(StringInfo buf, const xmlChar *version,
pg_enc encoding, int standalone)
{
pg_xml_init(); /* why is this here? */
pg_xml_init(); /* why is this here? */
if ((version && strcmp((char *) version, PG_XML_DEFAULT_VERSION) != 0)
|| (encoding && encoding != PG_UTF8)
@ -1338,8 +1338,8 @@ xml_ereport(int level, int sqlcode, const char *msg)
/*
* It might seem that we should just pass xml_err_buf->data directly to
* errdetail. However, we want to clean out xml_err_buf before throwing
* error, in case there is another function using libxml further down
* the call stack.
* error, in case there is another function using libxml further down the
* call stack.
*/
if (xml_err_buf->len > 0)
{
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.152 2010/04/20 23:48:47 tgl Exp $
* $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.153 2010/07/06 19:18:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -994,6 +994,7 @@ IndexScanOK(CatCache *cache, ScanKey cur_skey)
switch (cache->id)
{
case INDEXRELID:
/*
* Rather than tracking exactly which indexes have to be loaded
* before we can use indexscans (which changes from time to time),
@ -1006,6 +1007,7 @@ IndexScanOK(CatCache *cache, ScanKey cur_skey)
case AMOID:
case AMNAME:
/*
* Always do heap scans in pg_am, because it's so small there's
* not much point in an indexscan anyway. We *must* do this when
@ -1017,6 +1019,7 @@ IndexScanOK(CatCache *cache, ScanKey cur_skey)
case AUTHNAME:
case AUTHOID:
case AUTHMEMMEMROLE:
/*
* Protect authentication lookups occurring before relcache has
* collected entries for shared indexes.
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/utils/cache/relcache.c,v 1.310 2010/04/20 23:48:47 tgl Exp $
* $PostgreSQL: pgsql/src/backend/utils/cache/relcache.c,v 1.311 2010/07/06 19:18:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -1832,7 +1832,7 @@ RelationDestroyRelation(Relation relation)
*
* NB: when rebuilding, we'd better hold some lock on the relation,
* else the catalog data we need to read could be changing under us.
* Also, a rel to be rebuilt had better have refcnt > 0. This is because
* Also, a rel to be rebuilt had better have refcnt > 0. This is because
* an sinval reset could happen while we're accessing the catalogs, and
* the rel would get blown away underneath us by RelationCacheInvalidate
* if it has zero refcnt.
@ -1847,8 +1847,8 @@ RelationClearRelation(Relation relation, bool rebuild)
Oid old_reltype = relation->rd_rel->reltype;
/*
* As per notes above, a rel to be rebuilt MUST have refcnt > 0; while
* of course it would be a bad idea to blow away one with nonzero refcnt.
* As per notes above, a rel to be rebuilt MUST have refcnt > 0; while of
* course it would be a bad idea to blow away one with nonzero refcnt.
*/
Assert(rebuild ?
!RelationHasReferenceCountZero(relation) :
@ -2051,9 +2051,9 @@ RelationFlushRelation(Relation relation)
* forget the "new" status of the relation, which is a useful
* optimization to have. Ditto for the new-relfilenode status.
*
* The rel could have zero refcnt here, so temporarily increment
* the refcnt to ensure it's safe to rebuild it. We can assume that
* the current transaction has some lock on the rel already.
* The rel could have zero refcnt here, so temporarily increment the
* refcnt to ensure it's safe to rebuild it. We can assume that the
* current transaction has some lock on the rel already.
*/
RelationIncrementReferenceCount(relation);
RelationClearRelation(relation, true);
@ -2064,7 +2064,7 @@ RelationFlushRelation(Relation relation)
/*
* Pre-existing rels can be dropped from the relcache if not open.
*/
bool rebuild = !RelationHasReferenceCountZero(relation);
bool rebuild = !RelationHasReferenceCountZero(relation);
RelationClearRelation(relation, rebuild);
}
@ -2775,8 +2775,8 @@ RelationCacheInitializePhase2(void)
RelationMapInitializePhase2();
/*
* In bootstrap mode, the shared catalogs aren't there yet anyway,
* so do nothing.
* In bootstrap mode, the shared catalogs aren't there yet anyway, so do
* nothing.
*/
if (IsBootstrapProcessingMode())
return;
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/utils/init/postinit.c,v 1.212 2010/04/26 10:52:00 rhaas Exp $
* $PostgreSQL: pgsql/src/backend/utils/init/postinit.c,v 1.213 2010/07/06 19:18:58 momjian Exp $
*
*
*-------------------------------------------------------------------------
@ -218,21 +218,22 @@ PerformAuthentication(Port *port)
elog(FATAL, "could not disable timer for authorization timeout");
/*
* Log connection for streaming replication even if Log_connections disabled.
* Log connection for streaming replication even if Log_connections
* disabled.
*/
if (am_walsender)
{
if (port->remote_port[0])
ereport(LOG,
(errmsg("replication connection authorized: user=%s host=%s port=%s",
port->user_name,
port->remote_host,
port->remote_port)));
(errmsg("replication connection authorized: user=%s host=%s port=%s",
port->user_name,
port->remote_host,
port->remote_port)));
else
ereport(LOG,
(errmsg("replication connection authorized: user=%s host=%s",
port->user_name,
port->remote_host)));
port->user_name,
port->remote_host)));
}
else if (Log_connections)
ereport(LOG,
@ -515,8 +516,8 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username,
if (IsUnderPostmaster)
{
/*
* The postmaster already started the XLOG machinery, but we need
* to call InitXLOGAccess(), if the system isn't in hot-standby mode.
* The postmaster already started the XLOG machinery, but we need to
* call InitXLOGAccess(), if the system isn't in hot-standby mode.
* This is handled by calling RecoveryInProgress and ignoring the
* result.
*/
@ -525,9 +526,9 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username,
else
{
/*
* We are either a bootstrap process or a standalone backend.
* Either way, start up the XLOG machinery, and register to have it
* closed down at exit.
* We are either a bootstrap process or a standalone backend. Either
* way, start up the XLOG machinery, and register to have it closed
* down at exit.
*/
StartupXLOG();
on_shmem_exit(ShutdownXLOG, 0);
@ -618,8 +619,8 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username,
}
/*
* If we're trying to shut down, only superusers can connect, and
* new replication connections are not allowed.
* If we're trying to shut down, only superusers can connect, and new
* replication connections are not allowed.
*/
if ((!am_superuser || am_walsender) &&
MyProcPort != NULL &&
@ -628,18 +629,18 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username,
if (am_walsender)
ereport(FATAL,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("new replication connections are not allowed during database shutdown")));
errmsg("new replication connections are not allowed during database shutdown")));
else
ereport(FATAL,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to connect during database shutdown")));
errmsg("must be superuser to connect during database shutdown")));
}
/*
* The last few connections slots are reserved for superusers.
* Although replication connections currently require superuser
* privileges, we don't allow them to consume the reserved slots,
* which are intended for interactive use.
* The last few connections slots are reserved for superusers. Although
* replication connections currently require superuser privileges, we
* don't allow them to consume the reserved slots, which are intended for
* interactive use.
*/
if ((!am_superuser || am_walsender) &&
ReservedBackends > 0 &&
@ -4,7 +4,7 @@
*
* Tatsuo Ishii
*
* $PostgreSQL: pgsql/src/backend/utils/mb/mbutils.c,v 1.95 2010/02/27 03:55:52 tgl Exp $
* $PostgreSQL: pgsql/src/backend/utils/mb/mbutils.c,v 1.96 2010/07/06 19:18:58 momjian Exp $
*/
#include "postgres.h"
@ -1016,7 +1016,7 @@ pgwin32_toUTF16(const char *str, int len, int *utf16len)
{
utf16 = (WCHAR *) palloc(sizeof(WCHAR) * (len + 1));
dstlen = MultiByteToWideChar(codepage, 0, str, len, utf16, len);
utf16[dstlen] = L'\0';
utf16[dstlen] = L '\0';
}
else
{
@ -1029,7 +1029,7 @@ pgwin32_toUTF16(const char *str, int len, int *utf16len)
utf16 = (WCHAR *) palloc(sizeof(WCHAR) * (len + 1));
dstlen = MultiByteToWideChar(CP_UTF8, 0, utf8, len, utf16, len);
utf16[dstlen] = L'\0';
utf16[dstlen] = L '\0';
if (utf8 != str)
pfree(utf8);
@ -10,7 +10,7 @@
* Written by Peter Eisentraut <peter_e@gmx.net>.
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/utils/misc/guc.c,v 1.559 2010/07/03 21:23:58 tgl Exp $
* $PostgreSQL: pgsql/src/backend/utils/misc/guc.c,v 1.560 2010/07/06 19:18:58 momjian Exp $
*
*--------------------------------------------------------------------
*/
@ -2893,7 +2893,7 @@ static void ShowAllGUCConfig(DestReceiver *dest);
static char *_ShowOption(struct config_generic * record, bool use_units);
static bool is_newvalue_equal(struct config_generic * record, const char *newvalue);
static bool validate_option_array_item(const char *name, const char *value,
bool skipIfNoPermissions);
bool skipIfNoPermissions);
/*
@ -5905,12 +5905,13 @@ define_custom_variable(struct config_generic * variable)
case PGC_S_DATABASE:
case PGC_S_USER:
case PGC_S_DATABASE_USER:
/*
* The existing value came from an ALTER ROLE/DATABASE SET command.
* We can assume that at the time the command was issued, we
* checked that the issuing user was superuser if the variable
* requires superuser privileges to set. So it's safe to
* use SUSET context here.
* The existing value came from an ALTER ROLE/DATABASE SET
* command. We can assume that at the time the command was issued,
* we checked that the issuing user was superuser if the variable
* requires superuser privileges to set. So it's safe to use
* SUSET context here.
*/
phcontext = PGC_SUSET;
break;
@ -5918,9 +5919,10 @@ define_custom_variable(struct config_generic * variable)
case PGC_S_CLIENT:
case PGC_S_SESSION:
default:
/*
* We must assume that the value came from an untrusted user,
* even if the current_user is a superuser.
* We must assume that the value came from an untrusted user, even
* if the current_user is a superuser.
*/
phcontext = PGC_USERSET;
break;
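
Pulled out as a standalone function, the source-to-context mapping above might look like the sketch below; the enum and function names are invented, with only the SUSET/USERSET split taken from the code:

#include <stdio.h>

typedef enum
{
	SRC_DATABASE,				/* ALTER DATABASE SET */
	SRC_USER,					/* ALTER ROLE SET */
	SRC_DATABASE_USER,			/* ALTER ROLE IN DATABASE SET */
	SRC_CLIENT,
	SRC_SESSION
} GucSourceSketch;

typedef enum
{
	CTX_USERSET,
	CTX_SUSET
} GucContextSketch;

static GucContextSketch
placeholder_context(GucSourceSketch source)
{
	switch (source)
	{
		case SRC_DATABASE:
		case SRC_USER:
		case SRC_DATABASE_USER:
			/* superuser-ness was checked when the SET command was issued */
			return CTX_SUSET;
		case SRC_CLIENT:
		case SRC_SESSION:
		default:
			/* must assume an untrusted user supplied the value */
			return CTX_USERSET;
	}
}

int
main(void)
{
	printf("%d %d\n",
		   placeholder_context(SRC_DATABASE) == CTX_SUSET,
		   placeholder_context(SRC_CLIENT) == CTX_USERSET);
	return 0;
}
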
@ -7443,7 +7445,7 @@ GUCArrayReset(ArrayType *array)
* Validate a proposed option setting for GUCArrayAdd/Delete/Reset.
*
* name is the option name. value is the proposed value for the Add case,
* or NULL for the Delete/Reset cases. If skipIfNoPermissions is true, it's
* or NULL for the Delete/Reset cases. If skipIfNoPermissions is true, it's
* not an error to have no permissions to set the option.
*
* Returns TRUE if OK, FALSE if skipIfNoPermissions is true and user does not
@ -7465,19 +7467,19 @@ validate_option_array_item(const char *name, const char *value,
* SUSET and user is superuser).
*
* name is not known, but exists or can be created as a placeholder
* (implying it has a prefix listed in custom_variable_classes).
* We allow this case if you're a superuser, otherwise not. Superusers
* are assumed to know what they're doing. We can't allow it for other
* users, because when the placeholder is resolved it might turn out to
* be a SUSET variable; define_custom_variable assumes we checked that.
* (implying it has a prefix listed in custom_variable_classes). We allow
* this case if you're a superuser, otherwise not. Superusers are assumed
* to know what they're doing. We can't allow it for other users, because
* when the placeholder is resolved it might turn out to be a SUSET
* variable; define_custom_variable assumes we checked that.
*
* name is not known and can't be created as a placeholder. Throw error,
* unless skipIfNoPermissions is true, in which case return FALSE.
* (It's tempting to allow this case to superusers, if the name is
* qualified but not listed in custom_variable_classes. That would
* ease restoring of dumps containing ALTER ROLE/DATABASE SET. However,
* it's not clear that this usage justifies such a loss of error checking.
* You can always fix custom_variable_classes before you restore.)
* unless skipIfNoPermissions is true, in which case return FALSE. (It's
* tempting to allow this case to superusers, if the name is qualified but
* not listed in custom_variable_classes. That would ease restoring of
* dumps containing ALTER ROLE/DATABASE SET. However, it's not clear that
* this usage justifies such a loss of error checking. You can always fix
* custom_variable_classes before you restore.)
*/
gconf = find_option(name, true, WARNING);
if (!gconf)
@ -7487,7 +7489,7 @@ validate_option_array_item(const char *name, const char *value,
return false;
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("unrecognized configuration parameter \"%s\"", name)));
errmsg("unrecognized configuration parameter \"%s\"", name)));
}
if (gconf->flags & GUC_CUSTOM_PLACEHOLDER)
@ -7507,9 +7509,9 @@ validate_option_array_item(const char *name, const char *value,
/* manual permissions check so we can avoid an error being thrown */
if (gconf->context == PGC_USERSET)
/* ok */ ;
/* ok */ ;
else if (gconf->context == PGC_SUSET && superuser())
/* ok */ ;
/* ok */ ;
else if (skipIfNoPermissions)
return false;
/* if a permissions error should be thrown, let set_config_option do it */
@ -5,7 +5,7 @@
* to contain some useful information. Mechanism differs wildly across
* platforms.
*
* $PostgreSQL: pgsql/src/backend/utils/misc/ps_status.c,v 1.41 2010/05/27 19:19:38 tgl Exp $
* $PostgreSQL: pgsql/src/backend/utils/misc/ps_status.c,v 1.42 2010/07/06 19:18:59 momjian Exp $
*
* Copyright (c) 2000-2010, PostgreSQL Global Development Group
* various details abducted from various places
@ -94,7 +94,7 @@ static size_t ps_buffer_size; /* space determined at run time */
static size_t last_status_len; /* use to minimize length of clobber */
#endif /* PS_USE_CLOBBER_ARGV */
static size_t ps_buffer_cur_len; /* nominal strlen(ps_buffer) */
static size_t ps_buffer_cur_len; /* nominal strlen(ps_buffer) */
static size_t ps_buffer_fixed_size; /* size of the constant prefix */
@ -12,7 +12,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/utils/mmgr/portalmem.c,v 1.119 2010/07/05 09:27:17 heikki Exp $
* $PostgreSQL: pgsql/src/backend/utils/mmgr/portalmem.c,v 1.120 2010/07/06 19:18:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -775,9 +775,9 @@ AtCleanup_Portals(void)
}
/*
* If a portal is still pinned, forcibly unpin it. PortalDrop will
* not let us drop the portal otherwise. Whoever pinned the portal
* was interrupted by the abort too and won't try to use it anymore.
* If a portal is still pinned, forcibly unpin it. PortalDrop will not
* let us drop the portal otherwise. Whoever pinned the portal was
* interrupted by the abort too and won't try to use it anymore.
*/
if (portal->portalPinned)
portal->portalPinned = false;
@ -15,7 +15,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_archiver.c,v 1.186 2010/06/28 02:07:02 tgl Exp $
* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_archiver.c,v 1.187 2010/07/06 19:18:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -2989,8 +2989,8 @@ ReadHead(ArchiveHandle *AH)
/*
* If we haven't already read the header, do so.
*
* NB: this code must agree with _discoverArchiveFormat(). Maybe find
* a way to unify the cases?
* NB: this code must agree with _discoverArchiveFormat(). Maybe find a
* way to unify the cases?
*/
if (!AH->readHeader)
{
@ -3085,8 +3085,8 @@ checkSeek(FILE *fp)
pgoff_t tpos;
/*
* If pgoff_t is wider than long, we must have "real" fseeko and not
* an emulation using fseek. Otherwise report no seek capability.
* If pgoff_t is wider than long, we must have "real" fseeko and not an
* emulation using fseek. Otherwise report no seek capability.
*/
#ifndef HAVE_FSEEKO
if (sizeof(pgoff_t) > sizeof(long))
@ -3100,7 +3100,7 @@ checkSeek(FILE *fp)
return false;
/*
* Check that fseeko(SEEK_SET) works, too. NB: we used to try to test
* Check that fseeko(SEEK_SET) works, too. NB: we used to try to test
* this with fseeko(fp, 0, SEEK_CUR). But some platforms treat that as a
* successful no-op even on files that are otherwise unseekable.
*/
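
A simplified stand-in for the probe this comment describes: remember the current offset with ftello() and seek back to it with SEEK_SET, which actually exercises seeking, unlike the old fseeko(fp, 0, SEEK_CUR) no-op. This uses POSIX fseeko/ftello directly and skips the real function's pgoff_t/HAVE_FSEEKO cases.

#include <stdio.h>
#include <sys/types.h>

/* return 1 if fp looks seekable, 0 otherwise */
static int
probe_seekable(FILE *fp)
{
	off_t		pos = ftello(fp);

	if (pos < 0)
		return 0;
	if (fseeko(fp, pos, SEEK_SET) != 0)
		return 0;
	return 1;
}

int
main(void)
{
	FILE	   *fp = tmpfile();

	if (fp == NULL)
		return 1;
	printf("tmpfile seekable: %d\n", probe_seekable(fp));
	fclose(fp);
	return 0;
}
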
@ -19,7 +19,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_custom.c,v 1.46 2010/06/28 02:07:02 tgl Exp $
* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_custom.c,v 1.47 2010/07/06 19:18:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -448,9 +448,9 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt)
if (!ctx->hasSeek || tctx->dataState == K_OFFSET_POS_NOT_SET)
{
/*
* We cannot seek directly to the desired block. Instead, skip
* over block headers until we find the one we want. This could
* fail if we are asked to restore items out-of-order.
* We cannot seek directly to the desired block. Instead, skip over
* block headers until we find the one we want. This could fail if we
* are asked to restore items out-of-order.
*/
_readBlockHeader(AH, &blkType, &id);
@ -496,9 +496,9 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt)
else if (!ctx->hasSeek)
die_horribly(AH, modulename, "could not find block ID %d in archive -- "
"possibly due to out-of-order restore request, "
"which cannot be handled due to non-seekable input file\n",
"which cannot be handled due to non-seekable input file\n",
te->dumpId);
else /* huh, the dataPos led us to EOF? */
else /* huh, the dataPos led us to EOF? */
die_horribly(AH, modulename, "could not find block ID %d in archive -- "
"possibly corrupt archive\n",
te->dumpId);
@ -836,9 +836,9 @@ _CloseArchive(ArchiveHandle *AH)
/*
* If possible, re-write the TOC in order to update the data offset
* information. This is not essential, as pg_restore can cope in
* most cases without it; but it can make pg_restore significantly
* faster in some situations (especially parallel restore).
* information. This is not essential, as pg_restore can cope in most
* cases without it; but it can make pg_restore significantly faster
* in some situations (especially parallel restore).
*/
if (ctx->hasSeek &&
fseeko(AH->FH, tpos, SEEK_SET) == 0)
@ -25,7 +25,7 @@
* http://archives.postgresql.org/pgsql-bugs/2010-02/msg00187.php
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump.c,v 1.580 2010/05/15 21:41:16 tgl Exp $
* $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump.c,v 1.581 2010/07/06 19:18:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -3957,8 +3957,8 @@ getIndexes(TableInfo tblinfo[], int numTables)
* find one, create a CONSTRAINT entry linked to the INDEX entry. We
* assume an index won't have more than one internal dependency.
*
* As of 9.0 we don't need to look at pg_depend but can check for
* a match to pg_constraint.conindid. The check on conrelid is
* As of 9.0 we don't need to look at pg_depend but can check for a
* match to pg_constraint.conindid. The check on conrelid is
* redundant but useful because that column is indexed while conindid
* is not.
*/
@ -3,7 +3,7 @@
*
* Copyright (c) 2000-2010, PostgreSQL Global Development Group
*
* $PostgreSQL: pgsql/src/bin/psql/command.c,v 1.220 2010/05/21 17:37:44 rhaas Exp $
* $PostgreSQL: pgsql/src/bin/psql/command.c,v 1.221 2010/07/06 19:18:59 momjian Exp $
*/
#include "postgres_fe.h"
#include "command.h"
@ -659,7 +659,7 @@ exec_command(const char *cmd,
len = strlen(opt);
while (len > 0 &&
(isspace((unsigned char) opt[len - 1])
|| opt[len - 1] == ';'))
|| opt[len - 1] == ';'))
opt[--len] = '\0';
}
@ -3,7 +3,7 @@
*
* Copyright (c) 2000-2010, PostgreSQL Global Development Group
*
* $PostgreSQL: pgsql/src/bin/psql/common.c,v 1.145 2010/05/28 20:02:32 tgl Exp $
* $PostgreSQL: pgsql/src/bin/psql/common.c,v 1.146 2010/07/06 19:18:59 momjian Exp $
*/
#include "postgres_fe.h"
#include "common.h"
@ -1108,18 +1108,18 @@ ExecQueryUsingCursor(const char *query, double *elapsed_msec)
/*
* Make sure to flush the output stream, so intermediate results are
* visible to the client immediately. We check the results because
* if the pager dies/exits/etc, there's no sense throwing more data
* at it.
* visible to the client immediately. We check the results because if
* the pager dies/exits/etc, there's no sense throwing more data at
* it.
*/
flush_error = fflush(pset.queryFout);
/*
* Check if we are at the end, if a cancel was pressed, or if
* there were any errors either trying to flush out the results,
* or more generally on the output stream at all. If we hit any
* errors writing things to the stream, we presume $PAGER has
* disappeared and stop bothering to pull down more data.
* Check if we are at the end, if a cancel was pressed, or if there
* were any errors either trying to flush out the results, or more
* generally on the output stream at all. If we hit any errors
* writing things to the stream, we presume $PAGER has disappeared and
* stop bothering to pull down more data.
*/
if (ntuples < pset.fetch_count || cancel_pressed || flush_error ||
ferror(pset.queryFout))
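
The stop condition sketched in this comment, reduced to a toy loop: flush after each batch and quit as soon as fflush() fails or the stream's error flag is set (for instance EPIPE after the pager exited). Names here are invented.

#include <stdbool.h>
#include <stdio.h>

static bool
emit_batch(FILE *out, int batchno)
{
	int			flush_error;

	fprintf(out, "batch %d\n", batchno);

	/* make results visible immediately, and notice a dead consumer */
	flush_error = fflush(out);
	if (flush_error != 0 || ferror(out))
		return false;			/* no sense throwing more data at it */
	return true;
}

int
main(void)
{
	int			i;

	for (i = 0; i < 3; i++)
	{
		if (!emit_batch(stdout, i))
			break;
	}
	return 0;
}
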
@ -8,7 +8,7 @@
*
* Copyright (c) 2000-2010, PostgreSQL Global Development Group
*
* $PostgreSQL: pgsql/src/bin/psql/describe.c,v 1.241 2010/03/11 21:29:32 tgl Exp $
* $PostgreSQL: pgsql/src/bin/psql/describe.c,v 1.242 2010/07/06 19:18:59 momjian Exp $
*/
#include "postgres_fe.h"
@ -1414,13 +1414,13 @@ describeOneTableDetails(const char *schemaname,
if (pset.sversion >= 90000)
appendPQExpBuffer(&buf,
" (NOT i.indimmediate) AND "
"EXISTS (SELECT 1 FROM pg_catalog.pg_constraint "
"EXISTS (SELECT 1 FROM pg_catalog.pg_constraint "
"WHERE conrelid = i.indrelid AND "
"conindid = i.indexrelid AND "
"contype IN ('p','u','x') AND "
"condeferrable) AS condeferrable,\n"
" (NOT i.indimmediate) AND "
"EXISTS (SELECT 1 FROM pg_catalog.pg_constraint "
"EXISTS (SELECT 1 FROM pg_catalog.pg_constraint "
"WHERE conrelid = i.indrelid AND "
"conindid = i.indexrelid AND "
"contype IN ('p','u','x') AND "
@ -1545,12 +1545,12 @@ describeOneTableDetails(const char *schemaname,
appendPQExpBuffer(&buf, "pg_catalog.pg_get_indexdef(i.indexrelid, 0, true),\n ");
if (pset.sversion >= 90000)
appendPQExpBuffer(&buf,
"pg_catalog.pg_get_constraintdef(con.oid, true), "
"pg_catalog.pg_get_constraintdef(con.oid, true), "
"contype, condeferrable, condeferred");
else
appendPQExpBuffer(&buf,
"null AS constraintdef, null AS contype, "
"false AS condeferrable, false AS condeferred");
"false AS condeferrable, false AS condeferred");
if (pset.sversion >= 80000)
appendPQExpBuffer(&buf, ", c2.reltablespace");
appendPQExpBuffer(&buf,
@ -1621,7 +1621,7 @@ describeOneTableDetails(const char *schemaname,
/* Print tablespace of the index on the same line */
if (pset.sversion >= 80000)
add_tablespace_footer(&cont, 'i',
atooid(PQgetvalue(result, i, 10)),
atooid(PQgetvalue(result, i, 10)),
false);
}
}
@ -3,7 +3,7 @@
*
* Copyright (c) 2000-2010, PostgreSQL Global Development Group
*
* $PostgreSQL: pgsql/src/bin/psql/print.c,v 1.127 2010/05/09 18:17:47 tgl Exp $
* $PostgreSQL: pgsql/src/bin/psql/print.c,v 1.128 2010/07/06 19:19:00 momjian Exp $
*/
#include "postgres_fe.h"
@ -928,14 +928,14 @@ print_aligned_text(const printTableContent *cont, FILE *fout)
/* spaces first */
fprintf(fout, "%*s", width_wrap[j] - chars_to_output, "");
fputnbytes(fout,
(char *) (this_line->ptr + bytes_output[j]),
(char *) (this_line->ptr + bytes_output[j]),
bytes_to_output);
}
else /* Left aligned cell */
{
/* spaces second */
fputnbytes(fout,
(char *) (this_line->ptr + bytes_output[j]),
(char *) (this_line->ptr + bytes_output[j]),
bytes_to_output);
}
@ -2152,7 +2152,7 @@ printTableAddCell(printTableContent *const content, const char *cell,
{
if (content->cellmustfree == NULL)
content->cellmustfree = pg_local_calloc(
content->ncolumns * content->nrows + 1, sizeof(bool));
content->ncolumns * content->nrows + 1, sizeof(bool));
content->cellmustfree[content->cellsadded] = true;
}
@ -2220,7 +2220,8 @@ printTableCleanup(printTableContent *const content)
{
if (content->cellmustfree)
{
int i;
int i;
for (i = 0; i < content->nrows * content->ncolumns; i++)
{
if (content->cellmustfree[i])
@ -3,7 +3,7 @@
*
* Copyright (c) 2000-2010, PostgreSQL Global Development Group
*
* $PostgreSQL: pgsql/src/bin/psql/print.h,v 1.45 2010/03/01 20:55:45 heikki Exp $
* $PostgreSQL: pgsql/src/bin/psql/print.h,v 1.46 2010/07/06 19:19:00 momjian Exp $
*/
#ifndef PRINT_H
#define PRINT_H
@ -158,7 +158,7 @@ extern void printTableInit(printTableContent *const content,
extern void printTableAddHeader(printTableContent *const content,
const char *header, const bool translate, const char align);
extern void printTableAddCell(printTableContent *const content,
const char *cell, const bool translate, const bool mustfree);
const char *cell, const bool translate, const bool mustfree);
extern void printTableAddFooter(printTableContent *const content,
const char *footer);
extern void printTableSetFooter(printTableContent *const content,
@ -3,7 +3,7 @@
*
* Copyright (c) 2000-2010, PostgreSQL Global Development Group
*
* $PostgreSQL: pgsql/src/bin/psql/tab-complete.c,v 1.199 2010/06/07 02:59:02 itagaki Exp $
* $PostgreSQL: pgsql/src/bin/psql/tab-complete.c,v 1.200 2010/07/06 19:19:00 momjian Exp $
*/
/*----------------------------------------------------------------------
@ -710,7 +710,7 @@ psql_completion(char *text, int start, int end)
else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
(pg_strcasecmp(prev2_wd, "AGGREGATE") == 0 ||
pg_strcasecmp(prev2_wd, "FUNCTION") == 0))
COMPLETE_WITH_CONST("(");
COMPLETE_WITH_CONST("(");
/* ALTER AGGREGATE,FUNCTION <name> (...) */
else if (pg_strcasecmp(prev4_wd, "ALTER") == 0 &&
(pg_strcasecmp(prev3_wd, "AGGREGATE") == 0 ||
@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $PostgreSQL: pgsql/src/include/access/nbtree.h,v 1.134 2010/03/28 09:27:02 sriggs Exp $
* $PostgreSQL: pgsql/src/include/access/nbtree.h,v 1.135 2010/07/06 19:19:00 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -314,9 +314,10 @@ typedef struct xl_btree_split
*/
typedef struct xl_btree_delete
{
RelFileNode node; /* RelFileNode of the index */
RelFileNode node; /* RelFileNode of the index */
BlockNumber block;
RelFileNode hnode; /* RelFileNode of the heap the index currently points at */
RelFileNode hnode; /* RelFileNode of the heap the index currently
* points at */
int nitems;
/* TARGET OFFSET NUMBERS FOLLOW AT THE END */
@ -589,9 +590,9 @@ extern void _bt_relbuf(Relation rel, Buffer buf);
extern void _bt_pageinit(Page page, Size size);
extern bool _bt_page_recyclable(Page page);
extern void _bt_delitems_delete(Relation rel, Buffer buf,
OffsetNumber *itemnos, int nitems, Relation heapRel);
OffsetNumber *itemnos, int nitems, Relation heapRel);
extern void _bt_delitems_vacuum(Relation rel, Buffer buf,
OffsetNumber *itemnos, int nitems, BlockNumber lastBlockVacuumed);
OffsetNumber *itemnos, int nitems, BlockNumber lastBlockVacuumed);
extern int _bt_pagedel(Relation rel, Buffer buf, BTStack stack);
/*
@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $PostgreSQL: pgsql/src/include/nodes/relation.h,v 1.186 2010/03/30 21:58:11 tgl Exp $
* $PostgreSQL: pgsql/src/include/nodes/relation.h,v 1.187 2010/07/06 19:19:00 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -197,8 +197,8 @@ typedef struct PlannerInfo
double tuple_fraction; /* tuple_fraction passed to query_planner */
bool hasInheritedTarget; /* true if parse->resultRelation is an
* inheritance child rel */
bool hasInheritedTarget; /* true if parse->resultRelation is an
* inheritance child rel */
bool hasJoinRTEs; /* true if any RTEs are RTE_JOIN kind */
bool hasHavingQual; /* true if havingQual was non-null */
bool hasPseudoConstantQuals; /* true if any RestrictInfo has
@ -1,4 +1,4 @@
/* $PostgreSQL: pgsql/src/include/port/win32.h,v 1.95 2010/05/28 16:34:15 itagaki Exp $ */
/* $PostgreSQL: pgsql/src/include/port/win32.h,v 1.96 2010/07/06 19:19:00 momjian Exp $ */
#if defined(_MSC_VER) || defined(__BORLANDC__)
#define WIN32_ONLY_COMPILER
@ -63,7 +63,6 @@
#else
#define PGDLLEXPORT __declspec (dllimport)
#endif
#else /* not CYGWIN, not MSVC, not MingW */
#define PGDLLIMPORT
#define PGDLLEXPORT
@ -5,7 +5,7 @@
*
* Portions Copyright (c) 2010-2010, PostgreSQL Global Development Group
*
* $PostgreSQL: pgsql/src/include/replication/walprotocol.h,v 1.1 2010/06/03 22:17:32 tgl Exp $
* $PostgreSQL: pgsql/src/include/replication/walprotocol.h,v 1.2 2010/07/06 19:19:00 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -36,11 +36,11 @@ typedef struct
XLogRecPtr walEnd;
/* Sender's system clock at the time of transmission */
TimestampTz sendTime;
TimestampTz sendTime;
} WalDataMessageHeader;
/*
* Maximum data payload in a WAL data message. Must be >= XLOG_BLCKSZ.
* Maximum data payload in a WAL data message. Must be >= XLOG_BLCKSZ.
*
* We don't have a good idea of what a good value would be; there's some
* overhead per message in both walsender and walreceiver, but on the other


@ -5,7 +5,7 @@
*
* Portions Copyright (c) 2010-2010, PostgreSQL Global Development Group
*
* $PostgreSQL: pgsql/src/include/replication/walreceiver.h,v 1.10 2010/07/03 20:43:58 tgl Exp $
* $PostgreSQL: pgsql/src/include/replication/walreceiver.h,v 1.11 2010/07/06 19:19:00 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -42,7 +42,8 @@ typedef struct
{
/*
* PID of currently active walreceiver process, its current state and
* start time (actually, the time at which it was requested to be started).
* start time (actually, the time at which it was requested to be
* started).
*/
pid_t pid;
WalRcvState walRcvState;
@ -51,16 +52,16 @@ typedef struct
/*
* receivedUpto-1 is the last byte position that has already been
* received. When startup process starts the walreceiver, it sets
* receivedUpto to the point where it wants the streaming to begin.
* After that, walreceiver updates this whenever it flushes the received
* WAL to disk.
* receivedUpto to the point where it wants the streaming to begin. After
* that, walreceiver updates this whenever it flushes the received WAL to
* disk.
*/
XLogRecPtr receivedUpto;
/*
* latestChunkStart is the starting byte position of the current "batch"
* of received WAL. It's actually the same as the previous value of
* receivedUpto before the last flush to disk. Startup process can use
* receivedUpto before the last flush to disk. Startup process can use
* this to detect whether it's keeping up or not.
*/
XLogRecPtr latestChunkStart;
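Editor's note: the comments above describe how the startup process can tell whether replay is keeping up: latestChunkStart marks the beginning of the most recently received batch of WAL, so replay that has already reached or passed it is keeping pace. A tiny standalone sketch of that comparison follows, using a plain 64-bit stand-in instead of the real XLogRecPtr (which the backend compares with its own macros).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t demo_lsn;      /* stand-in for XLogRecPtr */

/*
 * Replay that has reached or passed the start of the current batch of
 * received WAL is keeping up with the walreceiver; anything short of that
 * point is lagging.
 */
static bool
keeping_up(demo_lsn replayed_upto, demo_lsn latest_chunk_start)
{
    return replayed_upto >= latest_chunk_start;
}

int
main(void)
{
    printf("%d\n", (int) keeping_up(1200, 1000));   /* 1: keeping up */
    printf("%d\n", (int) keeping_up(800, 1000));    /* 0: falling behind */
    return 0;
}
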


@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $PostgreSQL: pgsql/src/include/storage/pmsignal.h,v 1.31 2010/05/15 20:01:32 rhaas Exp $
* $PostgreSQL: pgsql/src/include/storage/pmsignal.h,v 1.32 2010/07/06 19:19:00 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -23,7 +23,7 @@
typedef enum
{
PMSIGNAL_RECOVERY_STARTED, /* recovery has started */
PMSIGNAL_BEGIN_HOT_STANDBY, /* begin Hot Standby */
PMSIGNAL_BEGIN_HOT_STANDBY, /* begin Hot Standby */
PMSIGNAL_WAKEN_ARCHIVER, /* send a NOTIFY signal to xlog archiver */
PMSIGNAL_ROTATE_LOGFILE, /* send SIGUSR1 to syslogger to rotate logfile */
PMSIGNAL_START_AUTOVAC_LAUNCHER, /* start an autovacuum launcher */
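Editor's note: for context on how this enum is used, pmsignal.h also declares SendPostmasterSignal() and CheckPostmasterSignal(), which carry one of these reasons from a child process to the postmaster. The fragment below is backend-only (not a standalone program) and the wrapper name is invented for illustration.

#include "postgres.h"
#include "storage/pmsignal.h"

/*
 * Hypothetical helper: ask the postmaster to have the syslogger rotate its
 * logfile.  The postmaster's SIGUSR1 handler is expected to notice this via
 * CheckPostmasterSignal(PMSIGNAL_ROTATE_LOGFILE).
 */
static void
request_logfile_rotation(void)
{
    SendPostmasterSignal(PMSIGNAL_ROTATE_LOGFILE);
}
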


@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $PostgreSQL: pgsql/src/include/storage/proc.h,v 1.122 2010/05/26 19:52:52 sriggs Exp $
* $PostgreSQL: pgsql/src/include/storage/proc.h,v 1.123 2010/07/06 19:19:00 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -200,7 +200,7 @@ extern bool disable_sig_alarm(bool is_statement_timeout);
extern void handle_sig_alarm(SIGNAL_ARGS);
extern bool enable_standby_sig_alarm(TimestampTz now,
TimestampTz fin_time, bool deadlock_only);
TimestampTz fin_time, bool deadlock_only);
extern bool disable_standby_sig_alarm(void);
extern void handle_standby_sig_alarm(SIGNAL_ARGS);

Some files were not shown because too many files have changed in this diff.