pgindent run. Make it all clean.

Bruce Momjian 2001-03-22 04:01:46 +00:00
parent 6cf8707b82
commit 9e1552607a
555 changed files with 32514 additions and 28110 deletions
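
The hunks below are all mechanical reformatting. As a rough illustration only (not code taken from this commit, and with made-up names), the following sketch shows the layout that the pgindent tool in the PostgreSQL source tree settles on: the return type on its own line above the function name, a space after control keywords such as if/for/switch/while, opening braces on their own line, and pointer declarators written as "char *p" rather than "char * p".

/*
 * Hypothetical example of pgindent-style layout; the names are
 * illustrative and do not appear in the hunks below.
 */
#include <stdio.h>
#include <string.h>

static char *parse_buf;			/* "char *p", not "char * p" */

static int
buf_length(const char *s)		/* return type above the function name */
{
	if (s == NULL)				/* space after "if"; single statement, no braces */
		return 0;
	return (int) strlen(s);
}

int
main(void)
{
	int			i;

	parse_buf = "example";
	for (i = 0; i < buf_length(parse_buf); i++)		/* space after "for" */
		putchar(parse_buf[i]);
	putchar('\n');
	return 0;
}

The diffs that follow show exactly these kinds of changes; the hunks visible in this excerpt all come from contrib modules.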

View File

@ -4,76 +4,81 @@
#include "utils/elog.h"
static char * PARSE_BUFFER;
static char * PARSE_BUFFER_PTR;
static char *PARSE_BUFFER;
static char *PARSE_BUFFER_PTR;
static unsigned int PARSE_BUFFER_SIZE;
static unsigned int SCANNER_POS;
void set_parse_buffer( char* s );
void reset_parse_buffer( void );
int read_parse_buffer( void );
char * parse_buffer( void );
char * parse_buffer_ptr( void );
unsigned int parse_buffer_curr_char( void );
unsigned int parse_buffer_size( void );
unsigned int parse_buffer_pos( void );
void set_parse_buffer(char *s);
void reset_parse_buffer(void);
int read_parse_buffer(void);
char *parse_buffer(void);
char *parse_buffer_ptr(void);
unsigned int parse_buffer_curr_char(void);
unsigned int parse_buffer_size(void);
unsigned int parse_buffer_pos(void);
extern void cube_flush_scanner_buffer(void); /* defined in cubescan.l */
void set_parse_buffer( char* s )
void
set_parse_buffer(char *s)
{
PARSE_BUFFER = s;
PARSE_BUFFER_SIZE = strlen(s);
if ( PARSE_BUFFER_SIZE == 0 ) {
if (PARSE_BUFFER_SIZE == 0)
elog(ERROR, "cube_in: can't parse an empty string");
}
PARSE_BUFFER_PTR = PARSE_BUFFER;
SCANNER_POS = 0;
}
void reset_parse_buffer( void )
void
reset_parse_buffer(void)
{
PARSE_BUFFER_PTR = PARSE_BUFFER;
SCANNER_POS = 0;
cube_flush_scanner_buffer();
}
int read_parse_buffer( void )
int
read_parse_buffer(void)
{
int c;
/*
c = *PARSE_BUFFER_PTR++;
SCANNER_POS++;
* c = *PARSE_BUFFER_PTR++; SCANNER_POS++;
*/
c = PARSE_BUFFER[SCANNER_POS];
if(SCANNER_POS < PARSE_BUFFER_SIZE)
if (SCANNER_POS < PARSE_BUFFER_SIZE)
SCANNER_POS++;
return c;
}
char * parse_buffer( void )
char *
parse_buffer(void)
{
return PARSE_BUFFER;
}
unsigned int parse_buffer_curr_char( void )
unsigned int
parse_buffer_curr_char(void)
{
return PARSE_BUFFER[SCANNER_POS];
}
char * parse_buffer_ptr( void )
char *
parse_buffer_ptr(void)
{
return PARSE_BUFFER_PTR;
}
unsigned int parse_buffer_pos( void )
unsigned int
parse_buffer_pos(void)
{
return SCANNER_POS;
}
unsigned int parse_buffer_size( void )
unsigned int
parse_buffer_size(void)
{
return PARSE_BUFFER_SIZE;
}

View File

@ -1,8 +1,8 @@
extern void set_parse_buffer( char* s );
extern void reset_parse_buffer( void );
extern int read_parse_buffer( void );
extern char * parse_buffer( void );
extern char * parse_buffer_ptr( void );
extern unsigned int parse_buffer_curr_char( void );
extern unsigned int parse_buffer_pos( void );
extern unsigned int parse_buffer_size( void );
extern void set_parse_buffer(char *s);
extern void reset_parse_buffer(void);
extern int read_parse_buffer(void);
extern char *parse_buffer(void);
extern char *parse_buffer_ptr(void);
extern unsigned int parse_buffer_curr_char(void);
extern unsigned int parse_buffer_pos(void);
extern unsigned int parse_buffer_size(void);

File diff suppressed because it is too large

View File

@ -1,4 +1,5 @@
typedef struct NDBOX {
typedef struct NDBOX
{
unsigned int size; /* required to be a Postgres varlena type */
unsigned int dim;
float x[1];

View File

@ -1,7 +1,7 @@
/*
* PostgreSQL type definitions for managed LargeObjects.
*
* $Header: /cvsroot/pgsql/contrib/lo/lo.c,v 1.7 2001/02/10 02:31:25 tgl Exp $
* $Header: /cvsroot/pgsql/contrib/lo/lo.c,v 1.8 2001/03/22 03:59:09 momjian Exp $
*
*/

View File

@ -13,7 +13,8 @@
#include "libpq-fe.h"
/* these are the opts structures for command line params */
struct options {
struct options
{
int getdatabase;
int gettable;
int getoid;
@ -38,7 +39,7 @@ struct options {
/* function prototypes */
void get_opts(int, char **, struct options *);
PGconn *sql_conn(char *, struct options *);
void sql_exec_error (int);
void sql_exec_error(int);
int sql_exec(PGconn *, char *, int);
void sql_exec_dumpdb(PGconn *);
void sql_exec_dumptable(PGconn *, int);
@ -46,7 +47,8 @@ void sql_exec_searchtable(PGconn *, char *);
void sql_exec_searchoid(PGconn *, int);
/* fuction to parse command line options and check for some usage errors. */
void get_opts(int argc, char **argv, struct options *my_opts)
void
get_opts(int argc, char **argv, struct options * my_opts)
{
char c;
@ -63,9 +65,9 @@ void get_opts(int argc, char **argv, struct options *my_opts)
my_opts->remotepass = 0;
/* get opts */
while( (c = getopt(argc, argv, "H:p:U:P:d:t:o:xh?")) != EOF)
while ((c = getopt(argc, argv, "H:p:U:P:d:t:o:xh?")) != EOF)
{
switch(c)
switch (c)
{
/* specify the database */
case 'd':
@ -76,13 +78,13 @@ void get_opts(int argc, char **argv, struct options *my_opts)
/* specify the table name */
case 't':
/* make sure we set the database first */
if(!my_opts->getdatabase)
if (!my_opts->getdatabase)
{
fprintf(stderr, "Sorry, but you must specify a database to dump from.\n");
exit(1);
}
/* make sure we don't try to do a -o also */
if(my_opts->getoid)
if (my_opts->getoid)
{
fprintf(stderr, "Sorry, you can only specify either oid or table\n");
exit(1);
@ -96,13 +98,13 @@ void get_opts(int argc, char **argv, struct options *my_opts)
/* specify the oid int */
case 'o':
/* make sure we set the database first */
if(!my_opts->getdatabase)
if (!my_opts->getdatabase)
{
fprintf(stderr, "Sorry, but you must specify a database to dump from.\n");
exit(1);
}
/* make sure we don't try to do a -t also */
if(my_opts->gettable)
if (my_opts->gettable)
{
fprintf(stderr, "Sorry, you can only specify either oid or table\n");
exit(1);
@ -148,11 +150,11 @@ void get_opts(int argc, char **argv, struct options *my_opts)
case 'h':
fprintf(stderr, "\n\
Usage: pg_oid2name [-d database [-x] ] [-t table | -o oid] \n\
dafault action display all databases
dafault action display all databases\n\
-d database database to oid2name\n\
-x display system tables\n\
-t table | -o oid search for table name (-t) or\n\
oid (-o) in -d database
oid (-o) in -d database\n\
-H host connect to remote host\n\
-p port host port to connect to\n\
-U username username to connect with\n\
@ -165,11 +167,15 @@ Usage: pg_oid2name [-d database [-x] ] [-t table | -o oid] \n\
}
/* establish connection with database. */
PGconn *sql_conn(char *dbName, struct options *my_opts)
PGconn *
sql_conn(char *dbName, struct options * my_opts)
{
char *pghost, *pgport;
char *pgoptions, *pgtty;
char *pguser, *pgpass;
char *pghost,
*pgport;
char *pgoptions,
*pgtty;
char *pguser,
*pgpass;
PGconn *conn;
@ -184,27 +190,27 @@ PGconn *sql_conn(char *dbName, struct options *my_opts)
pgpass = NULL;
/* override the NULLs with the user params if passed */
if(my_opts->remotehost)
if (my_opts->remotehost)
{
pghost = (char *) malloc (128);
pghost = (char *) malloc(128);
sscanf(my_opts->_hostname, "%s", pghost);
}
if(my_opts->remoteport)
if (my_opts->remoteport)
{
pgport = (char *) malloc (6);
pgport = (char *) malloc(6);
sscanf(my_opts->_port, "%s", pgport);
}
if(my_opts->remoteuser)
if (my_opts->remoteuser)
{
pguser = (char *) malloc (128);
pguser = (char *) malloc(128);
sscanf(my_opts->_username, "%s", pguser);
}
if(my_opts->remotepass)
if (my_opts->remotepass)
{
pgpass = (char *) malloc (128);
pgpass = (char *) malloc(128);
sscanf(my_opts->_password, "%s", pgpass);
}
@ -228,10 +234,11 @@ PGconn *sql_conn(char *dbName, struct options *my_opts)
}
/* If the sql_ command has an error, this function looks up the error number and prints it out. */
void sql_exec_error (int error_number)
void
sql_exec_error(int error_number)
{
fprintf(stderr, "Error number %i.\n", error_number);
switch(error_number)
switch (error_number)
{
case 3:
fprintf(stderr, "Error: PGRES_COPY_OUT\n");
@ -256,13 +263,15 @@ void sql_exec_error (int error_number)
}
/* actual code to make call to the database and print the output data */
int sql_exec(PGconn *conn, char *todo, int match)
int
sql_exec(PGconn *conn, char *todo, int match)
{
PGresult *res;
int numbfields;
int error_number;
int i, len;
int i,
len;
/* make the call */
res = PQexec(conn, todo);
@ -284,15 +293,15 @@ int sql_exec(PGconn *conn, char *todo, int match)
numbfields = PQntuples(res);
/* if we only expect 1 and there mode than, return -2 */
if(match == 1 && numbfields > 1)
if (match == 1 && numbfields > 1)
return -2;
/* return -1 if there aren't any returns */
if(match == 1 && numbfields < 1)
if (match == 1 && numbfields < 1)
return -1;
/* for each row, dump the information */
for(i = 0; i < numbfields; i++)
for (i = 0; i < numbfields; i++)
{
len = strlen(PQgetvalue(res, i, 0));
@ -306,11 +315,12 @@ int sql_exec(PGconn *conn, char *todo, int match)
}
/* dump all databases know by the system table */
void sql_exec_dumpdb(PGconn *conn)
void
sql_exec_dumpdb(PGconn *conn)
{
char *todo;
todo = (char *) malloc (1024);
todo = (char *) malloc(1024);
/* get the oid and database name from the system pg_database table */
sprintf(todo, "select oid,datname from pg_database");
@ -320,14 +330,15 @@ void sql_exec_dumpdb(PGconn *conn)
/* display all tables in whatever db we are connected to. don't display the
system tables by default */
void sql_exec_dumptable(PGconn *conn, int systables)
void
sql_exec_dumptable(PGconn *conn, int systables)
{
char *todo;
todo = (char *) malloc (1024);
todo = (char *) malloc(1024);
/* don't exclude the systables if this is set */
if(systables == 1)
if (systables == 1)
sprintf(todo, "select relfilenode,relname from pg_class order by relname");
else
sprintf(todo, "select relfilenode,relname from pg_class where relname not like 'pg_%%' order by relname");
@ -337,12 +348,13 @@ void sql_exec_dumptable(PGconn *conn, int systables)
/* display the oid for a given tablename for whatever db we are connected
to. do we want to allow %bar% in the search? Not now. */
void sql_exec_searchtable(PGconn *conn, char *tablename)
void
sql_exec_searchtable(PGconn *conn, char *tablename)
{
int returnvalue;
char *todo;
todo = (char *) malloc (1024);
todo = (char *) malloc(1024);
/* get the oid and tablename where the name matches tablename */
sprintf(todo, "select relfilenode,relname from pg_class where relname = '%s'", tablename);
@ -350,52 +362,46 @@ void sql_exec_searchtable(PGconn *conn, char *tablename)
returnvalue = sql_exec(conn, todo, 1);
/* deal with the return errors */
if(returnvalue == -1)
{
if (returnvalue == -1)
printf("No tables with that name found\n");
}
if(returnvalue == -2)
{
if (returnvalue == -2)
printf("VERY scary: more than one table with that name found!!\n");
}
}
/* same as above */
void sql_exec_searchoid(PGconn *conn, int oid)
void
sql_exec_searchoid(PGconn *conn, int oid)
{
int returnvalue;
char *todo;
todo = (char *) malloc (1024);
todo = (char *) malloc(1024);
sprintf(todo, "select relfilenode,relname from pg_class where oid = %i", oid);
returnvalue = sql_exec(conn, todo, 1);
if(returnvalue == -1)
{
if (returnvalue == -1)
printf("No tables with that oid found\n");
}
if(returnvalue == -2)
{
if (returnvalue == -2)
printf("VERY scary: more than one table with that oid found!!\n");
}
}
int main(int argc, char **argv)
int
main(int argc, char **argv)
{
struct options *my_opts;
PGconn *pgconn;
my_opts = (struct options *) malloc (sizeof(struct options));
my_opts = (struct options *) malloc(sizeof(struct options));
/* parse the opts */
get_opts(argc, argv, my_opts);
/* display all the tables in the database */
if(my_opts->getdatabase & my_opts->gettable)
if (my_opts->getdatabase & my_opts->gettable)
{
printf("Oid of table %s from database \"%s\":\n", my_opts->_tbname, my_opts->_dbname);
printf("_______________________________\n");
@ -408,7 +414,7 @@ int main(int argc, char **argv)
}
/* search for the tablename of the given OID */
if(my_opts->getdatabase & my_opts->getoid)
if (my_opts->getdatabase & my_opts->getoid)
{
printf("Tablename of oid %i from database \"%s\":\n", my_opts->_oid, my_opts->_dbname);
printf("---------------------------------\n");
@ -421,7 +427,7 @@ int main(int argc, char **argv)
}
/* search for the oid for the given tablename */
if(my_opts->getdatabase)
if (my_opts->getdatabase)
{
printf("All tables from database \"%s\":\n", my_opts->_dbname);
printf("---------------------------------\n");

View File

@ -6,7 +6,7 @@
* copyright (c) Oliver Elphick <olly@lfix.co.uk>, 2001;
* licence: BSD
*
* $Header: /cvsroot/pgsql/contrib/pg_controldata/Attic/pg_controldata.c,v 1.2 2001/03/13 01:17:40 tgl Exp $
* $Header: /cvsroot/pgsql/contrib/pg_controldata/Attic/pg_controldata.c,v 1.3 2001/03/22 03:59:09 momjian Exp $
*/
#include "postgres.h"
@ -51,8 +51,9 @@ main()
char ckpttime_str[32];
DataDir = getenv("PGDATA");
if ( DataDir == NULL ) {
fprintf(stderr,"PGDATA is not defined\n");
if (DataDir == NULL)
{
fprintf(stderr, "PGDATA is not defined\n");
exit(1);
}
@ -74,7 +75,7 @@ main()
/* Check the CRC. */
INIT_CRC64(crc);
COMP_CRC64(crc,
(char*) &ControlFile + sizeof(crc64),
(char *) &ControlFile + sizeof(crc64),
sizeof(ControlFileData) - sizeof(crc64));
FIN_CRC64(crc);

View File

@ -1,7 +1,7 @@
/* -------------------------------------------------------------------------
* pg_dumplo
*
* $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/lo_export.c,v 1.5 2001/01/24 19:42:44 momjian Exp $
* $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/lo_export.c,v 1.6 2001/03/22 03:59:10 momjian Exp $
*
* Karel Zak 1999-2000
* -------------------------------------------------------------------------
@ -26,7 +26,7 @@ extern int errno;
void
load_lolist( LODumpMaster *pgLO )
load_lolist(LODumpMaster * pgLO)
{
LOlist *ll;
int i;
@ -52,25 +52,29 @@ load_lolist( LODumpMaster *pgLO )
" AND c.relkind = 'r' "
" AND c.relname NOT LIKE 'pg_%'");
if (PQresultStatus(pgLO->res) != PGRES_TUPLES_OK) {
if (PQresultStatus(pgLO->res) != PGRES_TUPLES_OK)
{
fprintf(stderr, "%s: Failed to get LO OIDs:\n%s", progname,
PQerrorMessage(pgLO->conn));
exit(RE_ERROR);
}
if ((n = PQntuples(pgLO->res)) == 0) {
if ((n = PQntuples(pgLO->res)) == 0)
{
fprintf(stderr, "%s: No OID columns in the database.\n", progname);
exit(RE_ERROR);
}
pgLO->lolist = (LOlist *) malloc((n + 1) * sizeof(LOlist));
if (!pgLO->lolist) {
if (!pgLO->lolist)
{
fprintf(stderr, "%s: can't allocate memory\n", progname);
exit(RE_ERROR);
}
for (i = 0, ll = pgLO->lolist; i < n; i++, ll++) {
for (i = 0, ll = pgLO->lolist; i < n; i++, ll++)
{
ll->lo_table = strdup(PQgetvalue(pgLO->res, i, 0));
ll->lo_attr = strdup(PQgetvalue(pgLO->res, i, 1));
}
@ -80,15 +84,17 @@ load_lolist( LODumpMaster *pgLO )
}
void
pglo_export(LODumpMaster *pgLO)
pglo_export(LODumpMaster * pgLO)
{
LOlist *ll;
int tuples;
char path[BUFSIZ],
Qbuff[QUERY_BUFSIZ];
if (pgLO->action != ACTION_SHOW) {
if (pgLO->action != ACTION_SHOW)
{
time_t t;
time(&t);
fprintf(pgLO->index, "#\n# This is the PostgreSQL large object dump index\n#\n");
fprintf(pgLO->index, "#\tDate: %s", ctime(&t));
@ -100,7 +106,8 @@ pglo_export(LODumpMaster *pgLO)
pgLO->counter = 0;
for(ll=pgLO->lolist; ll->lo_table != NULL; ll++) {
for (ll = pgLO->lolist; ll->lo_table != NULL; ll++)
{
/* ----------
* Query: find the LOs referenced by this column
@ -113,15 +120,19 @@ pglo_export(LODumpMaster *pgLO)
pgLO->res = PQexec(pgLO->conn, Qbuff);
if (PQresultStatus(pgLO->res) != PGRES_TUPLES_OK) {
if (PQresultStatus(pgLO->res) != PGRES_TUPLES_OK)
{
fprintf(stderr, "%s: Failed to get LO OIDs:\n%s", progname,
PQerrorMessage(pgLO->conn));
}
else if ((tuples = PQntuples(pgLO->res)) == 0) {
else if ((tuples = PQntuples(pgLO->res)) == 0)
{
if (!pgLO->quiet && pgLO->action == ACTION_EXPORT_ATTR)
printf("%s: no large objects in \"%s\".\"%s\"\n",
progname, ll->lo_table, ll->lo_attr);
} else {
}
else
{
int t;
char *val;
@ -130,13 +141,16 @@ pglo_export(LODumpMaster *pgLO)
* Create DIR/FILE
* ----------
*/
if (pgLO->action != ACTION_SHOW) {
if (pgLO->action != ACTION_SHOW)
{
sprintf(path, "%s/%s/%s", pgLO->space, pgLO->db,
ll->lo_table);
if (mkdir(path, DIR_UMASK) == -1) {
if (errno != EEXIST) {
if (mkdir(path, DIR_UMASK) == -1)
{
if (errno != EEXIST)
{
perror(path);
exit(RE_ERROR);
}
@ -145,8 +159,10 @@ pglo_export(LODumpMaster *pgLO)
sprintf(path, "%s/%s/%s/%s", pgLO->space, pgLO->db,
ll->lo_table, ll->lo_attr);
if (mkdir(path, DIR_UMASK) == -1) {
if (errno != EEXIST) {
if (mkdir(path, DIR_UMASK) == -1)
{
if (errno != EEXIST)
{
perror(path);
exit(RE_ERROR);
}
@ -159,14 +175,16 @@ pglo_export(LODumpMaster *pgLO)
pgLO->counter += tuples;
for(t=0; t<tuples; t++) {
for (t = 0; t < tuples; t++)
{
Oid lo;
val = PQgetvalue(pgLO->res, t, 0);
lo = atooid(val);
if (pgLO->action == ACTION_SHOW) {
if (pgLO->action == ACTION_SHOW)
{
printf("%s.%s: %u\n", ll->lo_table, ll->lo_attr, lo);
continue;
}

View File

@ -1,7 +1,7 @@
/* -------------------------------------------------------------------------
* pg_dumplo
*
* $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/lo_import.c,v 1.3 2001/01/24 19:42:45 momjian Exp $
* $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/lo_import.c,v 1.4 2001/03/22 03:59:10 momjian Exp $
*
* Karel Zak 1999-2000
* -------------------------------------------------------------------------
@ -25,20 +25,23 @@
extern int errno;
void
pglo_import(LODumpMaster *pgLO)
pglo_import(LODumpMaster * pgLO)
{
LOlist loa;
Oid new_oid;
char tab[MAX_TABLE_NAME], attr[MAX_ATTR_NAME],
path[BUFSIZ], lo_path[BUFSIZ],
char tab[MAX_TABLE_NAME],
attr[MAX_ATTR_NAME],
path[BUFSIZ],
lo_path[BUFSIZ],
Qbuff[QUERY_BUFSIZ];
while(fgets(Qbuff, QUERY_BUFSIZ, pgLO->index)) {
while (fgets(Qbuff, QUERY_BUFSIZ, pgLO->index))
{
if (*Qbuff == '#')
continue;
if (! pgLO->remove && ! pgLO->quiet)
if (!pgLO->remove && !pgLO->quiet)
printf(Qbuff);
sscanf(Qbuff, "%u\t%s\t%s\t%s\n", &loa.lo_oid, tab, attr, path);
@ -51,7 +54,8 @@ pglo_import(LODumpMaster *pgLO)
* Import LO
* ----------
*/
if ((new_oid = lo_import(pgLO->conn, lo_path)) == 0) {
if ((new_oid = lo_import(pgLO->conn, lo_path)) == 0)
{
fprintf(stderr, "%s: %s\n", progname, PQerrorMessage(pgLO->conn));
@ -60,7 +64,8 @@ pglo_import(LODumpMaster *pgLO)
exit(RE_ERROR);
}
if (pgLO->remove) {
if (pgLO->remove)
{
notice(pgLO, FALSE);
if (lo_unlink(pgLO->conn, loa.lo_oid) < 0)
fprintf(stderr, "%s: can't remove LO %u:\n%s",
@ -81,12 +86,13 @@ pglo_import(LODumpMaster *pgLO)
sprintf(Qbuff, "UPDATE \"%s\" SET \"%s\"=%u WHERE \"%s\"=%u",
loa.lo_table, loa.lo_attr, new_oid, loa.lo_attr, loa.lo_oid);
/*fprintf(stderr, Qbuff);*/
/* fprintf(stderr, Qbuff); */
pgLO->res = PQexec(pgLO->conn, Qbuff);
if (PQresultStatus(pgLO->res) != PGRES_COMMAND_OK) {
fprintf(stderr, "%s: %s\n",progname, PQerrorMessage(pgLO->conn));
if (PQresultStatus(pgLO->res) != PGRES_COMMAND_OK)
{
fprintf(stderr, "%s: %s\n", progname, PQerrorMessage(pgLO->conn));
PQclear(pgLO->res);
PQexec(pgLO->conn, "ROLLBACK");
fprintf(stderr, "\n%s: ROLLBACK\n", progname);
@ -94,4 +100,4 @@ pglo_import(LODumpMaster *pgLO)
}
PQclear(pgLO->res);
}
}
}

View File

@ -1,7 +1,7 @@
/* -------------------------------------------------------------------------
* pg_dumplo
*
* $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/main.c,v 1.6 2001/02/10 02:31:25 tgl Exp $
* $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/main.c,v 1.7 2001/03/22 03:59:10 momjian Exp $
*
* Karel Zak 1999-2000
* -------------------------------------------------------------------------
@ -24,9 +24,9 @@
#include "pg_dumplo.h"
#ifdef HAVE_GETOPT_LONG
#include <getopt.h>
#define no_argument 0
#define required_argument 1
#include <getopt.h>
#define no_argument 0
#define required_argument 1
#endif
extern int errno;
@ -35,7 +35,7 @@ char *progname = NULL;
int main(int argc, char **argv);
static void usage(void);
static void parse_lolist (LODumpMaster *pgLO);
static void parse_lolist(LODumpMaster * pgLO);
/*-----
@ -45,7 +45,8 @@ static void parse_lolist (LODumpMaster *pgLO);
int
main(int argc, char **argv)
{
LODumpMaster _pgLO, *pgLO = &_pgLO;
LODumpMaster _pgLO,
*pgLO = &_pgLO;
char *pwd = NULL;
pgLO->argv = argv;
@ -68,33 +69,37 @@ main(int argc, char **argv)
* Parse ARGV
* ----------
*/
if (argc > 1) {
if (argc > 1)
{
int arg;
extern int optind;
#ifdef HAVE_GETOPT_LONG
int l_index=0;
int l_index = 0;
static struct option l_opt[] = {
{ "help", no_argument, 0, 'h' },
{ "user", required_argument, 0, 'u' },
{ "pwd", required_argument, 0, 'p' },
{ "db", required_argument, 0, 'd' },
{ "host", required_argument, 0, 'h' },
{ "space", required_argument, 0, 's' },
{ "import", no_argument, 0, 'i' },
{ "export", no_argument, 0, 'e' },
{ "remove", no_argument, 0, 'r' },
{ "quiet", no_argument, 0, 'q' },
{ "all", no_argument, 0, 'a' },
{ "show", no_argument, 0, 'w' },
{ NULL, 0, 0, 0 }
{"help", no_argument, 0, 'h'},
{"user", required_argument, 0, 'u'},
{"pwd", required_argument, 0, 'p'},
{"db", required_argument, 0, 'd'},
{"host", required_argument, 0, 'h'},
{"space", required_argument, 0, 's'},
{"import", no_argument, 0, 'i'},
{"export", no_argument, 0, 'e'},
{"remove", no_argument, 0, 'r'},
{"quiet", no_argument, 0, 'q'},
{"all", no_argument, 0, 'a'},
{"show", no_argument, 0, 'w'},
{NULL, 0, 0, 0}
};
while((arg = getopt_long(argc, argv, "?aehu:p:qd:l:t:irs:w", l_opt, &l_index)) != -1) {
while ((arg = getopt_long(argc, argv, "?aehu:p:qd:l:t:irs:w", l_opt, &l_index)) != -1)
{
#else
while((arg = getopt(argc, argv, "?aehu:p:qd:l:t:irs:w")) != -1) {
while ((arg = getopt(argc, argv, "?aehu:p:qd:l:t:irs:w")) != -1)
{
#endif
switch(arg) {
switch (arg)
{
case '?':
case 'h':
usage();
@ -119,8 +124,8 @@ main(int argc, char **argv)
break;
case 'l':
pgLO->action = ACTION_EXPORT_ATTR;
pgLO->lolist_start = optind-1;
parse_lolist (pgLO);
pgLO->lolist_start = optind - 1;
parse_lolist(pgLO);
break;
case 'e':
case 'a':
@ -141,7 +146,9 @@ main(int argc, char **argv)
exit(RE_ERROR);
}
}
} else {
}
else
{
usage();
exit(RE_ERROR);
}
@ -150,14 +157,17 @@ main(int argc, char **argv)
* Check space
* ----------
*/
if (! pgLO->space && ! pgLO->action == ACTION_SHOW) {
if (!(pgLO->space = getenv("PWD"))) {
if (!pgLO->space && !pgLO->action == ACTION_SHOW)
{
if (!(pgLO->space = getenv("PWD")))
{
fprintf(stderr, "%s: not set space for dump-tree (option '-s' or $PWD).\n", progname);
exit(RE_ERROR);
}
}
if (!pgLO->action) {
if (!pgLO->action)
{
fprintf(stderr, "%s: What do you want - export or import?\n", progname);
exit(RE_ERROR);
}
@ -169,7 +179,8 @@ main(int argc, char **argv)
pgLO->conn = PQsetdbLogin(pgLO->host, NULL, NULL, NULL, pgLO->db,
pgLO->user, pwd);
if (PQstatus(pgLO->conn) == CONNECTION_BAD) {
if (PQstatus(pgLO->conn) == CONNECTION_BAD)
{
fprintf(stderr, "%s (connection): %s\n", progname, PQerrorMessage(pgLO->conn));
exit(RE_ERROR);
}
@ -187,7 +198,8 @@ main(int argc, char **argv)
PQexec(pgLO->conn, "BEGIN");
switch(pgLO->action) {
switch (pgLO->action)
{
case ACTION_SHOW:
case ACTION_EXPORT_ALL:
@ -196,7 +208,8 @@ main(int argc, char **argv)
case ACTION_EXPORT_ATTR:
pglo_export(pgLO);
if (!pgLO->quiet) {
if (!pgLO->quiet)
{
if (pgLO->action == ACTION_SHOW)
printf("\nDatabase '%s' contains %d large objects.\n\n", pgLO->db, pgLO->counter);
else
@ -221,27 +234,30 @@ main(int argc, char **argv)
}
static void
parse_lolist (LODumpMaster *pgLO)
parse_lolist(LODumpMaster * pgLO)
{
LOlist *ll;
char **d,
*loc,
buff[MAX_TABLE_NAME + MAX_ATTR_NAME +1];
buff[MAX_TABLE_NAME + MAX_ATTR_NAME + 1];
pgLO->lolist = (LOlist *) malloc(pgLO->argc * sizeof(LOlist));
if (! pgLO->lolist) {
if (!pgLO->lolist)
{
fprintf(stderr, "%s: can't allocate memory\n", progname);
exit(RE_ERROR);
}
for( d=pgLO->argv + pgLO->lolist_start, ll=pgLO->lolist;
for (d = pgLO->argv + pgLO->lolist_start, ll = pgLO->lolist;
*d != NULL;
d++, ll++) {
d++, ll++)
{
strncpy(buff, *d, MAX_TABLE_NAME + MAX_ATTR_NAME);
if ((loc = strchr(buff, '.')) == NULL) {
if ((loc = strchr(buff, '.')) == NULL)
{
fprintf(stderr, "%s: '%s' is bad 'table.attr'\n", progname, buff);
exit(RE_ERROR);
}
@ -304,5 +320,5 @@ usage()
" * option '-i' without option '-r' make new large obj in DB\n"
" not rewrite old, the '-i' UPDATE oid numbers in table.attr only!\n"
" * if is not set option -s, the pg_dumplo use $PWD\n"
); /* puts()*/
); /* puts() */
}

View File

@ -1,7 +1,7 @@
/* -------------------------------------------------------------------------
* pg_dumplo
*
* $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/pg_dumplo.h,v 1.3 2001/01/24 19:42:45 momjian Exp $
* $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/pg_dumplo.h,v 1.4 2001/03/22 03:59:10 momjian Exp $
*
* Karel Zak 1999-2000
* -------------------------------------------------------------------------
@ -36,13 +36,15 @@
* LO struct
* ----------
*/
typedef struct {
typedef struct
{
char *lo_table,
*lo_attr;
Oid lo_oid;
} LOlist;
typedef struct {
typedef struct
{
int action;
LOlist *lolist;
char **argv,
@ -60,7 +62,8 @@ typedef struct {
PGconn *conn;
} LODumpMaster;
typedef enum {
typedef enum
{
ACTION_NONE,
ACTION_SHOW,
ACTION_EXPORT_ATTR,
@ -70,10 +73,10 @@ typedef enum {
extern char *progname;
extern void notice (LODumpMaster *pgLO, int set);
extern void index_file (LODumpMaster *pgLO);
extern void load_lolist (LODumpMaster *pgLO);
extern void pglo_export (LODumpMaster *pgLO);
extern void pglo_import (LODumpMaster *pgLO);
extern void notice(LODumpMaster * pgLO, int set);
extern void index_file(LODumpMaster * pgLO);
extern void load_lolist(LODumpMaster * pgLO);
extern void pglo_export(LODumpMaster * pgLO);
extern void pglo_import(LODumpMaster * pgLO);
#endif /* PG_DUMPLO_H */

View File

@ -1,7 +1,7 @@
/* -------------------------------------------------------------------------
* pg_dumplo
*
* $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/utils.c,v 1.3 2001/01/24 19:42:45 momjian Exp $
* $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/utils.c,v 1.4 2001/03/22 03:59:10 momjian Exp $
*
* Karel Zak 1999-2000
* -------------------------------------------------------------------------
@ -24,12 +24,12 @@
extern int errno;
static void Dummy_NoticeProcessor(void * arg, const char * message);
static void Default_NoticeProcessor(void * arg, const char * message);
static void Dummy_NoticeProcessor(void *arg, const char *message);
static void Default_NoticeProcessor(void *arg, const char *message);
void
index_file(LODumpMaster *pgLO)
index_file(LODumpMaster * pgLO)
{
char path[BUFSIZ];
@ -39,10 +39,13 @@ index_file(LODumpMaster *pgLO)
sprintf(path, "%s/%s", pgLO->space, pgLO->db);
if (pgLO->action == ACTION_EXPORT_ATTR ||
pgLO->action == ACTION_EXPORT_ALL) {
pgLO->action == ACTION_EXPORT_ALL)
{
if (mkdir(path, DIR_UMASK) == -1) {
if (errno != EEXIST) {
if (mkdir(path, DIR_UMASK) == -1)
{
if (errno != EEXIST)
{
perror(path);
exit(RE_ERROR);
}
@ -50,16 +53,20 @@ index_file(LODumpMaster *pgLO)
sprintf(path, "%s/lo_dump.index", path);
if ((pgLO->index = fopen(path, "w")) == NULL) {
if ((pgLO->index = fopen(path, "w")) == NULL)
{
perror(path);
exit(RE_ERROR);
}
} else if (pgLO->action != ACTION_NONE ) {
}
else if (pgLO->action != ACTION_NONE)
{
sprintf(path, "%s/lo_dump.index", path);
if ((pgLO->index = fopen(path, "r")) == NULL) {
if ((pgLO->index = fopen(path, "r")) == NULL)
{
perror(path);
exit(RE_ERROR);
}
@ -67,20 +74,24 @@ index_file(LODumpMaster *pgLO)
}
static
void Dummy_NoticeProcessor(void * arg, const char * message)
void
Dummy_NoticeProcessor(void *arg, const char *message)
{
;
}
static
void Default_NoticeProcessor(void * arg, const char * message)
void
Default_NoticeProcessor(void *arg, const char *message)
{
fprintf(stderr, "%s", message);
}
void
notice(LODumpMaster *pgLO, int set)
notice(LODumpMaster * pgLO, int set)
{
if (set)PQsetNoticeProcessor(pgLO->conn, Default_NoticeProcessor, NULL);
else PQsetNoticeProcessor(pgLO->conn, Dummy_NoticeProcessor, NULL);
if (set)
PQsetNoticeProcessor(pgLO->conn, Default_NoticeProcessor, NULL);
else
PQsetNoticeProcessor(pgLO->conn, Dummy_NoticeProcessor, NULL);
}

View File

@ -13,24 +13,48 @@
#include <syslog.h>
#include <string.h>
struct {
struct
{
const char *tag;
int size;
int priority;
} tags[] = {
{ "", 0, LOG_NOTICE },
{ "emerg:", sizeof("emerg"), LOG_EMERG },
{ "alert:", sizeof("alert"), LOG_ALERT },
{ "crit:", sizeof("crit"), LOG_CRIT },
{ "err:", sizeof("err"), LOG_ERR },
{ "error:", sizeof("error"), LOG_ERR },
{ "warning:", sizeof("warning"), LOG_WARNING },
{ "notice:", sizeof("notice"), LOG_NOTICE },
{ "info:", sizeof("info"), LOG_INFO },
{ "debug:", sizeof("debug"), LOG_DEBUG }
} tags[] =
{
{
"", 0, LOG_NOTICE
},
{
"emerg:", sizeof("emerg"), LOG_EMERG
},
{
"alert:", sizeof("alert"), LOG_ALERT
},
{
"crit:", sizeof("crit"), LOG_CRIT
},
{
"err:", sizeof("err"), LOG_ERR
},
{
"error:", sizeof("error"), LOG_ERR
},
{
"warning:", sizeof("warning"), LOG_WARNING
},
{
"notice:", sizeof("notice"), LOG_NOTICE
},
{
"info:", sizeof("info"), LOG_INFO
},
{
"debug:", sizeof("debug"), LOG_DEBUG
}
};
int main()
int
main()
{
char buf[301];
int c;
@ -40,29 +64,29 @@ int main()
#ifndef DEBUG
openlog("postgresql", LOG_CONS, LOG_LOCAL1);
#endif
while ( (c = getchar()) != EOF) {
if (c == '\r') {
while ((c = getchar()) != EOF)
{
if (c == '\r')
continue;
}
if (c == '\n') {
int level = sizeof(tags)/sizeof(*tags);
if (c == '\n')
{
int level = sizeof(tags) / sizeof(*tags);
char *bol;
if (colon == 0 || (size_t)(colon - buf) > sizeof("warning")) {
if (colon == 0 || (size_t) (colon - buf) > sizeof("warning"))
level = 1;
}
*pos = 0;
while (--level) {
while (--level)
{
if (pos - buf >= tags[level].size
&& strncmp(buf, tags[level].tag, tags[level].size) == 0) {
&& strncmp(buf, tags[level].tag, tags[level].size) == 0)
break;
}
}
bol = buf + tags[level].size;
if (bol > buf && *bol == ' ') {
if (bol > buf && *bol == ' ')
++bol;
}
if (pos - bol > 0) {
if (pos - bol > 0)
{
#ifndef DEBUG
syslog(tags[level].priority, "%s", bol);
#else
@ -70,16 +94,13 @@ int main()
#endif
}
pos = buf;
colon = (char const *)0;
colon = (char const *) 0;
continue;
}
if (c == ':' && !colon) {
if (c == ':' && !colon)
colon = pos;
}
if ((size_t)(pos - buf) < sizeof(buf)-1) {
if ((size_t) (pos - buf) < sizeof(buf) - 1)
*pos++ = c;
}
}
return 0;
}

View File

@ -23,7 +23,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $Header: /cvsroot/pgsql/contrib/pg_resetxlog/Attic/pg_resetxlog.c,v 1.2 2001/03/16 05:08:39 tgl Exp $
* $Header: /cvsroot/pgsql/contrib/pg_resetxlog/Attic/pg_resetxlog.c,v 1.3 2001/03/22 03:59:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -110,7 +110,8 @@ static char XLogDir[MAXPGPATH];
static char ControlFilePath[MAXPGPATH];
static ControlFileData ControlFile; /* pg_control values */
static uint32 newXlogId, newXlogSeg; /* ID/Segment of new XLOG segment */
static uint32 newXlogId,
newXlogSeg; /* ID/Segment of new XLOG segment */
static bool guessed = false; /* T if we had to guess at any values */
@ -146,10 +147,11 @@ ReadControlFile(void)
if ((fd = open(ControlFilePath, O_RDONLY)) < 0)
{
/*
* If pg_control is not there at all, or we can't read it,
* the odds are we've been handed a bad DataDir path, so give up.
* User can do "touch pg_control" to force us to proceed.
* If pg_control is not there at all, or we can't read it, the
* odds are we've been handed a bad DataDir path, so give up. User
* can do "touch pg_control" to force us to proceed.
*/
perror("Failed to open $PGDATA/global/pg_control for reading");
if (errno == ENOENT)
@ -193,6 +195,7 @@ ReadControlFile(void)
guessed = true;
return true;
}
/*
* Maybe it's a 7.1beta pg_control.
*/
@ -222,49 +225,49 @@ typedef struct crc64V0
} crc64V0;
static uint32 crc_tableV0[] = {
0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f,
0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2,
0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c,
0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423,
0xcfba9599, 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, 0x01db7106,
0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d,
0x91646c97, 0xe6635c01, 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7,
0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa,
0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81,
0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, 0xe3630b12, 0x94643b84,
0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 0xd6d6a3e8, 0xa1d1937e,
0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55,
0x316e8eef, 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28,
0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f,
0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69,
0x616bffd3, 0x166ccf45, 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc,
0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693,
0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f,
0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2,
0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c,
0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423,
0xcfba9599, 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, 0x01db7106,
0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d,
0x91646c97, 0xe6635c01, 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7,
0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa,
0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81,
0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, 0xe3630b12, 0x94643b84,
0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 0xd6d6a3e8, 0xa1d1937e,
0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55,
0x316e8eef, 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28,
0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f,
0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69,
0x616bffd3, 0x166ccf45, 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc,
0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693,
0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
};
#define INIT_CRC64V0(crc) ((crc).crc1 = 0xffffffff, (crc).crc2 = 0xffffffff)
@ -356,7 +359,7 @@ typedef struct XLogPageHeaderDataV0
typedef XLogPageHeaderDataV0 *XLogPageHeaderV0;
static bool RecordIsValidV0(XLogRecordV0 *record);
static bool RecordIsValidV0(XLogRecordV0 * record);
static XLogRecordV0 *ReadRecordV0(XLogRecPtr *RecPtr, char *buffer);
static bool ValidXLOGHeaderV0(XLogPageHeaderV0 hdr);
@ -409,6 +412,7 @@ CheckControlVersion0(char *buffer, int len)
(char *) malloc(_INTL_MAXLOGRECSZ));
if (record == NULL)
{
/*
* We have to guess at the checkpoint contents.
*/
@ -435,26 +439,26 @@ CheckControlVersion0(char *buffer, int len)
* We assume all of the record has been read into memory at *record.
*/
static bool
RecordIsValidV0(XLogRecordV0 *record)
RecordIsValidV0(XLogRecordV0 * record)
{
crc64V0 crc;
uint32 len = record->xl_len;
/*
* NB: this code is not right for V0 records containing backup blocks,
* but for now it's only going to be applied to checkpoint records,
* so I'm not going to worry about it...
* but for now it's only going to be applied to checkpoint records, so
* I'm not going to worry about it...
*/
INIT_CRC64V0(crc);
COMP_CRC64V0(crc, XLogRecGetData(record), len);
COMP_CRC64V0(crc, (char*) record + sizeof(crc64V0),
COMP_CRC64V0(crc, (char *) record + sizeof(crc64V0),
SizeOfXLogRecordV0 - sizeof(crc64V0));
FIN_CRC64V0(crc);
if (!EQ_CRC64V0(record->xl_crc, crc))
return false;
return(true);
return (true);
}
/*
@ -489,7 +493,7 @@ ReadRecordV0(XLogRecPtr *RecPtr, char *buffer)
readFile = XLogFileOpen(readId, readSeg);
if (readFile < 0)
goto next_record_is_invalid;
readOff = (uint32) (-1); /* force read to occur below */
readOff = (uint32) (-1);/* force read to occur below */
}
targetPageOff = ((RecPtr->xrecoff % XLogSegSize) / BLCKSZ) * BLCKSZ;
@ -510,10 +514,13 @@ ReadRecordV0(XLogRecPtr *RecPtr, char *buffer)
if (record->xl_len == 0)
goto next_record_is_invalid;
/*
* Compute total length of record including any appended backup blocks.
* Compute total length of record including any appended backup
* blocks.
*/
total_len = SizeOfXLogRecordV0 + record->xl_len;
/*
* Make sure it will fit in buffer (currently, it is mechanically
* impossible for this test to fail, but it seems like a good idea
@ -557,7 +564,7 @@ ReadRecordV0(XLogRecPtr *RecPtr, char *buffer)
len = BLCKSZ - SizeOfXLogPHDV0 - SizeOfXLogContRecordV0;
if (contrecord->xl_len > len)
{
memcpy(buffer, (char *)contrecord + SizeOfXLogContRecordV0, len);
memcpy(buffer, (char *) contrecord + SizeOfXLogContRecordV0, len);
gotlen += len;
buffer += len;
continue;
@ -610,6 +617,7 @@ GuessControlValues(void)
{
#ifdef USE_LOCALE
char *localeptr;
#endif
/*
@ -710,8 +718,8 @@ RewriteControlFile(void)
char buffer[BLCKSZ]; /* need not be aligned */
/*
* Adjust fields as needed to force an empty XLOG starting at the
* next available segment.
* Adjust fields as needed to force an empty XLOG starting at the next
* available segment.
*/
newXlogId = ControlFile.logId;
newXlogSeg = ControlFile.logSeg;
@ -735,16 +743,16 @@ RewriteControlFile(void)
/* Contents are protected with a CRC */
INIT_CRC64(ControlFile.crc);
COMP_CRC64(ControlFile.crc,
(char*) &ControlFile + sizeof(crc64),
(char *) &ControlFile + sizeof(crc64),
sizeof(ControlFileData) - sizeof(crc64));
FIN_CRC64(ControlFile.crc);
/*
* We write out BLCKSZ bytes into pg_control, zero-padding the
* excess over sizeof(ControlFileData). This reduces the odds
* of premature-EOF errors when reading pg_control. We'll still
* fail when we check the contents of the file, but hopefully with
* a more specific error than "couldn't read pg_control".
* We write out BLCKSZ bytes into pg_control, zero-padding the excess
* over sizeof(ControlFileData). This reduces the odds of
* premature-EOF errors when reading pg_control. We'll still fail
* when we check the contents of the file, but hopefully with a more
* specific error than "couldn't read pg_control".
*/
if (sizeof(ControlFileData) > BLCKSZ)
{
@ -858,7 +866,7 @@ WriteEmptyXLOG(void)
INIT_CRC64(crc);
COMP_CRC64(crc, &ControlFile.checkPointCopy, sizeof(CheckPoint));
COMP_CRC64(crc, (char*) record + sizeof(crc64),
COMP_CRC64(crc, (char *) record + sizeof(crc64),
SizeOfXLogRecord - sizeof(crc64));
FIN_CRC64(crc);
record->xl_crc = crc;
@ -914,7 +922,7 @@ usage(void)
int
main(int argc, char ** argv)
main(int argc, char **argv)
{
int argn;
bool force = false;
@ -934,7 +942,7 @@ main(int argc, char ** argv)
usage();
}
if (argn != argc-1) /* one required non-switch argument */
if (argn != argc - 1) /* one required non-switch argument */
usage();
DataDir = argv[argn++];
@ -946,7 +954,8 @@ main(int argc, char ** argv)
/*
* Check for a postmaster lock file --- if there is one, refuse to
* proceed, on grounds we might be interfering with a live installation.
* proceed, on grounds we might be interfering with a live
* installation.
*/
snprintf(path, MAXPGPATH, "%s%cpostmaster.pid", DataDir, SEP_CHAR);
@ -973,8 +982,8 @@ main(int argc, char ** argv)
GuessControlValues();
/*
* If we had to guess anything, and -f was not given, just print
* the guessed values and exit. Also print if -n is given.
* If we had to guess anything, and -f was not given, just print the
* guessed values and exit. Also print if -n is given.
*/
if ((guessed && !force) || noupdate)
{

View File

@ -26,7 +26,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: encode.c,v 1.3 2001/02/10 02:31:25 tgl Exp $
* $Id: encode.c,v 1.4 2001/03/22 03:59:10 momjian Exp $
*/
#include "postgres.h"
@ -43,9 +43,9 @@
#endif
static pg_coding *
find_coding(pg_coding *hbuf, text *name, int silent);
find_coding(pg_coding * hbuf, text *name, int silent);
static pg_coding *
pg_find_coding(pg_coding *res, char *name);
pg_find_coding(pg_coding * res, char *name);
/* SQL function: encode(bytea, text) returns text */
@ -56,8 +56,11 @@ encode(PG_FUNCTION_ARGS)
{
text *arg;
text *name;
uint len, rlen, rlen0;
pg_coding *c, cbuf;
uint len,
rlen,
rlen0;
pg_coding *c,
cbuf;
text *res;
if (PG_ARGISNULL(0) || PG_ARGISNULL(1))
@ -71,7 +74,7 @@ encode(PG_FUNCTION_ARGS)
rlen0 = c->encode_len(len);
res = (text *)palloc(rlen0 + VARHDRSZ);
res = (text *) palloc(rlen0 + VARHDRSZ);
rlen = c->encode(VARDATA(arg), len, VARDATA(res));
VARATT_SIZEP(res) = rlen + VARHDRSZ;
@ -93,8 +96,11 @@ decode(PG_FUNCTION_ARGS)
{
text *arg;
text *name;
uint len, rlen, rlen0;
pg_coding *c, cbuf;
uint len,
rlen,
rlen0;
pg_coding *c,
cbuf;
text *res;
if (PG_ARGISNULL(0) || PG_ARGISNULL(1))
@ -108,7 +114,7 @@ decode(PG_FUNCTION_ARGS)
rlen0 = c->decode_len(len);
res = (text *)palloc(rlen0 + VARHDRSZ);
res = (text *) palloc(rlen0 + VARHDRSZ);
rlen = c->decode(VARDATA(arg), len, VARDATA(res));
VARATT_SIZEP(res) = rlen + VARHDRSZ;
@ -123,14 +129,15 @@ decode(PG_FUNCTION_ARGS)
}
static pg_coding *
find_coding(pg_coding *dst, text *name, int silent)
find_coding(pg_coding * dst, text *name, int silent)
{
pg_coding *p;
char buf[NAMEDATALEN];
uint len;
len = VARSIZE(name) - VARHDRSZ;
if (len >= NAMEDATALEN) {
if (len >= NAMEDATALEN)
{
if (silent)
return NULL;
elog(ERROR, "Encoding type does not exist (name too long)");
@ -152,12 +159,14 @@ uint
hex_encode(uint8 *src, uint len, uint8 *dst)
{
uint8 *end = src + len;
while (src < end) {
while (src < end)
{
*dst++ = hextbl[(*src >> 4) & 0xF];
*dst++ = hextbl[*src & 0xF];
src++;
}
return len*2;
return len * 2;
}
/* probably should use lookup table */
@ -181,12 +190,19 @@ get_hex(char c)
uint
hex_decode(uint8 *src, uint len, uint8 *dst)
{
uint8 *s, *srcend, v1, v2, *p = dst;
uint8 *s,
*srcend,
v1,
v2,
*p = dst;
srcend = src + len;
s = src; p = dst;
while (s < srcend) {
if (*s == ' ' || *s == '\n' || *s == '\t' || *s == '\r') {
s = src;
p = dst;
while (s < srcend)
{
if (*s == ' ' || *s == '\n' || *s == '\t' || *s == '\r')
{
s++;
continue;
}
@ -202,24 +218,30 @@ hex_decode(uint8 *src, uint len, uint8 *dst)
static unsigned char _base64[] =
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
uint
b64_encode(uint8 *src, uint len, uint8 *dst)
{
uint8 *s, *p, *end = src + len, *lend = dst + 76;
uint8 *s,
*p,
*end = src + len,
*lend = dst + 76;
int pos = 2;
unsigned long buf = 0;
s = src; p = dst;
s = src;
p = dst;
while (s < end) {
while (s < end)
{
buf |= *s << (pos << 3);
pos--;
s++;
/* write it out */
if (pos < 0) {
if (pos < 0)
{
*p++ = _base64[(buf >> 18) & 0x3f];
*p++ = _base64[(buf >> 12) & 0x3f];
*p++ = _base64[(buf >> 6) & 0x3f];
@ -228,12 +250,14 @@ b64_encode(uint8 *src, uint len, uint8 *dst)
pos = 2;
buf = 0;
}
if (p >= lend) {
if (p >= lend)
{
*p++ = '\n';
lend = p + 76;
}
}
if (pos != 2) {
if (pos != 2)
{
*p++ = _base64[(buf >> 18) & 0x3f];
*p++ = _base64[(buf >> 12) & 0x3f];
*p++ = (pos == 0) ? _base64[(buf >> 6) & 0x3f] : '=';
@ -247,14 +271,17 @@ b64_encode(uint8 *src, uint len, uint8 *dst)
uint
b64_decode(uint8 *src, uint len, uint8 *dst)
{
char *srcend = src + len, *s = src;
char *srcend = src + len,
*s = src;
uint8 *p = dst;
char c;
uint b = 0;
unsigned long buf = 0;
int pos = 0, end = 0;
int pos = 0,
end = 0;
while (s < srcend) {
while (s < srcend)
{
c = *s++;
if (c >= 'A' && c <= 'Z')
b = c - 'A';
@ -266,16 +293,21 @@ b64_decode(uint8 *src, uint len, uint8 *dst)
b = 62;
else if (c == '/')
b = 63;
else if (c == '=') {
else if (c == '=')
{
/* end sequence */
if (!end) {
if (pos == 2) end = 1;
else if (pos == 3) end = 2;
if (!end)
{
if (pos == 2)
end = 1;
else if (pos == 3)
end = 2;
else
elog(ERROR, "base64: unexpected '='");
}
b = 0;
} else if (c == ' ' || c == '\t' || c == '\n' || c == '\r')
}
else if (c == ' ' || c == '\t' || c == '\n' || c == '\r')
continue;
else
elog(ERROR, "base64: Invalid symbol");
@ -283,7 +315,8 @@ b64_decode(uint8 *src, uint len, uint8 *dst)
/* add it to buffer */
buf = (buf << 6) + b;
pos++;
if (pos == 4) {
if (pos == 4)
{
*p++ = (buf >> 16) & 255;
if (end == 0 || end > 1)
*p++ = (buf >> 8) & 255;
@ -326,21 +359,22 @@ b64_dec_len(uint srclen)
}
static pg_coding
encoding_list [] = {
{ "hex", hex_enc_len, hex_dec_len, hex_encode, hex_decode},
{ "base64", b64_enc_len, b64_dec_len, b64_encode, b64_decode},
{ NULL, NULL, NULL, NULL, NULL}
encoding_list[] = {
{"hex", hex_enc_len, hex_dec_len, hex_encode, hex_decode},
{"base64", b64_enc_len, b64_dec_len, b64_encode, b64_decode},
{NULL, NULL, NULL, NULL, NULL}
};
static pg_coding *
pg_find_coding(pg_coding *res, char *name)
pg_find_coding(pg_coding * res, char *name)
{
pg_coding *p;
for (p = encoding_list; p->name; p++) {
for (p = encoding_list; p->name; p++)
{
if (!strcasecmp(p->name, name))
return p;
}
return NULL;
}

View File

@ -26,7 +26,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: encode.h,v 1.1 2001/01/24 03:46:16 momjian Exp $
* $Id: encode.h,v 1.2 2001/03/22 03:59:10 momjian Exp $
*/
#ifndef __PG_ENCODE_H
@ -37,12 +37,13 @@ Datum encode(PG_FUNCTION_ARGS);
Datum decode(PG_FUNCTION_ARGS);
typedef struct _pg_coding pg_coding;
struct _pg_coding {
struct _pg_coding
{
char *name;
uint (*encode_len)(uint dlen);
uint (*decode_len)(uint dlen);
uint (*encode)(uint8 *data, uint dlen, uint8 *res);
uint (*decode)(uint8 *data, uint dlen, uint8 *res);
uint (*encode_len) (uint dlen);
uint (*decode_len) (uint dlen);
uint (*encode) (uint8 *data, uint dlen, uint8 *res);
uint (*decode) (uint8 *data, uint dlen, uint8 *res);
};
/* They are for outside usage in C code, if needed */
@ -57,4 +58,3 @@ uint b64_enc_len(uint srclen);
uint b64_dec_len(uint srclen);
#endif /* __PG_ENCODE_H */

View File

@ -26,7 +26,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: internal.c,v 1.2 2001/02/10 02:31:25 tgl Exp $
* $Id: internal.c,v 1.3 2001/03/22 03:59:10 momjian Exp $
*/
#include "postgres.h"
@ -49,29 +49,30 @@
#endif
static uint
pg_md5_len(pg_digest *h);
pg_md5_len(pg_digest * h);
static uint8 *
pg_md5_digest(pg_digest *h, uint8 *src, uint len, uint8 *buf);
pg_md5_digest(pg_digest * h, uint8 *src, uint len, uint8 *buf);
static uint
pg_sha1_len(pg_digest *h);
pg_sha1_len(pg_digest * h);
static uint8 *
pg_sha1_digest(pg_digest *h, uint8 *src, uint len, uint8 *buf);
pg_sha1_digest(pg_digest * h, uint8 *src, uint len, uint8 *buf);
static pg_digest
int_digest_list [] = {
{ "md5", pg_md5_len, pg_md5_digest, {0}},
{ "sha1", pg_sha1_len, pg_sha1_digest, {0}},
{ NULL, NULL, NULL, {0}}
int_digest_list[] = {
{"md5", pg_md5_len, pg_md5_digest, {0}},
{"sha1", pg_sha1_len, pg_sha1_digest, {0}},
{NULL, NULL, NULL, {0}}
};
static uint
pg_md5_len(pg_digest *h) {
pg_md5_len(pg_digest * h)
{
return MD5_DIGEST_LENGTH;
}
static uint8 *
pg_md5_digest(pg_digest *h, uint8 *src, uint len, uint8 *buf)
pg_md5_digest(pg_digest * h, uint8 *src, uint len, uint8 *buf)
{
MD5_CTX ctx;
@ -83,12 +84,13 @@ pg_md5_digest(pg_digest *h, uint8 *src, uint len, uint8 *buf)
}
static uint
pg_sha1_len(pg_digest *h) {
pg_sha1_len(pg_digest * h)
{
return SHA1_DIGEST_LENGTH;
}
static uint8 *
pg_sha1_digest(pg_digest *h, uint8 *src, uint len, uint8 *buf)
pg_sha1_digest(pg_digest * h, uint8 *src, uint len, uint8 *buf)
{
SHA1_CTX ctx;
@ -101,7 +103,7 @@ pg_sha1_digest(pg_digest *h, uint8 *src, uint len, uint8 *buf)
pg_digest *
pg_find_digest(pg_digest *h, char *name)
pg_find_digest(pg_digest * h, char *name)
{
pg_digest *p;
@ -110,5 +112,3 @@ pg_find_digest(pg_digest *h, char *name)
return p;
return NULL;
}

View File

@ -31,7 +31,7 @@
* It is possible that this works with other SHA1/MD5
* implementations too.
*
* $Id: krb.c,v 1.3 2001/02/20 15:34:14 momjian Exp $
* $Id: krb.c,v 1.4 2001/03/22 03:59:10 momjian Exp $
*/
#include "postgres.h"
@ -54,29 +54,30 @@
#endif
static uint
pg_md5_len(pg_digest *h);
pg_md5_len(pg_digest * h);
static uint8 *
pg_md5_digest(pg_digest *h, uint8 *src, uint len, uint8 *buf);
pg_md5_digest(pg_digest * h, uint8 *src, uint len, uint8 *buf);
static uint
pg_sha1_len(pg_digest *h);
pg_sha1_len(pg_digest * h);
static uint8 *
pg_sha1_digest(pg_digest *h, uint8 *src, uint len, uint8 *buf);
pg_sha1_digest(pg_digest * h, uint8 *src, uint len, uint8 *buf);
static pg_digest
int_digest_list [] = {
{ "md5", pg_md5_len, pg_md5_digest, {0}},
{ "sha1", pg_sha1_len, pg_sha1_digest, {0}},
{ NULL, NULL, NULL, {0}}
int_digest_list[] = {
{"md5", pg_md5_len, pg_md5_digest, {0}},
{"sha1", pg_sha1_len, pg_sha1_digest, {0}},
{NULL, NULL, NULL, {0}}
};
static uint
pg_md5_len(pg_digest *h) {
pg_md5_len(pg_digest * h)
{
return MD5_DIGEST_LENGTH;
}
static uint8 *
pg_md5_digest(pg_digest *h, uint8 *src, uint len, uint8 *buf)
pg_md5_digest(pg_digest * h, uint8 *src, uint len, uint8 *buf)
{
MD5_CTX ctx;
@ -88,12 +89,13 @@ pg_md5_digest(pg_digest *h, uint8 *src, uint len, uint8 *buf)
}
static uint
pg_sha1_len(pg_digest *h) {
pg_sha1_len(pg_digest * h)
{
return SHA1_DIGEST_LENGTH;
}
static uint8 *
pg_sha1_digest(pg_digest *h, uint8 *src, uint len, uint8 *buf)
pg_sha1_digest(pg_digest * h, uint8 *src, uint len, uint8 *buf)
{
SHA1_CTX ctx;
@ -106,7 +108,7 @@ pg_sha1_digest(pg_digest *h, uint8 *src, uint len, uint8 *buf)
pg_digest *
pg_find_digest(pg_digest *h, char *name)
pg_find_digest(pg_digest * h, char *name)
{
pg_digest *p;
@ -115,5 +117,3 @@ pg_find_digest(pg_digest *h, char *name)
return p;
return NULL;
}

View File

@ -1,4 +1,4 @@
/* $Id: md5.c,v 1.4 2001/02/10 02:31:25 tgl Exp $ */
/* $Id: md5.c,v 1.5 2001/03/22 03:59:10 momjian Exp $ */
/* $KAME: md5.c,v 1.3 2000/02/22 14:01:17 itojun Exp $ */
/*
@ -125,10 +125,11 @@ static const uint8 md5_paddat[MD5_BUFLEN] = {
0, 0, 0, 0, 0, 0, 0, 0,
};
static void md5_calc (uint8 *, md5_ctxt *);
static void md5_calc(uint8 *, md5_ctxt *);
void md5_init(ctxt)
md5_ctxt *ctxt;
void
md5_init(ctxt)
md5_ctxt *ctxt;
{
ctxt->md5_n = 0;
ctxt->md5_i = 0;
@ -139,52 +140,60 @@ void md5_init(ctxt)
bzero(ctxt->md5_buf, sizeof(ctxt->md5_buf));
}
void md5_loop(ctxt, input, len)
md5_ctxt *ctxt;
uint8 *input;
unsigned int len; /* number of bytes */
void
md5_loop(ctxt, input, len)
md5_ctxt *ctxt;
uint8 *input;
unsigned int len; /* number of bytes */
{
unsigned int gap, i;
unsigned int gap,
i;
ctxt->md5_n += len * 8; /* byte to bit */
gap = MD5_BUFLEN - ctxt->md5_i;
if (len >= gap) {
bcopy((void *)input, (void *)(ctxt->md5_buf + ctxt->md5_i),
if (len >= gap)
{
bcopy((void *) input, (void *) (ctxt->md5_buf + ctxt->md5_i),
gap);
md5_calc(ctxt->md5_buf, ctxt);
for (i = gap; i + MD5_BUFLEN <= len; i += MD5_BUFLEN) {
md5_calc((uint8 *)(input + i), ctxt);
}
for (i = gap; i + MD5_BUFLEN <= len; i += MD5_BUFLEN)
md5_calc((uint8 *) (input + i), ctxt);
ctxt->md5_i = len - i;
bcopy((void *)(input + i), (void *)ctxt->md5_buf, ctxt->md5_i);
} else {
bcopy((void *)input, (void *)(ctxt->md5_buf + ctxt->md5_i),
bcopy((void *) (input + i), (void *) ctxt->md5_buf, ctxt->md5_i);
}
else
{
bcopy((void *) input, (void *) (ctxt->md5_buf + ctxt->md5_i),
len);
ctxt->md5_i += len;
}
}
void md5_pad(ctxt)
md5_ctxt *ctxt;
void
md5_pad(ctxt)
md5_ctxt *ctxt;
{
unsigned int gap;
/* Don't count up padding. Keep md5_n. */
gap = MD5_BUFLEN - ctxt->md5_i;
if (gap > 8) {
bcopy((void *)md5_paddat,
(void *)(ctxt->md5_buf + ctxt->md5_i),
if (gap > 8)
{
bcopy((void *) md5_paddat,
(void *) (ctxt->md5_buf + ctxt->md5_i),
gap - sizeof(ctxt->md5_n));
} else {
}
else
{
/* including gap == 8 */
bcopy((void *)md5_paddat, (void *)(ctxt->md5_buf + ctxt->md5_i),
bcopy((void *) md5_paddat, (void *) (ctxt->md5_buf + ctxt->md5_i),
gap);
md5_calc(ctxt->md5_buf, ctxt);
bcopy((void *)(md5_paddat + gap),
(void *)ctxt->md5_buf,
bcopy((void *) (md5_paddat + gap),
(void *) ctxt->md5_buf,
MD5_BUFLEN - sizeof(ctxt->md5_n));
}
@ -206,98 +215,192 @@ void md5_pad(ctxt)
md5_calc(ctxt->md5_buf, ctxt);
}
void md5_result(digest, ctxt)
uint8 *digest;
md5_ctxt *ctxt;
void
md5_result(digest, ctxt)
uint8 *digest;
md5_ctxt *ctxt;
{
/* 4 byte words */
#if BYTE_ORDER == LITTLE_ENDIAN
bcopy(&ctxt->md5_st8[0], digest, 16);
#endif
#if BYTE_ORDER == BIG_ENDIAN
digest[ 0] = ctxt->md5_st8[ 3]; digest[ 1] = ctxt->md5_st8[ 2];
digest[ 2] = ctxt->md5_st8[ 1]; digest[ 3] = ctxt->md5_st8[ 0];
digest[ 4] = ctxt->md5_st8[ 7]; digest[ 5] = ctxt->md5_st8[ 6];
digest[ 6] = ctxt->md5_st8[ 5]; digest[ 7] = ctxt->md5_st8[ 4];
digest[ 8] = ctxt->md5_st8[11]; digest[ 9] = ctxt->md5_st8[10];
digest[10] = ctxt->md5_st8[ 9]; digest[11] = ctxt->md5_st8[ 8];
digest[12] = ctxt->md5_st8[15]; digest[13] = ctxt->md5_st8[14];
digest[14] = ctxt->md5_st8[13]; digest[15] = ctxt->md5_st8[12];
digest[0] = ctxt->md5_st8[3];
digest[1] = ctxt->md5_st8[2];
digest[2] = ctxt->md5_st8[1];
digest[3] = ctxt->md5_st8[0];
digest[4] = ctxt->md5_st8[7];
digest[5] = ctxt->md5_st8[6];
digest[6] = ctxt->md5_st8[5];
digest[7] = ctxt->md5_st8[4];
digest[8] = ctxt->md5_st8[11];
digest[9] = ctxt->md5_st8[10];
digest[10] = ctxt->md5_st8[9];
digest[11] = ctxt->md5_st8[8];
digest[12] = ctxt->md5_st8[15];
digest[13] = ctxt->md5_st8[14];
digest[14] = ctxt->md5_st8[13];
digest[15] = ctxt->md5_st8[12];
#endif
}
#if BYTE_ORDER == BIG_ENDIAN
uint32 X[16];
#endif
static void md5_calc(b64, ctxt)
uint8 *b64;
md5_ctxt *ctxt;
static void
md5_calc(b64, ctxt)
uint8 *b64;
md5_ctxt *ctxt;
{
uint32 A = ctxt->md5_sta;
uint32 B = ctxt->md5_stb;
uint32 C = ctxt->md5_stc;
uint32 D = ctxt->md5_std;
#if BYTE_ORDER == LITTLE_ENDIAN
uint32 *X = (uint32 *)b64;
uint32 *X = (uint32 *) b64;
#endif
#if BYTE_ORDER == BIG_ENDIAN
/* 4 byte words */
/* what a brute force but fast! */
uint8 *y = (uint8 *)X;
y[ 0] = b64[ 3]; y[ 1] = b64[ 2]; y[ 2] = b64[ 1]; y[ 3] = b64[ 0];
y[ 4] = b64[ 7]; y[ 5] = b64[ 6]; y[ 6] = b64[ 5]; y[ 7] = b64[ 4];
y[ 8] = b64[11]; y[ 9] = b64[10]; y[10] = b64[ 9]; y[11] = b64[ 8];
y[12] = b64[15]; y[13] = b64[14]; y[14] = b64[13]; y[15] = b64[12];
y[16] = b64[19]; y[17] = b64[18]; y[18] = b64[17]; y[19] = b64[16];
y[20] = b64[23]; y[21] = b64[22]; y[22] = b64[21]; y[23] = b64[20];
y[24] = b64[27]; y[25] = b64[26]; y[26] = b64[25]; y[27] = b64[24];
y[28] = b64[31]; y[29] = b64[30]; y[30] = b64[29]; y[31] = b64[28];
y[32] = b64[35]; y[33] = b64[34]; y[34] = b64[33]; y[35] = b64[32];
y[36] = b64[39]; y[37] = b64[38]; y[38] = b64[37]; y[39] = b64[36];
y[40] = b64[43]; y[41] = b64[42]; y[42] = b64[41]; y[43] = b64[40];
y[44] = b64[47]; y[45] = b64[46]; y[46] = b64[45]; y[47] = b64[44];
y[48] = b64[51]; y[49] = b64[50]; y[50] = b64[49]; y[51] = b64[48];
y[52] = b64[55]; y[53] = b64[54]; y[54] = b64[53]; y[55] = b64[52];
y[56] = b64[59]; y[57] = b64[58]; y[58] = b64[57]; y[59] = b64[56];
y[60] = b64[63]; y[61] = b64[62]; y[62] = b64[61]; y[63] = b64[60];
uint8 *y = (uint8 *) X;
y[0] = b64[3];
y[1] = b64[2];
y[2] = b64[1];
y[3] = b64[0];
y[4] = b64[7];
y[5] = b64[6];
y[6] = b64[5];
y[7] = b64[4];
y[8] = b64[11];
y[9] = b64[10];
y[10] = b64[9];
y[11] = b64[8];
y[12] = b64[15];
y[13] = b64[14];
y[14] = b64[13];
y[15] = b64[12];
y[16] = b64[19];
y[17] = b64[18];
y[18] = b64[17];
y[19] = b64[16];
y[20] = b64[23];
y[21] = b64[22];
y[22] = b64[21];
y[23] = b64[20];
y[24] = b64[27];
y[25] = b64[26];
y[26] = b64[25];
y[27] = b64[24];
y[28] = b64[31];
y[29] = b64[30];
y[30] = b64[29];
y[31] = b64[28];
y[32] = b64[35];
y[33] = b64[34];
y[34] = b64[33];
y[35] = b64[32];
y[36] = b64[39];
y[37] = b64[38];
y[38] = b64[37];
y[39] = b64[36];
y[40] = b64[43];
y[41] = b64[42];
y[42] = b64[41];
y[43] = b64[40];
y[44] = b64[47];
y[45] = b64[46];
y[46] = b64[45];
y[47] = b64[44];
y[48] = b64[51];
y[49] = b64[50];
y[50] = b64[49];
y[51] = b64[48];
y[52] = b64[55];
y[53] = b64[54];
y[54] = b64[53];
y[55] = b64[52];
y[56] = b64[59];
y[57] = b64[58];
y[58] = b64[57];
y[59] = b64[56];
y[60] = b64[63];
y[61] = b64[62];
y[62] = b64[61];
y[63] = b64[60];
#endif
ROUND1(A, B, C, D, 0, Sa, 1); ROUND1(D, A, B, C, 1, Sb, 2);
ROUND1(C, D, A, B, 2, Sc, 3); ROUND1(B, C, D, A, 3, Sd, 4);
ROUND1(A, B, C, D, 4, Sa, 5); ROUND1(D, A, B, C, 5, Sb, 6);
ROUND1(C, D, A, B, 6, Sc, 7); ROUND1(B, C, D, A, 7, Sd, 8);
ROUND1(A, B, C, D, 8, Sa, 9); ROUND1(D, A, B, C, 9, Sb, 10);
ROUND1(C, D, A, B, 10, Sc, 11); ROUND1(B, C, D, A, 11, Sd, 12);
ROUND1(A, B, C, D, 12, Sa, 13); ROUND1(D, A, B, C, 13, Sb, 14);
ROUND1(C, D, A, B, 14, Sc, 15); ROUND1(B, C, D, A, 15, Sd, 16);
ROUND1(A, B, C, D, 0, Sa, 1);
ROUND1(D, A, B, C, 1, Sb, 2);
ROUND1(C, D, A, B, 2, Sc, 3);
ROUND1(B, C, D, A, 3, Sd, 4);
ROUND1(A, B, C, D, 4, Sa, 5);
ROUND1(D, A, B, C, 5, Sb, 6);
ROUND1(C, D, A, B, 6, Sc, 7);
ROUND1(B, C, D, A, 7, Sd, 8);
ROUND1(A, B, C, D, 8, Sa, 9);
ROUND1(D, A, B, C, 9, Sb, 10);
ROUND1(C, D, A, B, 10, Sc, 11);
ROUND1(B, C, D, A, 11, Sd, 12);
ROUND1(A, B, C, D, 12, Sa, 13);
ROUND1(D, A, B, C, 13, Sb, 14);
ROUND1(C, D, A, B, 14, Sc, 15);
ROUND1(B, C, D, A, 15, Sd, 16);
ROUND2(A, B, C, D, 1, Se, 17); ROUND2(D, A, B, C, 6, Sf, 18);
ROUND2(C, D, A, B, 11, Sg, 19); ROUND2(B, C, D, A, 0, Sh, 20);
ROUND2(A, B, C, D, 5, Se, 21); ROUND2(D, A, B, C, 10, Sf, 22);
ROUND2(C, D, A, B, 15, Sg, 23); ROUND2(B, C, D, A, 4, Sh, 24);
ROUND2(A, B, C, D, 9, Se, 25); ROUND2(D, A, B, C, 14, Sf, 26);
ROUND2(C, D, A, B, 3, Sg, 27); ROUND2(B, C, D, A, 8, Sh, 28);
ROUND2(A, B, C, D, 13, Se, 29); ROUND2(D, A, B, C, 2, Sf, 30);
ROUND2(C, D, A, B, 7, Sg, 31); ROUND2(B, C, D, A, 12, Sh, 32);
ROUND2(A, B, C, D, 1, Se, 17);
ROUND2(D, A, B, C, 6, Sf, 18);
ROUND2(C, D, A, B, 11, Sg, 19);
ROUND2(B, C, D, A, 0, Sh, 20);
ROUND2(A, B, C, D, 5, Se, 21);
ROUND2(D, A, B, C, 10, Sf, 22);
ROUND2(C, D, A, B, 15, Sg, 23);
ROUND2(B, C, D, A, 4, Sh, 24);
ROUND2(A, B, C, D, 9, Se, 25);
ROUND2(D, A, B, C, 14, Sf, 26);
ROUND2(C, D, A, B, 3, Sg, 27);
ROUND2(B, C, D, A, 8, Sh, 28);
ROUND2(A, B, C, D, 13, Se, 29);
ROUND2(D, A, B, C, 2, Sf, 30);
ROUND2(C, D, A, B, 7, Sg, 31);
ROUND2(B, C, D, A, 12, Sh, 32);
ROUND3(A, B, C, D, 5, Si, 33); ROUND3(D, A, B, C, 8, Sj, 34);
ROUND3(C, D, A, B, 11, Sk, 35); ROUND3(B, C, D, A, 14, Sl, 36);
ROUND3(A, B, C, D, 1, Si, 37); ROUND3(D, A, B, C, 4, Sj, 38);
ROUND3(C, D, A, B, 7, Sk, 39); ROUND3(B, C, D, A, 10, Sl, 40);
ROUND3(A, B, C, D, 13, Si, 41); ROUND3(D, A, B, C, 0, Sj, 42);
ROUND3(C, D, A, B, 3, Sk, 43); ROUND3(B, C, D, A, 6, Sl, 44);
ROUND3(A, B, C, D, 9, Si, 45); ROUND3(D, A, B, C, 12, Sj, 46);
ROUND3(C, D, A, B, 15, Sk, 47); ROUND3(B, C, D, A, 2, Sl, 48);
ROUND3(A, B, C, D, 5, Si, 33);
ROUND3(D, A, B, C, 8, Sj, 34);
ROUND3(C, D, A, B, 11, Sk, 35);
ROUND3(B, C, D, A, 14, Sl, 36);
ROUND3(A, B, C, D, 1, Si, 37);
ROUND3(D, A, B, C, 4, Sj, 38);
ROUND3(C, D, A, B, 7, Sk, 39);
ROUND3(B, C, D, A, 10, Sl, 40);
ROUND3(A, B, C, D, 13, Si, 41);
ROUND3(D, A, B, C, 0, Sj, 42);
ROUND3(C, D, A, B, 3, Sk, 43);
ROUND3(B, C, D, A, 6, Sl, 44);
ROUND3(A, B, C, D, 9, Si, 45);
ROUND3(D, A, B, C, 12, Sj, 46);
ROUND3(C, D, A, B, 15, Sk, 47);
ROUND3(B, C, D, A, 2, Sl, 48);
ROUND4(A, B, C, D, 0, Sm, 49); ROUND4(D, A, B, C, 7, Sn, 50);
ROUND4(C, D, A, B, 14, So, 51); ROUND4(B, C, D, A, 5, Sp, 52);
ROUND4(A, B, C, D, 12, Sm, 53); ROUND4(D, A, B, C, 3, Sn, 54);
ROUND4(C, D, A, B, 10, So, 55); ROUND4(B, C, D, A, 1, Sp, 56);
ROUND4(A, B, C, D, 8, Sm, 57); ROUND4(D, A, B, C, 15, Sn, 58);
ROUND4(C, D, A, B, 6, So, 59); ROUND4(B, C, D, A, 13, Sp, 60);
ROUND4(A, B, C, D, 4, Sm, 61); ROUND4(D, A, B, C, 11, Sn, 62);
ROUND4(C, D, A, B, 2, So, 63); ROUND4(B, C, D, A, 9, Sp, 64);
ROUND4(A, B, C, D, 0, Sm, 49);
ROUND4(D, A, B, C, 7, Sn, 50);
ROUND4(C, D, A, B, 14, So, 51);
ROUND4(B, C, D, A, 5, Sp, 52);
ROUND4(A, B, C, D, 12, Sm, 53);
ROUND4(D, A, B, C, 3, Sn, 54);
ROUND4(C, D, A, B, 10, So, 55);
ROUND4(B, C, D, A, 1, Sp, 56);
ROUND4(A, B, C, D, 8, Sm, 57);
ROUND4(D, A, B, C, 15, Sn, 58);
ROUND4(C, D, A, B, 6, So, 59);
ROUND4(B, C, D, A, 13, Sp, 60);
ROUND4(A, B, C, D, 4, Sm, 61);
ROUND4(D, A, B, C, 11, Sn, 62);
ROUND4(C, D, A, B, 2, So, 63);
ROUND4(B, C, D, A, 9, Sp, 64);
ctxt->md5_sta += A;
ctxt->md5_stb += B;


@ -1,4 +1,4 @@
/* $Id: md5.h,v 1.3 2001/01/09 16:07:13 momjian Exp $ */
/* $Id: md5.h,v 1.4 2001/03/22 03:59:10 momjian Exp $ */
/* $KAME: md5.h,v 1.3 2000/02/22 14:01:18 itojun Exp $ */
/*
@ -35,8 +35,10 @@
#define MD5_BUFLEN 64
typedef struct {
union {
typedef struct
{
union
{
uint32 md5_state32[4];
uint8 md5_state8[16];
} md5_st;
@ -47,7 +49,8 @@ typedef struct {
#define md5_std md5_st.md5_state32[3]
#define md5_st8 md5_st.md5_state8
union {
union
{
uint64 md5_count64;
uint8 md5_count8[8];
} md5_count;
@ -58,10 +61,10 @@ typedef struct {
uint8 md5_buf[MD5_BUFLEN];
} md5_ctxt;
extern void md5_init (md5_ctxt *);
extern void md5_loop (md5_ctxt *, uint8 *, unsigned int);
extern void md5_pad (md5_ctxt *);
extern void md5_result (uint8 *, md5_ctxt *);
extern void md5_init(md5_ctxt *);
extern void md5_loop(md5_ctxt *, uint8 *, unsigned int);
extern void md5_pad(md5_ctxt *);
extern void md5_result(uint8 *, md5_ctxt *);
/* compatibility */
#define MD5_CTX md5_ctxt
@ -73,4 +76,4 @@ do { \
md5_result((x), (y)); \
} while (0)
#endif /* ! _NETINET6_MD5_H_*/
#endif /* ! _NETINET6_MD5_H_ */


@ -26,7 +26,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: mhash.c,v 1.2 2001/02/10 02:31:26 tgl Exp $
* $Id: mhash.c,v 1.3 2001/03/22 03:59:10 momjian Exp $
*/
#include "postgres.h"
@ -36,22 +36,23 @@
#include <mhash.h>
static uint
pg_mhash_len(pg_digest *hash);
static uint8 *
pg_mhash_digest(pg_digest *hash, uint8 *src,
pg_mhash_len(pg_digest * hash);
static uint8 *pg_mhash_digest(pg_digest * hash, uint8 *src,
uint len, uint8 *buf);
static uint
pg_mhash_len(pg_digest *h) {
pg_mhash_len(pg_digest * h)
{
return mhash_get_block_size(h->misc.code);
}
static uint8 *
pg_mhash_digest(pg_digest *h, uint8 *src, uint len, uint8 *dst)
pg_mhash_digest(pg_digest * h, uint8 *src, uint len, uint8 *dst)
{
uint8 *res;
MHASH mh = mhash_init(h->misc.code);
mhash(mh, src, len);
res = mhash_end(mh);
@ -62,19 +63,23 @@ pg_mhash_digest(pg_digest *h, uint8 *src, uint len, uint8 *dst)
}
pg_digest *
pg_find_digest(pg_digest *h, char *name)
pg_find_digest(pg_digest * h, char *name)
{
size_t hnum, i, b;
size_t hnum,
i,
b;
char *mname;
hnum = mhash_count();
for (i = 0; i <= hnum; i++) {
for (i = 0; i <= hnum; i++)
{
mname = mhash_get_hash_name(i);
if (mname == NULL)
continue;
b = strcasecmp(name, mname);
free(mname);
if (!b) {
if (!b)
{
h->name = mhash_get_hash_name(i);
h->length = pg_mhash_len;
h->digest = pg_mhash_digest;
@ -84,4 +89,3 @@ pg_find_digest(pg_digest *h, char *name)
}
return NULL;
}


@ -26,7 +26,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: openssl.c,v 1.2 2001/02/10 02:31:26 tgl Exp $
* $Id: openssl.c,v 1.3 2001/03/22 03:59:10 momjian Exp $
*/
#include "postgres.h"
@ -36,19 +36,20 @@
#include <evp.h>
static uint
pg_ossl_len(pg_digest *h);
pg_ossl_len(pg_digest * h);
static uint8 *
pg_ossl_digest(pg_digest *h, uint8 *src, uint len, uint8 *buf);
pg_ossl_digest(pg_digest * h, uint8 *src, uint len, uint8 *buf);
static uint
pg_ossl_len(pg_digest *h) {
return EVP_MD_size((EVP_MD*)h->misc.ptr);
pg_ossl_len(pg_digest * h)
{
return EVP_MD_size((EVP_MD *) h->misc.ptr);
}
static uint8 *
pg_ossl_digest(pg_digest *h, uint8 *src, uint len, uint8 *buf)
pg_ossl_digest(pg_digest * h, uint8 *src, uint len, uint8 *buf)
{
EVP_MD *md = (EVP_MD*)h->misc.ptr;
EVP_MD *md = (EVP_MD *) h->misc.ptr;
EVP_MD_CTX ctx;
EVP_DigestInit(&ctx, md);
@ -61,11 +62,12 @@ pg_ossl_digest(pg_digest *h, uint8 *src, uint len, uint8 *buf)
static int pg_openssl_initialized = 0;
pg_digest *
pg_find_digest(pg_digest *h, char *name)
pg_find_digest(pg_digest * h, char *name)
{
const EVP_MD *md;
if (!pg_openssl_initialized) {
if (!pg_openssl_initialized)
{
OpenSSL_add_all_digests();
pg_openssl_initialized = 1;
}
@ -77,9 +79,7 @@ pg_find_digest(pg_digest *h, char *name)
h->name = name;
h->length = pg_ossl_len;
h->digest = pg_ossl_digest;
h->misc.ptr = (void*)md;
h->misc.ptr = (void *) md;
return h;
}


@ -26,7 +26,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: pgcrypto.c,v 1.6 2001/02/10 02:31:26 tgl Exp $
* $Id: pgcrypto.c,v 1.7 2001/03/22 03:59:10 momjian Exp $
*/
#include "postgres.h"
@ -49,7 +49,7 @@ Datum digest_exists(PG_FUNCTION_ARGS);
/* private stuff */
static pg_digest *
find_digest(pg_digest *hbuf, text *name, int silent);
find_digest(pg_digest * hbuf, text *name, int silent);
/* SQL function: hash(text, text) returns text */
@ -60,8 +60,10 @@ digest(PG_FUNCTION_ARGS)
{
text *arg;
text *name;
uint len, hlen;
pg_digest *h, _hbuf;
uint len,
hlen;
pg_digest *h,
_hbuf;
text *res;
if (PG_ARGISNULL(0) || PG_ARGISNULL(1))
@ -72,7 +74,7 @@ digest(PG_FUNCTION_ARGS)
hlen = h->length(h);
res = (text *)palloc(hlen + VARHDRSZ);
res = (text *) palloc(hlen + VARHDRSZ);
VARATT_SIZEP(res) = hlen + VARHDRSZ;
arg = PG_GETARG_TEXT_P(0);
@ -93,7 +95,8 @@ Datum
digest_exists(PG_FUNCTION_ARGS)
{
text *name;
pg_digest _hbuf, *res;
pg_digest _hbuf,
*res;
if (PG_ARGISNULL(0))
PG_RETURN_NULL();
@ -110,14 +113,15 @@ digest_exists(PG_FUNCTION_ARGS)
}
static pg_digest *
find_digest(pg_digest *hbuf, text *name, int silent)
find_digest(pg_digest * hbuf, text *name, int silent)
{
pg_digest *p;
char buf[NAMEDATALEN];
uint len;
len = VARSIZE(name) - VARHDRSZ;
if (len >= NAMEDATALEN) {
if (len >= NAMEDATALEN)
{
if (silent)
return NULL;
elog(ERROR, "Hash type does not exist (name too long)");
@ -132,4 +136,3 @@ find_digest(pg_digest *hbuf, text *name, int silent)
elog(ERROR, "Hash type does not exist: '%s'", buf);
return p;
}


@ -26,25 +26,27 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: pgcrypto.h,v 1.2 2001/01/09 16:07:13 momjian Exp $
* $Id: pgcrypto.h,v 1.3 2001/03/22 03:59:10 momjian Exp $
*/
#ifndef _PG_CRYPTO_H
#define _PG_CRYPTO_H
typedef struct _pg_digest pg_digest;
struct _pg_digest {
struct _pg_digest
{
char *name;
uint (*length)(pg_digest *h);
uint8 *(*digest)(pg_digest *h, uint8 *data,
uint (*length) (pg_digest * h);
uint8 *(*digest) (pg_digest * h, uint8 *data,
uint dlen, uint8 *buf);
/* private */
union {
union
{
uint code;
const void *ptr;
} misc;
};
extern pg_digest *pg_find_digest(pg_digest *hbuf, char *name);
extern pg_digest *pg_find_digest(pg_digest * hbuf, char *name);
#endif


@ -1,4 +1,4 @@
/* $Id: sha1.c,v 1.4 2001/02/10 02:31:26 tgl Exp $ */
/* $Id: sha1.c,v 1.5 2001/03/22 03:59:10 momjian Exp $ */
/* $KAME: sha1.c,v 1.3 2000/02/22 14:01:18 itojun Exp $ */
/*
@ -41,15 +41,16 @@
/* sanity check */
#if BYTE_ORDER != BIG_ENDIAN
# if BYTE_ORDER != LITTLE_ENDIAN
# define unsupported 1
# endif
#if BYTE_ORDER != LITTLE_ENDIAN
#define unsupported 1
#endif
#endif
#ifndef unsupported
/* constant table */
static uint32 _K[] = { 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6 };
static uint32 _K[] = {0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6};
#define K(t) _K[(t) / 20]
#define F0(b, c, d) (((b) & (c)) | ((~(b)) & (d)))
@ -81,80 +82,141 @@ static uint32 _K[] = { 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6 };
sha1_step(ctxt); \
}
static void sha1_step (struct sha1_ctxt *);
static void sha1_step(struct sha1_ctxt *);
static void
sha1_step(ctxt)
struct sha1_ctxt *ctxt;
struct sha1_ctxt *ctxt;
{
uint32 a, b, c, d, e;
size_t t, s;
uint32 a,
b,
c,
d,
e;
size_t t,
s;
uint32 tmp;
#if BYTE_ORDER == LITTLE_ENDIAN
struct sha1_ctxt tctxt;
bcopy(&ctxt->m.b8[0], &tctxt.m.b8[0], 64);
ctxt->m.b8[0] = tctxt.m.b8[3]; ctxt->m.b8[1] = tctxt.m.b8[2];
ctxt->m.b8[2] = tctxt.m.b8[1]; ctxt->m.b8[3] = tctxt.m.b8[0];
ctxt->m.b8[4] = tctxt.m.b8[7]; ctxt->m.b8[5] = tctxt.m.b8[6];
ctxt->m.b8[6] = tctxt.m.b8[5]; ctxt->m.b8[7] = tctxt.m.b8[4];
ctxt->m.b8[8] = tctxt.m.b8[11]; ctxt->m.b8[9] = tctxt.m.b8[10];
ctxt->m.b8[10] = tctxt.m.b8[9]; ctxt->m.b8[11] = tctxt.m.b8[8];
ctxt->m.b8[12] = tctxt.m.b8[15]; ctxt->m.b8[13] = tctxt.m.b8[14];
ctxt->m.b8[14] = tctxt.m.b8[13]; ctxt->m.b8[15] = tctxt.m.b8[12];
ctxt->m.b8[16] = tctxt.m.b8[19]; ctxt->m.b8[17] = tctxt.m.b8[18];
ctxt->m.b8[18] = tctxt.m.b8[17]; ctxt->m.b8[19] = tctxt.m.b8[16];
ctxt->m.b8[20] = tctxt.m.b8[23]; ctxt->m.b8[21] = tctxt.m.b8[22];
ctxt->m.b8[22] = tctxt.m.b8[21]; ctxt->m.b8[23] = tctxt.m.b8[20];
ctxt->m.b8[24] = tctxt.m.b8[27]; ctxt->m.b8[25] = tctxt.m.b8[26];
ctxt->m.b8[26] = tctxt.m.b8[25]; ctxt->m.b8[27] = tctxt.m.b8[24];
ctxt->m.b8[28] = tctxt.m.b8[31]; ctxt->m.b8[29] = tctxt.m.b8[30];
ctxt->m.b8[30] = tctxt.m.b8[29]; ctxt->m.b8[31] = tctxt.m.b8[28];
ctxt->m.b8[32] = tctxt.m.b8[35]; ctxt->m.b8[33] = tctxt.m.b8[34];
ctxt->m.b8[34] = tctxt.m.b8[33]; ctxt->m.b8[35] = tctxt.m.b8[32];
ctxt->m.b8[36] = tctxt.m.b8[39]; ctxt->m.b8[37] = tctxt.m.b8[38];
ctxt->m.b8[38] = tctxt.m.b8[37]; ctxt->m.b8[39] = tctxt.m.b8[36];
ctxt->m.b8[40] = tctxt.m.b8[43]; ctxt->m.b8[41] = tctxt.m.b8[42];
ctxt->m.b8[42] = tctxt.m.b8[41]; ctxt->m.b8[43] = tctxt.m.b8[40];
ctxt->m.b8[44] = tctxt.m.b8[47]; ctxt->m.b8[45] = tctxt.m.b8[46];
ctxt->m.b8[46] = tctxt.m.b8[45]; ctxt->m.b8[47] = tctxt.m.b8[44];
ctxt->m.b8[48] = tctxt.m.b8[51]; ctxt->m.b8[49] = tctxt.m.b8[50];
ctxt->m.b8[50] = tctxt.m.b8[49]; ctxt->m.b8[51] = tctxt.m.b8[48];
ctxt->m.b8[52] = tctxt.m.b8[55]; ctxt->m.b8[53] = tctxt.m.b8[54];
ctxt->m.b8[54] = tctxt.m.b8[53]; ctxt->m.b8[55] = tctxt.m.b8[52];
ctxt->m.b8[56] = tctxt.m.b8[59]; ctxt->m.b8[57] = tctxt.m.b8[58];
ctxt->m.b8[58] = tctxt.m.b8[57]; ctxt->m.b8[59] = tctxt.m.b8[56];
ctxt->m.b8[60] = tctxt.m.b8[63]; ctxt->m.b8[61] = tctxt.m.b8[62];
ctxt->m.b8[62] = tctxt.m.b8[61]; ctxt->m.b8[63] = tctxt.m.b8[60];
ctxt->m.b8[0] = tctxt.m.b8[3];
ctxt->m.b8[1] = tctxt.m.b8[2];
ctxt->m.b8[2] = tctxt.m.b8[1];
ctxt->m.b8[3] = tctxt.m.b8[0];
ctxt->m.b8[4] = tctxt.m.b8[7];
ctxt->m.b8[5] = tctxt.m.b8[6];
ctxt->m.b8[6] = tctxt.m.b8[5];
ctxt->m.b8[7] = tctxt.m.b8[4];
ctxt->m.b8[8] = tctxt.m.b8[11];
ctxt->m.b8[9] = tctxt.m.b8[10];
ctxt->m.b8[10] = tctxt.m.b8[9];
ctxt->m.b8[11] = tctxt.m.b8[8];
ctxt->m.b8[12] = tctxt.m.b8[15];
ctxt->m.b8[13] = tctxt.m.b8[14];
ctxt->m.b8[14] = tctxt.m.b8[13];
ctxt->m.b8[15] = tctxt.m.b8[12];
ctxt->m.b8[16] = tctxt.m.b8[19];
ctxt->m.b8[17] = tctxt.m.b8[18];
ctxt->m.b8[18] = tctxt.m.b8[17];
ctxt->m.b8[19] = tctxt.m.b8[16];
ctxt->m.b8[20] = tctxt.m.b8[23];
ctxt->m.b8[21] = tctxt.m.b8[22];
ctxt->m.b8[22] = tctxt.m.b8[21];
ctxt->m.b8[23] = tctxt.m.b8[20];
ctxt->m.b8[24] = tctxt.m.b8[27];
ctxt->m.b8[25] = tctxt.m.b8[26];
ctxt->m.b8[26] = tctxt.m.b8[25];
ctxt->m.b8[27] = tctxt.m.b8[24];
ctxt->m.b8[28] = tctxt.m.b8[31];
ctxt->m.b8[29] = tctxt.m.b8[30];
ctxt->m.b8[30] = tctxt.m.b8[29];
ctxt->m.b8[31] = tctxt.m.b8[28];
ctxt->m.b8[32] = tctxt.m.b8[35];
ctxt->m.b8[33] = tctxt.m.b8[34];
ctxt->m.b8[34] = tctxt.m.b8[33];
ctxt->m.b8[35] = tctxt.m.b8[32];
ctxt->m.b8[36] = tctxt.m.b8[39];
ctxt->m.b8[37] = tctxt.m.b8[38];
ctxt->m.b8[38] = tctxt.m.b8[37];
ctxt->m.b8[39] = tctxt.m.b8[36];
ctxt->m.b8[40] = tctxt.m.b8[43];
ctxt->m.b8[41] = tctxt.m.b8[42];
ctxt->m.b8[42] = tctxt.m.b8[41];
ctxt->m.b8[43] = tctxt.m.b8[40];
ctxt->m.b8[44] = tctxt.m.b8[47];
ctxt->m.b8[45] = tctxt.m.b8[46];
ctxt->m.b8[46] = tctxt.m.b8[45];
ctxt->m.b8[47] = tctxt.m.b8[44];
ctxt->m.b8[48] = tctxt.m.b8[51];
ctxt->m.b8[49] = tctxt.m.b8[50];
ctxt->m.b8[50] = tctxt.m.b8[49];
ctxt->m.b8[51] = tctxt.m.b8[48];
ctxt->m.b8[52] = tctxt.m.b8[55];
ctxt->m.b8[53] = tctxt.m.b8[54];
ctxt->m.b8[54] = tctxt.m.b8[53];
ctxt->m.b8[55] = tctxt.m.b8[52];
ctxt->m.b8[56] = tctxt.m.b8[59];
ctxt->m.b8[57] = tctxt.m.b8[58];
ctxt->m.b8[58] = tctxt.m.b8[57];
ctxt->m.b8[59] = tctxt.m.b8[56];
ctxt->m.b8[60] = tctxt.m.b8[63];
ctxt->m.b8[61] = tctxt.m.b8[62];
ctxt->m.b8[62] = tctxt.m.b8[61];
ctxt->m.b8[63] = tctxt.m.b8[60];
#endif
a = H(0); b = H(1); c = H(2); d = H(3); e = H(4);
a = H(0);
b = H(1);
c = H(2);
d = H(3);
e = H(4);
for (t = 0; t < 20; t++) {
for (t = 0; t < 20; t++)
{
s = t & 0x0f;
if (t >= 16) {
W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
}
if (t >= 16)
W(s) = S(1, W((s + 13) & 0x0f) ^ W((s + 8) & 0x0f) ^ W((s + 2) & 0x0f) ^ W(s));
tmp = S(5, a) + F0(b, c, d) + e + W(s) + K(t);
e = d; d = c; c = S(30, b); b = a; a = tmp;
e = d;
d = c;
c = S(30, b);
b = a;
a = tmp;
}
for (t = 20; t < 40; t++) {
for (t = 20; t < 40; t++)
{
s = t & 0x0f;
W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
W(s) = S(1, W((s + 13) & 0x0f) ^ W((s + 8) & 0x0f) ^ W((s + 2) & 0x0f) ^ W(s));
tmp = S(5, a) + F1(b, c, d) + e + W(s) + K(t);
e = d; d = c; c = S(30, b); b = a; a = tmp;
e = d;
d = c;
c = S(30, b);
b = a;
a = tmp;
}
for (t = 40; t < 60; t++) {
for (t = 40; t < 60; t++)
{
s = t & 0x0f;
W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
W(s) = S(1, W((s + 13) & 0x0f) ^ W((s + 8) & 0x0f) ^ W((s + 2) & 0x0f) ^ W(s));
tmp = S(5, a) + F2(b, c, d) + e + W(s) + K(t);
e = d; d = c; c = S(30, b); b = a; a = tmp;
e = d;
d = c;
c = S(30, b);
b = a;
a = tmp;
}
for (t = 60; t < 80; t++) {
for (t = 60; t < 80; t++)
{
s = t & 0x0f;
W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
W(s) = S(1, W((s + 13) & 0x0f) ^ W((s + 8) & 0x0f) ^ W((s + 2) & 0x0f) ^ W(s));
tmp = S(5, a) + F3(b, c, d) + e + W(s) + K(t);
e = d; d = c; c = S(30, b); b = a; a = tmp;
e = d;
d = c;
c = S(30, b);
b = a;
a = tmp;
}
H(0) = H(0) + a;
@ -170,7 +232,7 @@ sha1_step(ctxt)
void
sha1_init(ctxt)
struct sha1_ctxt *ctxt;
struct sha1_ctxt *ctxt;
{
bzero(ctxt, sizeof(struct sha1_ctxt));
H(0) = 0x67452301;
@ -182,16 +244,17 @@ sha1_init(ctxt)
void
sha1_pad(ctxt)
struct sha1_ctxt *ctxt;
struct sha1_ctxt *ctxt;
{
size_t padlen; /*pad length in bytes*/
size_t padlen; /* pad length in bytes */
size_t padstart;
PUTPAD(0x80);
padstart = COUNT % 64;
padlen = 64 - padstart;
if (padlen < 8) {
if (padlen < 8)
{
bzero(&ctxt->m.b8[padstart], padlen);
COUNT += padlen;
COUNT %= 64;
@ -203,23 +266,31 @@ sha1_pad(ctxt)
COUNT += (padlen - 8);
COUNT %= 64;
#if BYTE_ORDER == BIG_ENDIAN
PUTPAD(ctxt->c.b8[0]); PUTPAD(ctxt->c.b8[1]);
PUTPAD(ctxt->c.b8[2]); PUTPAD(ctxt->c.b8[3]);
PUTPAD(ctxt->c.b8[4]); PUTPAD(ctxt->c.b8[5]);
PUTPAD(ctxt->c.b8[6]); PUTPAD(ctxt->c.b8[7]);
PUTPAD(ctxt->c.b8[0]);
PUTPAD(ctxt->c.b8[1]);
PUTPAD(ctxt->c.b8[2]);
PUTPAD(ctxt->c.b8[3]);
PUTPAD(ctxt->c.b8[4]);
PUTPAD(ctxt->c.b8[5]);
PUTPAD(ctxt->c.b8[6]);
PUTPAD(ctxt->c.b8[7]);
#else
PUTPAD(ctxt->c.b8[7]); PUTPAD(ctxt->c.b8[6]);
PUTPAD(ctxt->c.b8[5]); PUTPAD(ctxt->c.b8[4]);
PUTPAD(ctxt->c.b8[3]); PUTPAD(ctxt->c.b8[2]);
PUTPAD(ctxt->c.b8[1]); PUTPAD(ctxt->c.b8[0]);
PUTPAD(ctxt->c.b8[7]);
PUTPAD(ctxt->c.b8[6]);
PUTPAD(ctxt->c.b8[5]);
PUTPAD(ctxt->c.b8[4]);
PUTPAD(ctxt->c.b8[3]);
PUTPAD(ctxt->c.b8[2]);
PUTPAD(ctxt->c.b8[1]);
PUTPAD(ctxt->c.b8[0]);
#endif
}
void
sha1_loop(ctxt, input0, len)
struct sha1_ctxt *ctxt;
const caddr_t input0;
size_t len;
struct sha1_ctxt *ctxt;
const caddr_t input0;
size_t len;
{
const uint8 *input;
size_t gaplen;
@ -227,10 +298,11 @@ sha1_loop(ctxt, input0, len)
size_t off;
size_t copysiz;
input = (const uint8 *)input0;
input = (const uint8 *) input0;
off = 0;
while (off < len) {
while (off < len)
{
gapstart = COUNT % 64;
gaplen = 64 - gapstart;
@ -247,27 +319,37 @@ sha1_loop(ctxt, input0, len)
void
sha1_result(ctxt, digest0)
struct sha1_ctxt *ctxt;
caddr_t digest0;
struct sha1_ctxt *ctxt;
caddr_t digest0;
{
uint8 *digest;
digest = (uint8 *)digest0;
digest = (uint8 *) digest0;
sha1_pad(ctxt);
#if BYTE_ORDER == BIG_ENDIAN
bcopy(&ctxt->h.b8[0], digest, 20);
#else
digest[0] = ctxt->h.b8[3]; digest[1] = ctxt->h.b8[2];
digest[2] = ctxt->h.b8[1]; digest[3] = ctxt->h.b8[0];
digest[4] = ctxt->h.b8[7]; digest[5] = ctxt->h.b8[6];
digest[6] = ctxt->h.b8[5]; digest[7] = ctxt->h.b8[4];
digest[8] = ctxt->h.b8[11]; digest[9] = ctxt->h.b8[10];
digest[10] = ctxt->h.b8[9]; digest[11] = ctxt->h.b8[8];
digest[12] = ctxt->h.b8[15]; digest[13] = ctxt->h.b8[14];
digest[14] = ctxt->h.b8[13]; digest[15] = ctxt->h.b8[12];
digest[16] = ctxt->h.b8[19]; digest[17] = ctxt->h.b8[18];
digest[18] = ctxt->h.b8[17]; digest[19] = ctxt->h.b8[16];
digest[0] = ctxt->h.b8[3];
digest[1] = ctxt->h.b8[2];
digest[2] = ctxt->h.b8[1];
digest[3] = ctxt->h.b8[0];
digest[4] = ctxt->h.b8[7];
digest[5] = ctxt->h.b8[6];
digest[6] = ctxt->h.b8[5];
digest[7] = ctxt->h.b8[4];
digest[8] = ctxt->h.b8[11];
digest[9] = ctxt->h.b8[10];
digest[10] = ctxt->h.b8[9];
digest[11] = ctxt->h.b8[8];
digest[12] = ctxt->h.b8[15];
digest[13] = ctxt->h.b8[14];
digest[14] = ctxt->h.b8[13];
digest[15] = ctxt->h.b8[12];
digest[16] = ctxt->h.b8[19];
digest[17] = ctxt->h.b8[18];
digest[18] = ctxt->h.b8[17];
digest[19] = ctxt->h.b8[16];
#endif
}
#endif /*unsupported*/
#endif /* unsupported */


@ -1,4 +1,4 @@
/* $Id: sha1.h,v 1.3 2001/01/09 16:07:13 momjian Exp $ */
/* $Id: sha1.h,v 1.4 2001/03/22 03:59:10 momjian Exp $ */
/* $KAME: sha1.h,v 1.4 2000/02/22 14:01:18 itojun Exp $ */
/*
@ -38,33 +38,38 @@
#ifndef _NETINET6_SHA1_H_
#define _NETINET6_SHA1_H_
struct sha1_ctxt {
union {
struct sha1_ctxt
{
union
{
uint8 b8[20];
uint32 b32[5];
} h;
union {
union
{
uint8 b8[8];
uint64 b64[1];
} c;
union {
union
{
uint8 b8[64];
uint32 b32[16];
} m;
uint8 count;
};
extern void sha1_init (struct sha1_ctxt *);
extern void sha1_pad (struct sha1_ctxt *);
extern void sha1_loop (struct sha1_ctxt *, const caddr_t, size_t);
extern void sha1_result (struct sha1_ctxt *, caddr_t);
extern void sha1_init(struct sha1_ctxt *);
extern void sha1_pad(struct sha1_ctxt *);
extern void sha1_loop(struct sha1_ctxt *, const caddr_t, size_t);
extern void sha1_result(struct sha1_ctxt *, caddr_t);
/* compatibility with other SHA1 source codes */
typedef struct sha1_ctxt SHA1_CTX;
#define SHA1Init(x) sha1_init((x))
#define SHA1Update(x, y, z) sha1_loop((x), (y), (z))
#define SHA1Final(x, y) sha1_result((y), (x))
#define SHA1_RESULTLEN (160/8)
#endif /*_NETINET6_SHA1_H_*/
#endif /* _NETINET6_SHA1_H_ */


@ -19,15 +19,17 @@ PG_FUNCTION_INFO_V1(_rserv_debug_);
Datum _rserv_log_(PG_FUNCTION_ARGS);
Datum _rserv_sync_(PG_FUNCTION_ARGS);
Datum _rserv_debug_(PG_FUNCTION_ARGS);
#else
HeapTuple _rserv_log_(void);
int32 _rserv_sync_(int32);
int32 _rserv_debug_(int32);
#endif
static int debug = 0;
static char* OutputValue(char *key, char *buf, int size);
static char *OutputValue(char *key, char *buf, int size);
#ifdef PG_FUNCTION_INFO_V1
Datum
@ -203,6 +205,7 @@ _rserv_sync_(int32 server)
{
#ifdef PG_FUNCTION_INFO_V1
int32 server = PG_GETARG_INT32(0);
#endif
char sql[8192];
char buf[8192];
@ -248,6 +251,7 @@ _rserv_debug_(int32 newval)
{
#ifdef PG_FUNCTION_INFO_V1
int32 newval = PG_GETARG_INT32(0);
#endif
int32 oldval = debug;
@ -258,7 +262,7 @@ _rserv_debug_(int32 newval)
#define ExtendBy 1024
static char*
static char *
OutputValue(char *key, char *buf, int size)
{
int i = 0;
@ -267,25 +271,31 @@ OutputValue(char *key, char *buf, int size)
int slen = 0;
size--;
for ( ; ; )
for (;;)
{
switch (*key)
{
case '\\': subst ="\\\\";
case '\\':
subst = "\\\\";
slen = 2;
break;
case ' ': subst = "\\011";
case ' ':
subst = "\\011";
slen = 4;
break;
case '\n': subst = "\\012";
case '\n':
subst = "\\012";
slen = 4;
break;
case '\'': subst = "\\047";
case '\'':
subst = "\\047";
slen = 4;
break;
case '\0': out[i] = 0;
return(out);
default: slen = 1;
case '\0':
out[i] = 0;
return (out);
default:
slen = 1;
break;
}
@ -293,13 +303,13 @@ OutputValue(char *key, char *buf, int size)
{
if (out == buf)
{
out = (char*) palloc(size + ExtendBy);
out = (char *) palloc(size + ExtendBy);
strncpy(out, buf, i);
size += ExtendBy;
}
else
{
out = (char*) repalloc(out, size + ExtendBy);
out = (char *) repalloc(out, size + ExtendBy);
size += ExtendBy;
}
}
@ -314,6 +324,6 @@ OutputValue(char *key, char *buf, int size)
key++;
}
return(out);
return (out);
}


@ -4,76 +4,81 @@
#include "utils/elog.h"
static char * PARSE_BUFFER;
static char * PARSE_BUFFER_PTR;
static char *PARSE_BUFFER;
static char *PARSE_BUFFER_PTR;
static unsigned int PARSE_BUFFER_SIZE;
static unsigned int SCANNER_POS;
void set_parse_buffer( char* s );
void reset_parse_buffer( void );
int read_parse_buffer( void );
char * parse_buffer( void );
char * parse_buffer_ptr( void );
unsigned int parse_buffer_curr_char( void );
unsigned int parse_buffer_size( void );
unsigned int parse_buffer_pos( void );
void set_parse_buffer(char *s);
void reset_parse_buffer(void);
int read_parse_buffer(void);
char *parse_buffer(void);
char *parse_buffer_ptr(void);
unsigned int parse_buffer_curr_char(void);
unsigned int parse_buffer_size(void);
unsigned int parse_buffer_pos(void);
extern void seg_flush_scanner_buffer(void); /* defined in segscan.l */
void set_parse_buffer( char* s )
void
set_parse_buffer(char *s)
{
PARSE_BUFFER = s;
PARSE_BUFFER_SIZE = strlen(s);
if ( PARSE_BUFFER_SIZE == 0 ) {
if (PARSE_BUFFER_SIZE == 0)
elog(ERROR, "seg_in: can't parse an empty string");
}
PARSE_BUFFER_PTR = PARSE_BUFFER;
SCANNER_POS = 0;
}
void reset_parse_buffer( void )
void
reset_parse_buffer(void)
{
PARSE_BUFFER_PTR = PARSE_BUFFER;
SCANNER_POS = 0;
seg_flush_scanner_buffer();
}
int read_parse_buffer( void )
int
read_parse_buffer(void)
{
int c;
/*
c = *PARSE_BUFFER_PTR++;
SCANNER_POS++;
* c = *PARSE_BUFFER_PTR++; SCANNER_POS++;
*/
c = PARSE_BUFFER[SCANNER_POS];
if(SCANNER_POS < PARSE_BUFFER_SIZE)
if (SCANNER_POS < PARSE_BUFFER_SIZE)
SCANNER_POS++;
return c;
}
char * parse_buffer( void )
char *
parse_buffer(void)
{
return PARSE_BUFFER;
}
unsigned int parse_buffer_curr_char( void )
unsigned int
parse_buffer_curr_char(void)
{
return PARSE_BUFFER[SCANNER_POS];
}
char * parse_buffer_ptr( void )
char *
parse_buffer_ptr(void)
{
return PARSE_BUFFER_PTR;
}
unsigned int parse_buffer_pos( void )
unsigned int
parse_buffer_pos(void)
{
return SCANNER_POS;
}
unsigned int parse_buffer_size( void )
unsigned int
parse_buffer_size(void)
{
return PARSE_BUFFER_SIZE;
}


@ -1,8 +1,8 @@
extern void set_parse_buffer( char* s );
extern void reset_parse_buffer( void );
extern int read_parse_buffer( void );
extern char * parse_buffer( void );
extern char * parse_buffer_ptr( void );
extern unsigned int parse_buffer_curr_char( void );
extern unsigned int parse_buffer_pos( void );
extern unsigned int parse_buffer_size( void );
extern void set_parse_buffer(char *s);
extern void reset_parse_buffer(void);
extern int read_parse_buffer(void);
extern char *parse_buffer(void);
extern char *parse_buffer_ptr(void);
extern unsigned int parse_buffer_curr_char(void);
extern unsigned int parse_buffer_pos(void);
extern unsigned int parse_buffer_size(void);

(File diff suppressed because it is too large.)


@ -1,4 +1,5 @@
typedef struct SEG {
typedef struct SEG
{
float lower;
float upper;
char l_sigd;


@ -1,4 +1,4 @@
/* $Header: /cvsroot/pgsql/contrib/soundex/Attic/soundex.c,v 1.10 2001/02/10 02:31:26 tgl Exp $ */
/* $Header: /cvsroot/pgsql/contrib/soundex/Attic/soundex.c,v 1.11 2001/03/22 03:59:10 momjian Exp $ */
#include "postgres.h"
#include <ctype.h>
@ -42,6 +42,7 @@ text_soundex(PG_FUNCTION_ARGS)
/* ABCDEFGHIJKLMNOPQRSTUVWXYZ */
static const char *soundex_table = "01230120022455012623010202";
#define soundex_code(letter) soundex_table[toupper((unsigned char) (letter)) - 'A']
@ -98,7 +99,7 @@ soundex(const char *instr, char *outstr)
#ifdef SOUNDEX_TEST
int
main (int argc, char *argv[])
main(int argc, char *argv[])
{
if (argc < 2)
{
@ -114,4 +115,5 @@ main (int argc, char *argv[])
return 0;
}
}
#endif /* SOUNDEX_TEST */


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/contrib/vacuumlo/vacuumlo.c,v 1.8 2001/01/24 19:42:45 momjian Exp $
* $Header: /cvsroot/pgsql/contrib/vacuumlo/vacuumlo.c,v 1.9 2001/03/22 03:59:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -76,6 +76,7 @@ vacuumlo(char *database, int verbose)
return -1;
}
PQclear(res);
/*
* Vacuum the temp table so that planner will generate decent plans
* for the DELETEs below.
@ -96,13 +97,13 @@ vacuumlo(char *database, int verbose)
/*
* Now find any candidate tables who have columns of type oid.
*
* NOTE: the temp table formed above is ignored, because its real
* table name will be pg_something. Also, pg_largeobject will be
* ignored. If either of these were scanned, obviously we'd end up
* with nothing to delete...
* NOTE: the temp table formed above is ignored, because its real table
* name will be pg_something. Also, pg_largeobject will be ignored.
* If either of these were scanned, obviously we'd end up with nothing
* to delete...
*
* NOTE: the system oid column is ignored, as it has attnum < 1.
* This shouldn't matter for correctness, but it saves time.
* NOTE: the system oid column is ignored, as it has attnum < 1. This
* shouldn't matter for correctness, but it saves time.
*/
buf[0] = '\0';
strcat(buf, "SELECT c.relname, a.attname ");
@ -135,9 +136,9 @@ vacuumlo(char *database, int verbose)
fprintf(stdout, "Checking %s in %s\n", field, table);
/*
* We use a DELETE with implicit join for efficiency. This
* is a Postgres-ism and not portable to other DBMSs, but
* then this whole program is a Postgres-ism.
* We use a DELETE with implicit join for efficiency. This is a
* Postgres-ism and not portable to other DBMSs, but then this
* whole program is a Postgres-ism.
*/
sprintf(buf, "DELETE FROM vacuum_l WHERE lo = \"%s\".\"%s\" ",
table, field);
@ -159,8 +160,8 @@ vacuumlo(char *database, int verbose)
/*
* Run the actual deletes in a single transaction. Note that this
* would be a bad idea in pre-7.1 Postgres releases (since rolling
* back a table delete used to cause problems), but it should
* be safe now.
* back a table delete used to cause problems), but it should be safe
* now.
*/
res = PQexec(conn, "begin");
PQclear(res);


@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.69 2001/01/24 19:42:46 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.70 2001/03/22 03:59:11 momjian Exp $
*
* NOTES
* The old interface functions have been converted to macros
@ -306,8 +306,8 @@ nocachegetattr(HeapTuple tuple,
int j;
/*
* In for(), we test <= and not < because we want to see
* if we can go past it in initializing offsets.
* In for(), we test <= and not < because we want to see if we
* can go past it in initializing offsets.
*/
for (j = 0; j <= attnum; j++)
{
@ -321,9 +321,9 @@ nocachegetattr(HeapTuple tuple,
}
/*
* If slow is false, and we got here, we know that we have a tuple with
* no nulls or varlenas before the target attribute. If possible, we
* also want to initialize the remainder of the attribute cached
* If slow is false, and we got here, we know that we have a tuple
* with no nulls or varlenas before the target attribute. If possible,
* we also want to initialize the remainder of the attribute cached
* offset values.
*/
if (!slow)


@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.52 2001/02/22 21:48:48 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.53 2001/03/22 03:59:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -45,9 +45,11 @@ index_formtuple(TupleDesc tupleDescriptor,
bool hasnull = false;
uint16 tupmask = 0;
int numberOfAttributes = tupleDescriptor->natts;
#ifdef TOAST_INDEX_HACK
Datum untoasted_value[INDEX_MAX_KEYS];
bool untoasted_free[INDEX_MAX_KEYS];
#endif
if (numberOfAttributes > INDEX_MAX_KEYS)
@ -79,8 +81,8 @@ index_formtuple(TupleDesc tupleDescriptor,
}
/*
* If value is above size target, and is of a compressible datatype,
* try to compress it in-line.
* If value is above size target, and is of a compressible
* datatype, try to compress it in-line.
*/
if (VARATT_SIZE(untoasted_value[i]) > TOAST_INDEX_TARGET &&
!VARATT_IS_EXTENDED(untoasted_value[i]) &&
@ -146,8 +148,8 @@ index_formtuple(TupleDesc tupleDescriptor,
/*
* We do this because DataFill wants to initialize a "tupmask" which
* is used for HeapTuples, but we want an indextuple infomask. The
* only relevant info is the "has variable attributes" field.
* We have already set the hasnull bit above.
* only relevant info is the "has variable attributes" field. We have
* already set the hasnull bit above.
*/
if (tupmask & HEAP_HASVARLENA)
@ -315,9 +317,9 @@ nocache_index_getattr(IndexTuple tup,
}
/*
* If slow is false, and we got here, we know that we have a tuple with
* no nulls or varlenas before the target attribute. If possible, we
* also want to initialize the remainder of the attribute cached
* If slow is false, and we got here, we know that we have a tuple
* with no nulls or varlenas before the target attribute. If possible,
* we also want to initialize the remainder of the attribute cached
* offset values.
*/
if (!slow)
@ -391,10 +393,8 @@ nocache_index_getattr(IndexTuple tup,
usecache = false;
}
else
{
off += att[i]->attlen;
}
}
off = att_align(off, att[attnum]->attlen, att[attnum]->attalign);


@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.57 2001/01/24 19:42:47 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.58 2001/03/22 03:59:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -51,7 +51,7 @@ getTypeOutputInfo(Oid type, Oid *typOutput, Oid *typElem,
*typOutput = pt->typoutput;
*typElem = pt->typelem;
*typIsVarlena = (! pt->typbyval) && (pt->typlen == -1);
*typIsVarlena = (!pt->typbyval) && (pt->typlen == -1);
ReleaseSysCache(typeTuple);
return OidIsValid(*typOutput);
}
@ -200,9 +200,10 @@ printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
continue;
if (OidIsValid(thisState->typoutput))
{
/*
* If we have a toasted datum, forcibly detoast it here to avoid
* memory leakage inside the type's output routine.
* If we have a toasted datum, forcibly detoast it here to
* avoid memory leakage inside the type's output routine.
*/
if (thisState->typisvarlena)
attr = PointerGetDatum(PG_DETOAST_DATUM(origattr));
@ -308,9 +309,10 @@ debugtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
if (getTypeOutputInfo(typeinfo->attrs[i]->atttypid,
&typoutput, &typelem, &typisvarlena))
{
/*
* If we have a toasted datum, forcibly detoast it here to avoid
* memory leakage inside the type's output routine.
* If we have a toasted datum, forcibly detoast it here to
* avoid memory leakage inside the type's output routine.
*/
if (typisvarlena)
attr = PointerGetDatum(PG_DETOAST_DATUM(origattr));
@ -405,6 +407,7 @@ printtup_internal(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
/* send # of bytes, and opaque data */
if (thisState->typisvarlena)
{
/*
* If we have a toasted datum, must detoast before sending.
*/


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.71 2001/01/24 19:42:47 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.72 2001/03/22 03:59:11 momjian Exp $
*
* NOTES
* some of the executor utility code such as "ExecTypeFromTL" should be
@ -242,9 +242,9 @@ equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2)
/*
* We do not need to check every single field here, and in fact
* some fields such as attdispersion probably shouldn't be
* compared. We can also disregard attnum (it was used to
* place the row in the attrs array) and everything derived
* from the column datatype.
* compared. We can also disregard attnum (it was used to place
* the row in the attrs array) and everything derived from the
* column datatype.
*/
if (strcmp(NameStr(attr1->attname), NameStr(attr2->attname)) != 0)
return false;
@ -276,8 +276,8 @@ equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2)
/*
* We can't assume that the items are always read from the
* system catalogs in the same order; so use the adnum field to
* identify the matching item to compare.
* system catalogs in the same order; so use the adnum field
* to identify the matching item to compare.
*/
for (j = 0; j < n; defval2++, j++)
{
@ -298,9 +298,9 @@ equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2)
ConstrCheck *check2 = constr2->check;
/*
* Similarly, don't assume that the checks are always read
* in the same order; match them up by name and contents.
* (The name *should* be unique, but...)
* Similarly, don't assume that the checks are always read in
* the same order; match them up by name and contents. (The
* name *should* be unique, but...)
*/
for (j = 0; j < n; check2++, j++)
{


@ -6,7 +6,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/gist/gist.c,v 1.71 2001/03/07 21:20:26 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/gist/gist.c,v 1.72 2001/03/22 03:59:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -34,31 +34,31 @@ static void gistdoinsert(Relation r,
IndexTuple itup,
InsertIndexResult *res,
GISTSTATE *GISTstate);
static int gistlayerinsert( Relation r, BlockNumber blkno,
static int gistlayerinsert(Relation r, BlockNumber blkno,
IndexTuple **itup,
int *len,
InsertIndexResult *res,
GISTSTATE *giststate );
static OffsetNumber gistwritebuffer( Relation r,
GISTSTATE *giststate);
static OffsetNumber gistwritebuffer(Relation r,
Page page,
IndexTuple *itup,
int len,
OffsetNumber off,
GISTSTATE *giststate );
static int gistnospace( Page page,
IndexTuple *itvec, int len );
static IndexTuple * gistreadbuffer( Relation r,
Buffer buffer, int *len );
static IndexTuple * gistjoinvector(
GISTSTATE *giststate);
static int gistnospace(Page page,
IndexTuple *itvec, int len);
static IndexTuple *gistreadbuffer(Relation r,
Buffer buffer, int *len);
static IndexTuple *gistjoinvector(
IndexTuple *itvec, int *len,
IndexTuple *additvec, int addlen );
static IndexTuple gistunion( Relation r, IndexTuple *itvec,
int len, GISTSTATE *giststate );
static IndexTuple gistgetadjusted( Relation r,
IndexTuple *additvec, int addlen);
static IndexTuple gistunion(Relation r, IndexTuple *itvec,
int len, GISTSTATE *giststate);
static IndexTuple gistgetadjusted(Relation r,
IndexTuple oldtup,
IndexTuple addtup,
GISTSTATE *giststate );
static IndexTuple * gistSplit(Relation r,
GISTSTATE *giststate);
static IndexTuple *gistSplit(Relation r,
Buffer buffer,
IndexTuple *itup,
int *len,
@ -80,6 +80,7 @@ static void gistcentryinit(GISTSTATE *giststate,
#undef GISTDEBUG
#ifdef GISTDEBUG
static void gist_dumptree(Relation r, int level, BlockNumber blk, OffsetNumber coff);
#endif
/*
@ -92,8 +93,10 @@ gistbuild(PG_FUNCTION_ARGS)
Relation index = (Relation) PG_GETARG_POINTER(1);
IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
Node *oldPred = (Node *) PG_GETARG_POINTER(3);
#ifdef NOT_USED
IndexStrategy istrat = (IndexStrategy) PG_GETARG_POINTER(4);
#endif
HeapScanDesc hscan;
HeapTuple htup;
@ -105,9 +108,11 @@ gistbuild(PG_FUNCTION_ARGS)
int nhtups,
nitups;
Node *pred = indexInfo->ii_Predicate;
#ifndef OMIT_PARTIAL_INDEX
TupleTable tupleTable;
TupleTableSlot *slot;
#endif
ExprContext *econtext;
GISTSTATE giststate;
@ -181,6 +186,7 @@ gistbuild(PG_FUNCTION_ARGS)
nhtups++;
#ifndef OMIT_PARTIAL_INDEX
/*
* If oldPred != NULL, this is an EXTEND INDEX command, so skip
* this tuple if it was already in the existing partial index
@ -262,9 +268,7 @@ gistbuild(PG_FUNCTION_ARGS)
#ifndef OMIT_PARTIAL_INDEX
if (pred != NULL || oldPred != NULL)
{
ExecDropTupleTable(tupleTable, true);
}
#endif /* OMIT_PARTIAL_INDEX */
FreeExprContext(econtext);
@ -297,7 +301,7 @@ gistbuild(PG_FUNCTION_ARGS)
}
#ifdef GISTDEBUG
gist_dumptree(index, 0, GISTP_ROOT, 0);
gist_dumptree(index, 0, GISTP_ROOT, 0);
#endif
PG_RETURN_VOID();
@ -316,8 +320,10 @@ gistinsert(PG_FUNCTION_ARGS)
Datum *datum = (Datum *) PG_GETARG_POINTER(1);
char *nulls = (char *) PG_GETARG_POINTER(2);
ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
#ifdef NOT_USED
Relation heapRel = (Relation) PG_GETARG_POINTER(4);
#endif
InsertIndexResult res;
IndexTuple itup;
@ -406,32 +412,36 @@ gistPageAddItem(GISTSTATE *giststate,
}
static void
gistdoinsert( Relation r,
gistdoinsert(Relation r,
IndexTuple itup,
InsertIndexResult *res,
GISTSTATE *giststate ) {
GISTSTATE *giststate)
{
IndexTuple *instup;
int i,ret,len = 1;
int i,
ret,
len = 1;
instup = ( IndexTuple* ) palloc( sizeof(IndexTuple) );
instup[0] = ( IndexTuple ) palloc( IndexTupleSize( itup ) );
memcpy( instup[0], itup, IndexTupleSize( itup ) );
instup = (IndexTuple *) palloc(sizeof(IndexTuple));
instup[0] = (IndexTuple) palloc(IndexTupleSize(itup));
memcpy(instup[0], itup, IndexTupleSize(itup));
ret = gistlayerinsert(r, GISTP_ROOT, &instup, &len, res, giststate);
if ( ret & SPLITED )
gistnewroot( giststate, r, instup, len );
if (ret & SPLITED)
gistnewroot(giststate, r, instup, len);
for(i=0;i<len;i++)
pfree( instup[i] );
pfree( instup );
for (i = 0; i < len; i++)
pfree(instup[i]);
pfree(instup);
}
static int
gistlayerinsert( Relation r, BlockNumber blkno,
gistlayerinsert(Relation r, BlockNumber blkno,
IndexTuple **itup, /* in - out, has compressed entry */
int *len , /* in - out */
int *len, /* in - out */
InsertIndexResult *res, /* out */
GISTSTATE *giststate ) {
GISTSTATE *giststate)
{
Buffer buffer;
Page page;
OffsetNumber child;
@ -442,7 +452,8 @@ gistlayerinsert( Relation r, BlockNumber blkno,
page = (Page) BufferGetPage(buffer);
opaque = (GISTPageOpaque) PageGetSpecialPointer(page);
if (!(opaque->flags & F_LEAF)) {
if (!(opaque->flags & F_LEAF))
{
/* internal page, so we must walk on tree */
/* len IS equal to 1 */
ItemId iid;
@ -450,36 +461,38 @@ gistlayerinsert( Relation r, BlockNumber blkno,
ItemPointerData oldtid;
IndexTuple oldtup;
child = gistchoose( r, page, *(*itup), giststate );
child = gistchoose(r, page, *(*itup), giststate);
iid = PageGetItemId(page, child);
oldtup = (IndexTuple) PageGetItem(page, iid);
nblkno = ItemPointerGetBlockNumber(&(oldtup->t_tid));
/*
* After this call:
* 1. if child page was splited, then itup contains
* keys for each page
* 2. if child page wasn't splited, then itup contains
* additional for adjustement of current key
* After this call: 1. if the child page was split, then itup
* contains keys for each page; 2. if the child page wasn't split,
* then itup contains an additional key for adjustment of the current key
*/
ret = gistlayerinsert( r, nblkno, itup, len, res, giststate );
ret = gistlayerinsert(r, nblkno, itup, len, res, giststate);
/* nothing inserted in child */
if ( ! (ret & INSERTED) ) {
if (!(ret & INSERTED))
{
ReleaseBuffer(buffer);
return 0x00;
}
/* child was not split */
if ( ! (ret & SPLITED) ) {
IndexTuple newtup = gistgetadjusted( r, oldtup, (*itup)[0], giststate );
if ( ! newtup ) {
if (!(ret & SPLITED))
{
IndexTuple newtup = gistgetadjusted(r, oldtup, (*itup)[0], giststate);
if (!newtup)
{
/* no need to update the key */
ReleaseBuffer(buffer);
return 0x00;
}
pfree( (*itup)[0] ); /* !!! */
pfree((*itup)[0]); /* !!! */
(*itup)[0] = newtup;
}
@ -492,43 +505,54 @@ gistlayerinsert( Relation r, BlockNumber blkno,
ret = INSERTED;
if ( gistnospace(page, (*itup), *len) ) {
if (gistnospace(page, (*itup), *len))
{
/* no space for insertion */
IndexTuple *itvec;
int tlen;
ret |= SPLITED;
itvec = gistreadbuffer( r, buffer, &tlen );
itvec = gistjoinvector( itvec, &tlen, (*itup), *len );
pfree( (*itup) );
(*itup) = gistSplit( r, buffer, itvec, &tlen, giststate,
(opaque->flags & F_LEAF) ? res : NULL ); /*res only for inserting in leaf*/
ReleaseBuffer( buffer );
pfree( itvec );
itvec = gistreadbuffer(r, buffer, &tlen);
itvec = gistjoinvector(itvec, &tlen, (*itup), *len);
pfree((*itup));
(*itup) = gistSplit(r, buffer, itvec, &tlen, giststate,
(opaque->flags & F_LEAF) ? res : NULL); /* res only for
* inserting in leaf */
ReleaseBuffer(buffer);
pfree(itvec);
*len = tlen; /* now tlen >= 2 */
} else {
}
else
{
/* enough space */
OffsetNumber off, l;
OffsetNumber off,
l;
off = ( PageIsEmpty(page) ) ?
off = (PageIsEmpty(page)) ?
FirstOffsetNumber
:
OffsetNumberNext(PageGetMaxOffsetNumber(page));
l = gistwritebuffer( r, page, (*itup), *len, off, giststate );
l = gistwritebuffer(r, page, (*itup), *len, off, giststate);
WriteBuffer(buffer);
/* set res if insert into leaf page, in
this case, len = 1 always */
if ( res && (opaque->flags & F_LEAF) )
/*
* set res if insert into leaf page, in this case, len = 1 always
*/
if (res && (opaque->flags & F_LEAF))
ItemPointerSet(&((*res)->pointerData), blkno, l);
if ( *len > 1 ) { /* previos insert ret & SPLITED != 0 */
if (*len > 1)
{ /* previous insert ret & SPLITED != 0 */
int i;
/* child was splited, so we must form union
* for insertion in parent */
/*
* child was split, so we must form a union for insertion in
* parent
*/
IndexTuple newtup = gistunion(r, (*itup), *len, giststate);
for(i=0; i<*len; i++)
pfree( (*itup)[i] );
for (i = 0; i < *len; i++)
pfree((*itup)[i]);
(*itup)[0] = newtup;
*len = 1;
}
@ -541,18 +565,20 @@ gistlayerinsert( Relation r, BlockNumber blkno,
* Write itup vector to page, has no control of free space
*/
static OffsetNumber
gistwritebuffer( Relation r, Page page, IndexTuple *itup,
int len, OffsetNumber off, GISTSTATE *giststate) {
gistwritebuffer(Relation r, Page page, IndexTuple *itup,
int len, OffsetNumber off, GISTSTATE *giststate)
{
OffsetNumber l = InvalidOffsetNumber;
int i;
GISTENTRY tmpdentry;
IndexTuple newtup;
for(i=0; i<len; i++) {
for (i = 0; i < len; i++)
{
l = gistPageAddItem(giststate, r, page,
(Item) itup[i], IndexTupleSize(itup[i]),
off, LP_USED, &tmpdentry, &newtup);
off = OffsetNumberNext( off );
off = OffsetNumberNext(off);
if (tmpdentry.pred != (((char *) itup[i]) + sizeof(IndexTupleData)) && tmpdentry.pred)
pfree(tmpdentry.pred);
if (itup[i] != newtup)
@ -565,11 +591,13 @@ gistwritebuffer( Relation r, Page page, IndexTuple *itup,
* Check space for itup vector on page
*/
static int
gistnospace( Page page, IndexTuple *itvec, int len ) {
gistnospace(Page page, IndexTuple *itvec, int len)
{
int size = 0;
int i;
for(i=0; i<len; i++)
size += IndexTupleSize( itvec[i] )+4; /* ??? */
for (i = 0; i < len; i++)
size += IndexTupleSize(itvec[i]) + 4; /* ??? */
return (PageGetFreeSpace(page) < size);
}
@ -578,16 +606,18 @@ gistnospace( Page page, IndexTuple *itvec, int len ) {
* Read buffer into itup vector
*/
static IndexTuple *
gistreadbuffer( Relation r, Buffer buffer, int *len /*out*/) {
OffsetNumber i, maxoff;
gistreadbuffer(Relation r, Buffer buffer, int *len /* out */ )
{
OffsetNumber i,
maxoff;
IndexTuple *itvec;
Page p = (Page) BufferGetPage(buffer);
*len=0;
*len = 0;
maxoff = PageGetMaxOffsetNumber(p);
itvec = palloc( sizeof(IndexTuple) * maxoff );
for(i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
itvec[ (*len)++ ] = (IndexTuple) PageGetItem(p, PageGetItemId(p, i));
itvec = palloc(sizeof(IndexTuple) * maxoff);
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
itvec[(*len)++] = (IndexTuple) PageGetItem(p, PageGetItemId(p, i));
return itvec;
}
@ -596,9 +626,10 @@ gistreadbuffer( Relation r, Buffer buffer, int *len /*out*/) {
* join two vectors into one
*/
static IndexTuple *
gistjoinvector( IndexTuple *itvec, int *len, IndexTuple *additvec, int addlen ) {
itvec = (IndexTuple*) repalloc( (void*)itvec, sizeof(IndexTuple) * ( (*len) + addlen ) );
memmove( &itvec[*len], additvec, sizeof(IndexTuple) * addlen );
gistjoinvector(IndexTuple *itvec, int *len, IndexTuple *additvec, int addlen)
{
itvec = (IndexTuple *) repalloc((void *) itvec, sizeof(IndexTuple) * ((*len) + addlen));
memmove(&itvec[*len], additvec, sizeof(IndexTuple) * addlen);
*len += addlen;
return itvec;
}
@ -607,10 +638,12 @@ gistjoinvector( IndexTuple *itvec, int *len, IndexTuple *additvec, int addlen )
* return union of itup vector
*/
static IndexTuple
gistunion( Relation r, IndexTuple *itvec, int len, GISTSTATE *giststate ) {
gistunion(Relation r, IndexTuple *itvec, int len, GISTSTATE *giststate)
{
bytea *evec;
char *datum;
int datumsize, i;
int datumsize,
i;
GISTENTRY centry;
char isnull;
IndexTuple newtup;
@ -618,33 +651,33 @@ gistunion( Relation r, IndexTuple *itvec, int len, GISTSTATE *giststate ) {
evec = (bytea *) palloc(len * sizeof(GISTENTRY) + VARHDRSZ);
VARATT_SIZEP(evec) = len * sizeof(GISTENTRY) + VARHDRSZ;
for ( i = 0 ; i< len ; i++ )
for (i = 0; i < len; i++)
gistdentryinit(giststate, &((GISTENTRY *) VARDATA(evec))[i],
(char*) itvec[i] + sizeof(IndexTupleData),
(Relation)NULL, (Page)NULL, (OffsetNumber)NULL,
IndexTupleSize((IndexTuple)itvec[i]) - sizeof(IndexTupleData), FALSE);
(char *) itvec[i] + sizeof(IndexTupleData),
(Relation) NULL, (Page) NULL, (OffsetNumber) NULL,
IndexTupleSize((IndexTuple) itvec[i]) - sizeof(IndexTupleData), FALSE);
datum = (char *)
DatumGetPointer(FunctionCall2(&giststate->unionFn,
PointerGetDatum(evec),
PointerGetDatum(&datumsize)));
for ( i = 0 ; i< len ; i++ )
if ( ((GISTENTRY *) VARDATA(evec))[i].pred &&
for (i = 0; i < len; i++)
if (((GISTENTRY *) VARDATA(evec))[i].pred &&
((GISTENTRY *) VARDATA(evec))[i].pred !=
((char*)( itvec[i] )+ sizeof(IndexTupleData)) )
pfree( ((GISTENTRY *) VARDATA(evec))[i].pred );
((char *) (itvec[i]) + sizeof(IndexTupleData)))
pfree(((GISTENTRY *) VARDATA(evec))[i].pred);
pfree( evec );
pfree(evec);
gistcentryinit(giststate, &centry, datum,
(Relation)NULL, (Page)NULL, (OffsetNumber)NULL,
(Relation) NULL, (Page) NULL, (OffsetNumber) NULL,
datumsize, FALSE);
isnull = (centry.pred) ? ' ' : 'n';
newtup = (IndexTuple) index_formtuple( r->rd_att, (Datum *) &centry.pred, &isnull );
newtup = (IndexTuple) index_formtuple(r->rd_att, (Datum *) &centry.pred, &isnull);
if (centry.pred != datum)
pfree( datum );
pfree(datum);
return newtup;
}
@ -653,28 +686,31 @@ gistunion( Relation r, IndexTuple *itvec, int len, GISTSTATE *giststate ) {
* Forms union of oldtup and addtup, if union == oldtup then return NULL
*/
static IndexTuple
gistgetadjusted( Relation r, IndexTuple oldtup, IndexTuple addtup, GISTSTATE *giststate ) {
gistgetadjusted(Relation r, IndexTuple oldtup, IndexTuple addtup, GISTSTATE *giststate)
{
bytea *evec;
char *datum;
int datumsize;
bool result;
char isnull;
GISTENTRY centry, *ev0p, *ev1p;
GISTENTRY centry,
*ev0p,
*ev1p;
IndexTuple newtup = NULL;
evec = (bytea *) palloc(2 * sizeof(GISTENTRY) + VARHDRSZ);
VARATT_SIZEP(evec) = 2 * sizeof(GISTENTRY) + VARHDRSZ;
gistdentryinit(giststate, &((GISTENTRY *) VARDATA(evec))[0],
(char*) oldtup + sizeof(IndexTupleData), (Relation) NULL,
(char *) oldtup + sizeof(IndexTupleData), (Relation) NULL,
(Page) NULL, (OffsetNumber) 0,
IndexTupleSize((IndexTuple)oldtup) - sizeof(IndexTupleData), FALSE);
IndexTupleSize((IndexTuple) oldtup) - sizeof(IndexTupleData), FALSE);
ev0p = &((GISTENTRY *) VARDATA(evec))[0];
gistdentryinit(giststate, &((GISTENTRY *) VARDATA(evec))[1],
(char*) addtup + sizeof(IndexTupleData), (Relation) NULL,
(char *) addtup + sizeof(IndexTupleData), (Relation) NULL,
(Page) NULL, (OffsetNumber) 0,
IndexTupleSize((IndexTuple)addtup) - sizeof(IndexTupleData), FALSE);
IndexTupleSize((IndexTuple) addtup) - sizeof(IndexTupleData), FALSE);
ev1p = &((GISTENTRY *) VARDATA(evec))[1];
datum = (char *)
@ -682,36 +718,40 @@ gistgetadjusted( Relation r, IndexTuple oldtup, IndexTuple addtup, GISTSTATE *gi
PointerGetDatum(evec),
PointerGetDatum(&datumsize)));
if ( ! ( ev0p->pred && ev1p->pred ) ) {
result = ( ev0p->pred == NULL && ev1p->pred == NULL );
} else {
if (!(ev0p->pred && ev1p->pred))
result = (ev0p->pred == NULL && ev1p->pred == NULL);
else
{
FunctionCall3(&giststate->equalFn,
PointerGetDatum(ev0p->pred),
PointerGetDatum(datum),
PointerGetDatum(&result));
}
if ( result ) {
if (result)
{
/* no need to update the key */
pfree( datum );
} else {
pfree(datum);
}
else
{
gistcentryinit(giststate, &centry, datum, ev0p->rel, ev0p->page,
ev0p->offset, datumsize, FALSE);
isnull = (centry.pred) ? ' ' : 'n';
newtup = (IndexTuple) index_formtuple( r->rd_att, (Datum *) &centry.pred, &isnull );
newtup = (IndexTuple) index_formtuple(r->rd_att, (Datum *) &centry.pred, &isnull);
newtup->t_tid = oldtup->t_tid;
if (centry.pred != datum)
pfree( datum );
pfree(datum);
}
if ( ev0p->pred &&
ev0p->pred != (char*) oldtup + sizeof(IndexTupleData) )
pfree( ev0p->pred );
if ( ev1p->pred &&
ev1p->pred != (char*) addtup + sizeof(IndexTupleData) )
pfree( ev1p->pred );
pfree( evec );
if (ev0p->pred &&
ev0p->pred != (char *) oldtup + sizeof(IndexTupleData))
pfree(ev0p->pred);
if (ev1p->pred &&
ev1p->pred != (char *) addtup + sizeof(IndexTupleData))
pfree(ev1p->pred);
pfree(evec);
return newtup;
}
@ -728,19 +768,27 @@ gistSplit(Relation r,
InsertIndexResult *res)
{
Page p;
Buffer leftbuf, rightbuf;
Page left, right;
OffsetNumber *spl_left, *spl_right;
IndexTuple *lvectup, *rvectup, *newtup;
int leftoff, rightoff;
BlockNumber lbknum, rbknum;
Buffer leftbuf,
rightbuf;
Page left,
right;
OffsetNumber *spl_left,
*spl_right;
IndexTuple *lvectup,
*rvectup,
*newtup;
int leftoff,
rightoff;
BlockNumber lbknum,
rbknum;
GISTPageOpaque opaque;
char isnull;
GIST_SPLITVEC v;
bytea *entryvec;
bool *decompvec;
GISTENTRY tmpentry;
int i, nlen;
int i,
nlen;
p = (Page) BufferGetPage(buffer);
opaque = (GISTPageOpaque) PageGetSpecialPointer(p);
@ -773,17 +821,17 @@ gistSplit(Relation r,
right = (Page) BufferGetPage(rightbuf);
/* generate the item array */
entryvec = (bytea *) palloc(VARHDRSZ + (*len+1) * sizeof(GISTENTRY));
decompvec = (bool *) palloc(VARHDRSZ + (*len+1) * sizeof(bool));
VARATT_SIZEP(entryvec) = (*len+1) * sizeof(GISTENTRY) + VARHDRSZ;
entryvec = (bytea *) palloc(VARHDRSZ + (*len + 1) * sizeof(GISTENTRY));
decompvec = (bool *) palloc(VARHDRSZ + (*len + 1) * sizeof(bool));
VARATT_SIZEP(entryvec) = (*len + 1) * sizeof(GISTENTRY) + VARHDRSZ;
for (i = 1; i <= *len; i++)
{
gistdentryinit(giststate, &((GISTENTRY *) VARDATA(entryvec))[i],
(((char *) itup[i-1]) + sizeof(IndexTupleData)),
(((char *) itup[i - 1]) + sizeof(IndexTupleData)),
r, p, i,
IndexTupleSize(itup[i-1]) - sizeof(IndexTupleData), FALSE);
IndexTupleSize(itup[i - 1]) - sizeof(IndexTupleData), FALSE);
if ((char *) (((GISTENTRY *) VARDATA(entryvec))[i].pred)
== (((char *) itup[i-1]) + sizeof(IndexTupleData)))
== (((char *) itup[i - 1]) + sizeof(IndexTupleData)))
decompvec[i] = FALSE;
else
decompvec[i] = TRUE;
@ -801,35 +849,43 @@ gistSplit(Relation r,
pfree(entryvec);
pfree(decompvec);
spl_left = v.spl_left; spl_right = v.spl_right;
spl_left = v.spl_left;
spl_right = v.spl_right;
/* form left and right vector */
lvectup = (IndexTuple*) palloc( sizeof( IndexTuple )*v.spl_nleft );
rvectup = (IndexTuple*) palloc( sizeof( IndexTuple )*v.spl_nright );
lvectup = (IndexTuple *) palloc(sizeof(IndexTuple) * v.spl_nleft);
rvectup = (IndexTuple *) palloc(sizeof(IndexTuple) * v.spl_nright);
leftoff = rightoff = 0;
for( i=1; i <= *len; i++ ) {
if (i == *(spl_left) || ( i==*len && *(spl_left) != FirstOffsetNumber ) ) {
lvectup[ leftoff++ ] = itup[ i-1 ];
for (i = 1; i <= *len; i++)
{
if (i == *(spl_left) || (i == *len && *(spl_left) != FirstOffsetNumber))
{
lvectup[leftoff++] = itup[i - 1];
spl_left++;
} else {
rvectup[ rightoff++ ] = itup[ i-1 ];
}
else
{
rvectup[rightoff++] = itup[i - 1];
spl_right++;
}
}
/* write on disk (may be need another split) */
if ( gistnospace(right, rvectup, v.spl_nright) ) {
if (gistnospace(right, rvectup, v.spl_nright))
{
nlen = v.spl_nright;
newtup = gistSplit(r, rightbuf, rvectup, &nlen, giststate,
( res && rvectup[ nlen-1 ] == itup[ *len - 1 ] ) ? res : NULL );
ReleaseBuffer( rightbuf );
} else {
(res && rvectup[nlen - 1] == itup[*len - 1]) ? res : NULL);
ReleaseBuffer(rightbuf);
}
else
{
OffsetNumber l;
l = gistwritebuffer( r, right, rvectup, v.spl_nright, FirstOffsetNumber, giststate );
l = gistwritebuffer(r, right, rvectup, v.spl_nright, FirstOffsetNumber, giststate);
WriteBuffer(rightbuf);
if ( res )
if (res)
ItemPointerSet(&((*res)->pointerData), rbknum, l);
gistcentryinit(giststate, &tmpentry, v.spl_rdatum, (Relation) NULL,
(Page) NULL, (OffsetNumber) 0,
@ -839,32 +895,35 @@ gistSplit(Relation r,
v.spl_rdatum = tmpentry.pred;
nlen = 1;
newtup = (IndexTuple*) palloc( sizeof(IndexTuple) * 1);
isnull = ( v.spl_rdatum ) ? ' ' : 'n';
newtup = (IndexTuple *) palloc(sizeof(IndexTuple) * 1);
isnull = (v.spl_rdatum) ? ' ' : 'n';
newtup[0] = (IndexTuple) index_formtuple(r->rd_att, (Datum *) &(v.spl_rdatum), &isnull);
ItemPointerSet(&(newtup[0]->t_tid), rbknum, 1);
}
if ( gistnospace(left, lvectup, v.spl_nleft) ) {
if (gistnospace(left, lvectup, v.spl_nleft))
{
int llen = v.spl_nleft;
IndexTuple *lntup;
lntup = gistSplit(r, leftbuf, lvectup, &llen, giststate,
( res && lvectup[ llen-1 ] == itup[ *len - 1 ] ) ? res : NULL );
ReleaseBuffer( leftbuf );
(res && lvectup[llen - 1] == itup[*len - 1]) ? res : NULL);
ReleaseBuffer(leftbuf);
newtup = gistjoinvector( newtup, &nlen, lntup, llen );
pfree( lntup );
} else {
newtup = gistjoinvector(newtup, &nlen, lntup, llen);
pfree(lntup);
}
else
{
OffsetNumber l;
l = gistwritebuffer( r, left, lvectup, v.spl_nleft, FirstOffsetNumber, giststate );
if ( BufferGetBlockNumber(buffer) != GISTP_ROOT)
l = gistwritebuffer(r, left, lvectup, v.spl_nleft, FirstOffsetNumber, giststate);
if (BufferGetBlockNumber(buffer) != GISTP_ROOT)
PageRestoreTempPage(left, p);
WriteBuffer(leftbuf);
if ( res )
if (res)
ItemPointerSet(&((*res)->pointerData), lbknum, l);
gistcentryinit(giststate, &tmpentry, v.spl_ldatum, (Relation) NULL,
(Page) NULL, (OffsetNumber) 0,
@ -874,10 +933,10 @@ gistSplit(Relation r,
v.spl_ldatum = tmpentry.pred;
nlen += 1;
newtup = (IndexTuple*) repalloc( (void*)newtup, sizeof(IndexTuple) * nlen);
isnull = ( v.spl_ldatum ) ? ' ' : 'n';
newtup[nlen-1] = (IndexTuple) index_formtuple(r->rd_att, (Datum *) &(v.spl_ldatum), &isnull);
ItemPointerSet(&(newtup[nlen-1]->t_tid), lbknum, 1);
newtup = (IndexTuple *) repalloc((void *) newtup, sizeof(IndexTuple) * nlen);
isnull = (v.spl_ldatum) ? ' ' : 'n';
newtup[nlen - 1] = (IndexTuple) index_formtuple(r->rd_att, (Datum *) &(v.spl_ldatum), &isnull);
ItemPointerSet(&(newtup[nlen - 1]->t_tid), lbknum, 1);
}
@ -885,10 +944,10 @@ gistSplit(Relation r,
gistadjscans(r, GISTOP_SPLIT, BufferGetBlockNumber(buffer), FirstOffsetNumber);
/* !!! pfree */
pfree( rvectup );
pfree( lvectup );
pfree( v.spl_left );
pfree( v.spl_right );
pfree(rvectup);
pfree(lvectup);
pfree(v.spl_left);
pfree(v.spl_right);
*len = nlen;
return newtup;
@ -904,7 +963,7 @@ gistnewroot(GISTSTATE *giststate, Relation r, IndexTuple *itup, int len)
GISTInitBuffer(b, 0);
p = BufferGetPage(b);
gistwritebuffer( r, p, itup, len, FirstOffsetNumber, giststate );
gistwritebuffer(r, p, itup, len, FirstOffsetNumber, giststate);
WriteBuffer(b);
}
@ -1101,7 +1160,7 @@ gist_tuple_replacekey(Relation r, GISTENTRY entry, IndexTuple t)
char *datum = (((char *) t) + sizeof(IndexTupleData));
/* if new entry fits in index tuple, copy it in */
if ((Size) entry.bytes < IndexTupleSize(t) - sizeof(IndexTupleData) || (Size) entry.bytes == 0 )
if ((Size) entry.bytes < IndexTupleSize(t) - sizeof(IndexTupleData) || (Size) entry.bytes == 0)
{
memcpy(datum, entry.pred, entry.bytes);
/* clear out old size */
@ -1118,7 +1177,7 @@ gist_tuple_replacekey(Relation r, GISTENTRY entry, IndexTuple t)
IndexTuple newtup;
char isnull;
isnull = ( entry.pred ) ? ' ' : 'n';
isnull = (entry.pred) ? ' ' : 'n';
newtup = (IndexTuple) index_formtuple(tupDesc,
(Datum *) &(entry.pred),
&isnull);
@ -1182,37 +1241,39 @@ gist_dumptree(Relation r, int level, BlockNumber blk, OffsetNumber coff)
GISTPageOpaque opaque;
IndexTuple which;
ItemId iid;
OffsetNumber i,maxoff;
OffsetNumber i,
maxoff;
BlockNumber cblk;
char *pred;
pred = (char*) palloc( sizeof(char)*level+1 );
pred = (char *) palloc(sizeof(char) * level + 1);
MemSet(pred, '\t', level);
pred[level]='\0';
pred[level] = '\0';
buffer = ReadBuffer(r, blk);
page = (Page) BufferGetPage(buffer);
opaque = (GISTPageOpaque) PageGetSpecialPointer(page);
maxoff = PageGetMaxOffsetNumber( page );
maxoff = PageGetMaxOffsetNumber(page);
elog(NOTICE,"%sPage: %d %s blk: %d maxoff: %d free: %d", pred, coff, ( opaque->flags & F_LEAF ) ? "LEAF" : "INTE", (int)blk, (int)maxoff, PageGetFreeSpace(page));
elog(NOTICE, "%sPage: %d %s blk: %d maxoff: %d free: %d", pred, coff, (opaque->flags & F_LEAF) ? "LEAF" : "INTE", (int) blk, (int) maxoff, PageGetFreeSpace(page));
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i)) {
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
{
iid = PageGetItemId(page, i);
which = (IndexTuple) PageGetItem(page, iid);
cblk = ItemPointerGetBlockNumber(&(which->t_tid));
#ifdef PRINTTUPLE
elog(NOTICE,"%s Tuple. blk: %d size: %d", pred, (int)cblk, IndexTupleSize( which ) );
elog(NOTICE, "%s Tuple. blk: %d size: %d", pred, (int) cblk, IndexTupleSize(which));
#endif
if ( ! ( opaque->flags & F_LEAF ) ) {
gist_dumptree( r, level+1, cblk, i );
}
if (!(opaque->flags & F_LEAF))
gist_dumptree(r, level + 1, cblk, i);
}
ReleaseBuffer(buffer);
pfree(pred);
}
#endif /* defined GISTDEBUG */
void
@ -1228,7 +1289,6 @@ gist_undo(XLogRecPtr lsn, XLogRecord *record)
}
void
gist_desc(char *buf, uint8 xl_info, char* rec)
gist_desc(char *buf, uint8 xl_info, char *rec)
{
}
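[Editor's note: the gistSplit() hunks above build lvectup/rvectup by walking the picksplit result, where v.spl_left/v.spl_right are 1-based offset lists naming which tuples go to each half. The standalone sketch below (hypothetical names, not part of this commit) shows the same partitioning idea in isolation; it omits the "force the last item left" special case of the real loop.]

#include <stdio.h>

/*
 * Partition items[0..nitems-1] into left[] and right[] according to a
 * sorted, 1-based offset list, the way a picksplit-style routine
 * describes a page split.  Returns the number of items placed left.
 */
static int
split_items(const int *items, int nitems,
			const int *left_offsets, int nleft,
			int *left, int *right)
{
	int			i,
				li = 0,
				ri = 0;

	for (i = 1; i <= nitems; i++)
	{
		if (li < nleft && i == left_offsets[li])
			left[li++] = items[i - 1];	/* offset chosen for the left page */
		else
			right[ri++] = items[i - 1]; /* everything else goes right */
	}
	return li;
}

int
main(void)
{
	int			items[6] = {10, 20, 30, 40, 50, 60};
	int			left_offsets[3] = {1, 3, 6};	/* 1-based, sorted */
	int			left[6],
				right[6];
	int			nleft = split_items(items, 6, left_offsets, 3, left, right);
	int			i;

	for (i = 0; i < nleft; i++)
		printf("left:  %d\n", left[i]);
	for (i = 0; i < 6 - nleft; i++)
		printf("right: %d\n", right[i]);
	return 0;
}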

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.49 2001/02/22 21:48:49 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.50 2001/03/22 03:59:12 momjian Exp $
*
* NOTES
* This file contains only the public interface routines.
@ -45,8 +45,10 @@ hashbuild(PG_FUNCTION_ARGS)
Relation index = (Relation) PG_GETARG_POINTER(1);
IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
Node *oldPred = (Node *) PG_GETARG_POINTER(3);
#ifdef NOT_USED
IndexStrategy istrat = (IndexStrategy) PG_GETARG_POINTER(4);
#endif
HeapScanDesc hscan;
HeapTuple htup;
@ -59,9 +61,11 @@ hashbuild(PG_FUNCTION_ARGS)
nitups;
HashItem hitem;
Node *pred = indexInfo->ii_Predicate;
#ifndef OMIT_PARTIAL_INDEX
TupleTable tupleTable;
TupleTableSlot *slot;
#endif
ExprContext *econtext;
InsertIndexResult res = NULL;
@ -117,6 +121,7 @@ hashbuild(PG_FUNCTION_ARGS)
nhtups++;
#ifndef OMIT_PARTIAL_INDEX
/*
* If oldPred != NULL, this is an EXTEND INDEX command, so skip
* this tuple if it was already in the existing partial index
@ -191,9 +196,7 @@ hashbuild(PG_FUNCTION_ARGS)
#ifndef OMIT_PARTIAL_INDEX
if (pred != NULL || oldPred != NULL)
{
ExecDropTupleTable(tupleTable, true);
}
#endif /* OMIT_PARTIAL_INDEX */
FreeExprContext(econtext);
@ -245,8 +248,10 @@ hashinsert(PG_FUNCTION_ARGS)
Datum *datum = (Datum *) PG_GETARG_POINTER(1);
char *nulls = (char *) PG_GETARG_POINTER(2);
ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
#ifdef NOT_USED
Relation heapRel = (Relation) PG_GETARG_POINTER(4);
#endif
InsertIndexResult res;
HashItem hitem;
@ -327,8 +332,10 @@ Datum
hashrescan(PG_FUNCTION_ARGS)
{
IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
#ifdef NOT_USED /* XXX surely it's wrong to ignore this? */
bool fromEnd = PG_GETARG_BOOL(1);
#endif
ScanKey scankey = (ScanKey) PG_GETARG_POINTER(2);
ItemPointer iptr;
@ -493,6 +500,6 @@ hash_undo(XLogRecPtr lsn, XLogRecord *record)
}
void
hash_desc(char *buf, uint8 xl_info, char* rec)
hash_desc(char *buf, uint8 xl_info, char *rec)
{
}

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.29 2001/01/24 19:42:47 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.30 2001/03/22 03:59:13 momjian Exp $
*
* NOTES
* These functions are stored in pg_amproc. For each operator class
@ -25,32 +25,32 @@
Datum
hashchar(PG_FUNCTION_ARGS)
{
PG_RETURN_UINT32(~ ((uint32) PG_GETARG_CHAR(0)));
PG_RETURN_UINT32(~((uint32) PG_GETARG_CHAR(0)));
}
Datum
hashint2(PG_FUNCTION_ARGS)
{
PG_RETURN_UINT32(~ ((uint32) PG_GETARG_INT16(0)));
PG_RETURN_UINT32(~((uint32) PG_GETARG_INT16(0)));
}
Datum
hashint4(PG_FUNCTION_ARGS)
{
PG_RETURN_UINT32(~ PG_GETARG_UINT32(0));
PG_RETURN_UINT32(~PG_GETARG_UINT32(0));
}
Datum
hashint8(PG_FUNCTION_ARGS)
{
/* we just use the low 32 bits... */
PG_RETURN_UINT32(~ ((uint32) PG_GETARG_INT64(0)));
PG_RETURN_UINT32(~((uint32) PG_GETARG_INT64(0)));
}
Datum
hashoid(PG_FUNCTION_ARGS)
{
PG_RETURN_UINT32(~ ((uint32) PG_GETARG_OID(0)));
PG_RETURN_UINT32(~((uint32) PG_GETARG_OID(0)));
}
Datum
@ -93,7 +93,7 @@ hashint2vector(PG_FUNCTION_ARGS)
Datum
hashname(PG_FUNCTION_ARGS)
{
char *key = NameStr(* PG_GETARG_NAME(0));
char *key = NameStr(*PG_GETARG_NAME(0));
return hash_any((char *) key, NAMEDATALEN);
}
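[Editor's note: the hashfunc.c changes above are cosmetic, but they highlight how the fixed-width hash support functions work: the hash is derived directly from the key value (here its bitwise complement, as in hashint4() above), so equal keys always hash equally, while variable-length keys go through hash_any(). A minimal standalone illustration, with a hypothetical helper name:]

#include <stdio.h>
#include <stdint.h>

/* Hash a fixed-width integer key the same way hashint4() above does. */
static uint32_t
hash_int4(int32_t key)
{
	return ~((uint32_t) key);	/* bitwise complement of the value itself */
}

int
main(void)
{
	printf("hash(42)  = %u\n", (unsigned) hash_int4(42));
	printf("hash(42)  = %u (same key, same hash)\n", (unsigned) hash_int4(42));
	printf("hash(-42) = %u\n", (unsigned) hash_int4(-42));
	return 0;
}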

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.110 2001/01/24 19:42:47 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.111 2001/03/22 03:59:13 momjian Exp $
*
*
* INTERFACE ROUTINES
@ -564,7 +564,8 @@ fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc,
)
);
}
#endif /* defined(DISABLE_COMPLEX_MACRO)*/
#endif /* defined(DISABLE_COMPLEX_MACRO) */
/* ----------------------------------------------------------------
@ -791,8 +792,8 @@ heap_beginscan(Relation relation,
scan->rs_nkeys = (short) nkeys;
/*
* we do this here instead of in initscan() because heap_rescan
* also calls initscan() and we don't want to allocate memory again
* we do this here instead of in initscan() because heap_rescan also
* calls initscan() and we don't want to allocate memory again
*/
if (nkeys)
scan->rs_key = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys);
@ -1374,7 +1375,7 @@ heap_insert(Relation relation, HeapTuple tup)
xlrec.target.node = relation->rd_node;
xlrec.target.tid = tup->t_self;
rdata[0].buffer = InvalidBuffer;
rdata[0].data = (char*)&xlrec;
rdata[0].data = (char *) &xlrec;
rdata[0].len = SizeOfHeapInsert;
rdata[0].next = &(rdata[1]);
@ -1383,12 +1384,12 @@ heap_insert(Relation relation, HeapTuple tup)
xlhdr.t_hoff = tup->t_data->t_hoff;
xlhdr.mask = tup->t_data->t_infomask;
rdata[1].buffer = buffer;
rdata[1].data = (char*)&xlhdr;
rdata[1].data = (char *) &xlhdr;
rdata[1].len = SizeOfHeapHeader;
rdata[1].next = &(rdata[2]);
rdata[2].buffer = buffer;
rdata[2].data = (char*) tup->t_data + offsetof(HeapTupleHeaderData, t_bits);
rdata[2].data = (char *) tup->t_data + offsetof(HeapTupleHeaderData, t_bits);
rdata[2].len = tup->t_len - offsetof(HeapTupleHeaderData, t_bits);
rdata[2].next = NULL;
@ -1411,10 +1412,10 @@ heap_insert(Relation relation, HeapTuple tup)
WriteBuffer(buffer);
/*
* If tuple is cachable, mark it for rollback from the caches
* in case we abort. Note it is OK to do this after WriteBuffer
* releases the buffer, because the "tup" data structure is all
* in local memory, not in the shared buffer.
* If tuple is cachable, mark it for rollback from the caches in case
* we abort. Note it is OK to do this after WriteBuffer releases the
* buffer, because the "tup" data structure is all in local memory,
* not in the shared buffer.
*/
RelationMark4RollbackHeapTuple(relation, tup);
@ -1520,7 +1521,7 @@ l1:
xlrec.target.node = relation->rd_node;
xlrec.target.tid = tp.t_self;
rdata[0].buffer = InvalidBuffer;
rdata[0].data = (char*)&xlrec;
rdata[0].data = (char *) &xlrec;
rdata[0].len = SizeOfHeapDelete;
rdata[0].next = &(rdata[1]);
@ -1551,9 +1552,10 @@ l1:
#endif
/*
* Mark tuple for invalidation from system caches at next command boundary.
* We have to do this before WriteBuffer because we need to look at the
* contents of the tuple, so we need to hold our refcount on the buffer.
* Mark tuple for invalidation from system caches at next command
* boundary. We have to do this before WriteBuffer because we need to
* look at the contents of the tuple, so we need to hold our refcount
* on the buffer.
*/
RelationInvalidateHeapTuple(relation, &tp);
@ -1636,6 +1638,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
oldtup.t_data = (HeapTupleHeader) PageGetItem(dp, lp);
oldtup.t_len = ItemIdGetLength(lp);
oldtup.t_self = *otid;
/*
* Note: beyond this point, use oldtup not otid to refer to old tuple.
* otid may very well point at newtup->t_self, which we will overwrite
@ -1701,18 +1704,19 @@ l2:
/*
* If the toaster needs to be activated, OR if the new tuple will not
* fit on the same page as the old, then we need to release the context
* lock (but not the pin!) on the old tuple's buffer while we are off
* doing TOAST and/or table-file-extension work. We must mark the old
* tuple to show that it's already being updated, else other processes
* may try to update it themselves. To avoid second XLOG log record,
* we use xact mgr hook to unlock old tuple without reading log if xact
* will abort before update is logged. In the event of a crash prior to logging,
* TQUAL routines will see HEAP_XMAX_UNLOGGED flag...
* fit on the same page as the old, then we need to release the
* context lock (but not the pin!) on the old tuple's buffer while we
* are off doing TOAST and/or table-file-extension work. We must mark
* the old tuple to show that it's already being updated, else other
* processes may try to update it themselves. To avoid second XLOG log
* record, we use xact mgr hook to unlock old tuple without reading
* log if xact will abort before update is logged. In the event of
* a crash prior to logging, TQUAL routines will see HEAP_XMAX_UNLOGGED
* flag...
*
* NOTE: this trick is useless currently but saved for future
* when we'll implement UNDO and will re-use transaction IDs
* after postmaster startup.
* NOTE: this trick is useless currently but saved for future when we'll
* implement UNDO and will re-use transaction IDs after postmaster
* startup.
*
* We need to invoke the toaster if there are already any toasted values
* present, or if the new tuple is over-threshold.
@ -1726,7 +1730,7 @@ l2:
{
_locked_tuple_.node = relation->rd_node;
_locked_tuple_.tid = oldtup.t_self;
XactPushRollback(_heap_unlock_tuple, (void*) &_locked_tuple_);
XactPushRollback(_heap_unlock_tuple, (void *) &_locked_tuple_);
TransactionIdStore(GetCurrentTransactionId(),
&(oldtup.t_data->t_xmax));
@ -1814,10 +1818,10 @@ l2:
WriteBuffer(buffer);
/*
* If new tuple is cachable, mark it for rollback from the caches
* in case we abort. Note it is OK to do this after WriteBuffer
* releases the buffer, because the "newtup" data structure is all
* in local memory, not in the shared buffer.
* If new tuple is cachable, mark it for rollback from the caches in
* case we abort. Note it is OK to do this after WriteBuffer releases
* the buffer, because the "newtup" data structure is all in local
* memory, not in the shared buffer.
*/
RelationMark4RollbackHeapTuple(relation, newtup);
@ -2136,7 +2140,7 @@ log_heap_clean(Relation reln, Buffer buffer, char *unused, int unlen)
xlrec.node = reln->rd_node;
xlrec.block = BufferGetBlockNumber(buffer);
rdata[0].buffer = InvalidBuffer;
rdata[0].data = (char*)&xlrec;
rdata[0].data = (char *) &xlrec;
rdata[0].len = SizeOfHeapClean;
rdata[0].next = &(rdata[1]);
@ -2157,7 +2161,7 @@ log_heap_clean(Relation reln, Buffer buffer, char *unused, int unlen)
recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_CLEAN, rdata);
return(recptr);
return (recptr);
}
static XLogRecPtr
@ -2166,7 +2170,7 @@ log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
{
char tbuf[MAXALIGN(sizeof(xl_heap_header)) + 2 * sizeof(TransactionId)];
xl_heap_update xlrec;
xl_heap_header *xlhdr = (xl_heap_header*) tbuf;
xl_heap_header *xlhdr = (xl_heap_header *) tbuf;
int hsize = SizeOfHeapHeader;
XLogRecPtr recptr;
XLogRecData rdata[4];
@ -2177,7 +2181,7 @@ log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
xlrec.target.tid = from;
xlrec.newtid = newtup->t_self;
rdata[0].buffer = InvalidBuffer;
rdata[0].data = (char*)&xlrec;
rdata[0].data = (char *) &xlrec;
rdata[0].len = SizeOfHeapUpdate;
rdata[0].next = &(rdata[1]);
@ -2205,12 +2209,12 @@ log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
hsize += (2 * sizeof(TransactionId));
}
rdata[2].buffer = newbuf;
rdata[2].data = (char*)xlhdr;
rdata[2].data = (char *) xlhdr;
rdata[2].len = hsize;
rdata[2].next = &(rdata[3]);
rdata[3].buffer = newbuf;
rdata[3].data = (char*) newtup->t_data + offsetof(HeapTupleHeaderData, t_bits);
rdata[3].data = (char *) newtup->t_data + offsetof(HeapTupleHeaderData, t_bits);
rdata[3].len = newtup->t_len - offsetof(HeapTupleHeaderData, t_bits);
rdata[3].next = NULL;
@ -2224,20 +2228,20 @@ log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
recptr = XLogInsert(RM_HEAP_ID, info, rdata);
return(recptr);
return (recptr);
}
XLogRecPtr
log_heap_move(Relation reln, Buffer oldbuf, ItemPointerData from,
Buffer newbuf, HeapTuple newtup)
{
return(log_heap_update(reln, oldbuf, from, newbuf, newtup, true));
return (log_heap_update(reln, oldbuf, from, newbuf, newtup, true));
}
static void
heap_xlog_clean(bool redo, XLogRecPtr lsn, XLogRecord *record)
{
xl_heap_clean *xlrec = (xl_heap_clean*) XLogRecGetData(record);
xl_heap_clean *xlrec = (xl_heap_clean *) XLogRecGetData(record);
Relation reln;
Buffer buffer;
Page page;
@ -2267,14 +2271,14 @@ heap_xlog_clean(bool redo, XLogRecPtr lsn, XLogRecord *record)
if (record->xl_len > SizeOfHeapClean)
{
char unbuf[BLCKSZ];
OffsetNumber *unused = (OffsetNumber*)unbuf;
OffsetNumber *unused = (OffsetNumber *) unbuf;
char *unend;
ItemId lp;
memcpy(unbuf, (char*)xlrec + SizeOfHeapClean, record->xl_len - SizeOfHeapClean);
memcpy(unbuf, (char *) xlrec + SizeOfHeapClean, record->xl_len - SizeOfHeapClean);
unend = unbuf + (record->xl_len - SizeOfHeapClean);
while((char*)unused < unend)
while ((char *) unused < unend)
{
lp = ((PageHeader) page)->pd_linp + *unused;
lp->lp_flags &= ~LP_USED;
@ -2289,7 +2293,7 @@ heap_xlog_clean(bool redo, XLogRecPtr lsn, XLogRecord *record)
static void
heap_xlog_delete(bool redo, XLogRecPtr lsn, XLogRecord *record)
{
xl_heap_delete *xlrec = (xl_heap_delete*) XLogRecGetData(record);
xl_heap_delete *xlrec = (xl_heap_delete *) XLogRecGetData(record);
Relation reln = XLogOpenRelation(redo, RM_HEAP_ID, xlrec->target.node);
Buffer buffer;
Page page;
@ -2320,7 +2324,8 @@ heap_xlog_delete(bool redo, XLogRecPtr lsn, XLogRecord *record)
return;
}
}
else if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied ?! */
else if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied
* ?! */
elog(STOP, "heap_delete_undo: bad page LSN");
offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
@ -2350,7 +2355,7 @@ heap_xlog_delete(bool redo, XLogRecPtr lsn, XLogRecord *record)
static void
heap_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
{
xl_heap_insert *xlrec = (xl_heap_insert*) XLogRecGetData(record);
xl_heap_insert *xlrec = (xl_heap_insert *) XLogRecGetData(record);
Relation reln = XLogOpenRelation(redo, RM_HEAP_ID, xlrec->target.node);
Buffer buffer;
Page page;
@ -2396,9 +2401,9 @@ heap_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
elog(STOP, "heap_insert_redo: invalid max offset number");
newlen = record->xl_len - SizeOfHeapInsert - SizeOfHeapHeader;
memcpy((char*)&xlhdr, (char*)xlrec + SizeOfHeapInsert, SizeOfHeapHeader);
memcpy((char *) &xlhdr, (char *) xlrec + SizeOfHeapInsert, SizeOfHeapHeader);
memcpy(tbuf + offsetof(HeapTupleHeaderData, t_bits),
(char*)xlrec + SizeOfHeapInsert + SizeOfHeapHeader, newlen);
(char *) xlrec + SizeOfHeapInsert + SizeOfHeapHeader, newlen);
newlen += offsetof(HeapTupleHeaderData, t_bits);
htup = (HeapTupleHeader) tbuf;
htup->t_oid = xlhdr.t_oid;
@ -2409,7 +2414,7 @@ heap_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
htup->t_xmax = htup->t_cmax = 0;
htup->t_infomask = HEAP_XMAX_INVALID | xlhdr.mask;
offnum = PageAddItem(page, (Item)htup, newlen, offnum,
offnum = PageAddItem(page, (Item) htup, newlen, offnum,
LP_USED | OverwritePageMode);
if (offnum == InvalidOffsetNumber)
elog(STOP, "heap_insert_redo: failed to add tuple");
@ -2420,7 +2425,8 @@ heap_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
}
/* undo insert */
if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied ?! */
if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied
* ?! */
elog(STOP, "heap_insert_undo: bad page LSN");
elog(STOP, "heap_insert_undo: unimplemented");
@ -2432,7 +2438,7 @@ heap_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
static void
heap_xlog_update(bool redo, XLogRecPtr lsn, XLogRecord *record, bool move)
{
xl_heap_update *xlrec = (xl_heap_update*) XLogRecGetData(record);
xl_heap_update *xlrec = (xl_heap_update *) XLogRecGetData(record);
Relation reln = XLogOpenRelation(redo, RM_HEAP_ID, xlrec->target.node);
Buffer buffer;
bool samepage =
@ -2470,7 +2476,8 @@ heap_xlog_update(bool redo, XLogRecPtr lsn, XLogRecord *record, bool move)
goto newt;
}
}
else if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied ?! */
else if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied
* ?! */
elog(STOP, "heap_update_undo: bad old tuple page LSN");
offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
@ -2557,9 +2564,9 @@ newsame:;
hsize += (2 * sizeof(TransactionId));
newlen = record->xl_len - hsize;
memcpy((char*)&xlhdr, (char*)xlrec + SizeOfHeapUpdate, SizeOfHeapHeader);
memcpy((char *) &xlhdr, (char *) xlrec + SizeOfHeapUpdate, SizeOfHeapHeader);
memcpy(tbuf + offsetof(HeapTupleHeaderData, t_bits),
(char*)xlrec + hsize, newlen);
(char *) xlrec + hsize, newlen);
newlen += offsetof(HeapTupleHeaderData, t_bits);
htup = (HeapTupleHeader) tbuf;
htup->t_oid = xlhdr.t_oid;
@ -2568,9 +2575,9 @@ newsame:;
if (move)
{
hsize = SizeOfHeapUpdate + SizeOfHeapHeader;
memcpy(&(htup->t_xmax), (char*)xlrec + hsize, sizeof(TransactionId));
memcpy(&(htup->t_xmax), (char *) xlrec + hsize, sizeof(TransactionId));
memcpy(&(htup->t_xmin),
(char*)xlrec + hsize + sizeof(TransactionId), sizeof(TransactionId));
(char *) xlrec + hsize + sizeof(TransactionId), sizeof(TransactionId));
TransactionIdStore(record->xl_xid, (TransactionId *) &(htup->t_cmin));
htup->t_infomask = xlhdr.mask;
htup->t_infomask &= ~(HEAP_XMIN_COMMITTED |
@ -2585,7 +2592,7 @@ newsame:;
htup->t_infomask = HEAP_XMAX_INVALID | xlhdr.mask;
}
offnum = PageAddItem(page, (Item)htup, newlen, offnum,
offnum = PageAddItem(page, (Item) htup, newlen, offnum,
LP_USED | OverwritePageMode);
if (offnum == InvalidOffsetNumber)
elog(STOP, "heap_update_redo: failed to add tuple");
@ -2596,7 +2603,8 @@ newsame:;
}
/* undo */
if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied ?! */
if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied
* ?! */
elog(STOP, "heap_update_undo: bad new tuple page LSN");
elog(STOP, "heap_update_undo: unimplemented");
@ -2606,7 +2614,7 @@ newsame:;
static void
_heap_unlock_tuple(void *data)
{
xl_heaptid *xltid = (xl_heaptid*) data;
xl_heaptid *xltid = (xl_heaptid *) data;
Relation reln = XLogOpenRelation(false, RM_HEAP_ID, xltid->node);
Buffer buffer;
Page page;
@ -2645,7 +2653,8 @@ _heap_unlock_tuple(void *data)
return;
}
void heap_redo(XLogRecPtr lsn, XLogRecord *record)
void
heap_redo(XLogRecPtr lsn, XLogRecord *record)
{
uint8 info = record->xl_info & ~XLR_INFO_MASK;
@ -2664,7 +2673,8 @@ void heap_redo(XLogRecPtr lsn, XLogRecord *record)
elog(STOP, "heap_redo: unknown op code %u", info);
}
void heap_undo(XLogRecPtr lsn, XLogRecord *record)
void
heap_undo(XLogRecPtr lsn, XLogRecord *record)
{
uint8 info = record->xl_info & ~XLR_INFO_MASK;
@ -2693,26 +2703,29 @@ out_target(char *buf, xl_heaptid *target)
}
void
heap_desc(char *buf, uint8 xl_info, char* rec)
heap_desc(char *buf, uint8 xl_info, char *rec)
{
uint8 info = xl_info & ~XLR_INFO_MASK;
info &= XLOG_HEAP_OPMASK;
if (info == XLOG_HEAP_INSERT)
{
xl_heap_insert *xlrec = (xl_heap_insert*) rec;
xl_heap_insert *xlrec = (xl_heap_insert *) rec;
strcat(buf, "insert: ");
out_target(buf, &(xlrec->target));
}
else if (info == XLOG_HEAP_DELETE)
{
xl_heap_delete *xlrec = (xl_heap_delete*) rec;
xl_heap_delete *xlrec = (xl_heap_delete *) rec;
strcat(buf, "delete: ");
out_target(buf, &(xlrec->target));
}
else if (info == XLOG_HEAP_UPDATE || info == XLOG_HEAP_MOVE)
{
xl_heap_update *xlrec = (xl_heap_update*) rec;
xl_heap_update *xlrec = (xl_heap_update *) rec;
if (info == XLOG_HEAP_UPDATE)
strcat(buf, "update: ");
else
@ -2724,7 +2737,8 @@ heap_desc(char *buf, uint8 xl_info, char* rec)
}
else if (info == XLOG_HEAP_CLEAN)
{
xl_heap_clean *xlrec = (xl_heap_clean*) rec;
xl_heap_clean *xlrec = (xl_heap_clean *) rec;
sprintf(buf + strlen(buf), "clean: node %u/%u; blk %u",
xlrec->node.tblNode, xlrec->node.relNode, xlrec->block);
}
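[Editor's note: the heap_insert()/heap_update() hunks above assemble each WAL record from a short chain of XLogRecData entries (fixed record header, tuple header, tuple body) linked through .next, instead of copying everything into one buffer first. The standalone sketch below uses simplified, hypothetical types to show that gather idea; it is not the real xlog API.]

#include <stdio.h>
#include <string.h>

/* Simplified stand-in for an XLogRecData-style chain element. */
typedef struct RecData
{
	const void *data;			/* one piece of the record */
	size_t		len;			/* length of that piece */
	struct RecData *next;		/* next piece, or NULL */
} RecData;

/* Gather every piece into one contiguous buffer; return total length. */
static size_t
assemble_record(const RecData *rd, char *out, size_t outsize)
{
	size_t		total = 0;

	for (; rd != NULL; rd = rd->next)
	{
		if (total + rd->len > outsize)
			return 0;			/* caller's buffer is too small */
		memcpy(out + total, rd->data, rd->len);
		total += rd->len;
	}
	return total;
}

int
main(void)
{
	const char	hdr[] = "HDR";
	const char	body[] = "tuple-bytes";
	char		buf[64];
	RecData		rd1 = {body, sizeof(body) - 1, NULL};
	RecData		rd0 = {hdr, sizeof(hdr) - 1, &rd1};

	printf("record length = %lu\n",
		   (unsigned long) assemble_record(&rd0, buf, sizeof(buf)));
	return 0;
}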

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Id: hio.c,v 1.35 2001/01/24 19:42:48 momjian Exp $
* $Id: hio.c,v 1.36 2001/03/22 03:59:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -92,7 +92,7 @@ RelationGetBufferForTuple(Relation relation, Size len)
*/
if (len > MaxTupleSize)
elog(ERROR, "Tuple is too big: size %lu, max size %ld",
(unsigned long)len, MaxTupleSize);
(unsigned long) len, MaxTupleSize);
if (!relation->rd_myxactonly)
LockPage(relation, 0, ExclusiveLock);
@ -140,13 +140,13 @@ RelationGetBufferForTuple(Relation relation, Size len)
{
/* We should not get here given the test at the top */
elog(STOP, "Tuple is too big: size %lu",
(unsigned long)len);
(unsigned long) len);
}
}
if (!relation->rd_myxactonly)
UnlockPage(relation, 0, ExclusiveLock);
return(buffer);
return (buffer);
}

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/heap/tuptoaster.c,v 1.17 2001/02/15 20:57:01 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/heap/tuptoaster.c,v 1.18 2001/03/22 03:59:13 momjian Exp $
*
*
* INTERFACE ROUTINES
@ -124,11 +124,11 @@ heap_tuple_untoast_attr(varattrib *attr)
varattrib *tmp;
tmp = toast_fetch_datum(attr);
result = (varattrib *)palloc(attr->va_content.va_external.va_rawsize
result = (varattrib *) palloc(attr->va_content.va_external.va_rawsize
+ VARHDRSZ);
VARATT_SIZEP(result) = attr->va_content.va_external.va_rawsize
+ VARHDRSZ;
pglz_decompress((PGLZ_Header *)tmp, VARATT_DATA(result));
pglz_decompress((PGLZ_Header *) tmp, VARATT_DATA(result));
pfree(tmp);
}
@ -147,11 +147,11 @@ heap_tuple_untoast_attr(varattrib *attr)
* This is a compressed value inside of the main tuple
* ----------
*/
result = (varattrib *)palloc(attr->va_content.va_compressed.va_rawsize
result = (varattrib *) palloc(attr->va_content.va_compressed.va_rawsize
+ VARHDRSZ);
VARATT_SIZEP(result) = attr->va_content.va_compressed.va_rawsize
+ VARHDRSZ;
pglz_decompress((PGLZ_Header *)attr, VARATT_DATA(result));
pglz_decompress((PGLZ_Header *) attr, VARATT_DATA(result));
}
else
/* ----------
@ -270,11 +270,11 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* For UPDATE get the old and new values of this attribute
* ----------
*/
old_value = (varattrib *)DatumGetPointer(
old_value = (varattrib *) DatumGetPointer(
heap_getattr(oldtup, i + 1, tupleDesc, &old_isnull));
toast_values[i] =
heap_getattr(newtup, i + 1, tupleDesc, &new_isnull);
new_value = (varattrib *)DatumGetPointer(toast_values[i]);
new_value = (varattrib *) DatumGetPointer(toast_values[i]);
/* ----------
* If the old value is an external stored one, check if it
@ -356,7 +356,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
if (VARATT_IS_EXTERNAL(DatumGetPointer(toast_values[i])))
{
toast_values[i] = PointerGetDatum(heap_tuple_untoast_attr(
(varattrib *)DatumGetPointer(toast_values[i])));
(varattrib *) DatumGetPointer(toast_values[i])));
toast_free[i] = true;
need_change = true;
need_free = true;
@ -448,7 +448,11 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
}
else
{
/* incompressible data, ignore on subsequent compression passes */
/*
* incompressible data, ignore on subsequent compression
* passes
*/
toast_action[i] = 'x';
}
}
@ -565,7 +569,11 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
}
else
{
/* incompressible data, ignore on subsequent compression passes */
/*
* incompressible data, ignore on subsequent compression
* passes
*/
toast_action[i] = 'x';
}
}
@ -662,10 +670,10 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* ----------
*/
memcpy(new_data, newtup->t_data, newtup->t_data->t_hoff);
newtup->t_data = (HeapTupleHeader)new_data;
newtup->t_data = (HeapTupleHeader) new_data;
newtup->t_len = new_len;
DataFill((char *)(MAXALIGN((long)new_data +
DataFill((char *) (MAXALIGN((long) new_data +
offsetof(HeapTupleHeaderData, t_bits) +
((has_nulls) ? BITMAPLEN(numAttrs) : 0))),
tupleDesc,
@ -679,7 +687,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* free the memory from the previous run
* ----------
*/
if ((char *)olddata != ((char *)newtup + HEAPTUPLESIZE))
if ((char *) olddata != ((char *) newtup + HEAPTUPLESIZE))
pfree(olddata);
/* ----------
@ -772,14 +780,14 @@ toast_save_datum(Relation rel, Oid mainoid, int16 attno, Datum value)
* Create the varattrib reference
* ----------
*/
result = (varattrib *)palloc(sizeof(varattrib));
result = (varattrib *) palloc(sizeof(varattrib));
result->va_header = sizeof(varattrib) | VARATT_FLAG_EXTERNAL;
if (VARATT_IS_COMPRESSED(value))
{
result->va_header |= VARATT_FLAG_COMPRESSED;
result->va_content.va_external.va_rawsize =
((varattrib *)value)->va_content.va_compressed.va_rawsize;
((varattrib *) value)->va_content.va_compressed.va_rawsize;
}
else
result->va_content.va_external.va_rawsize = VARATT_SIZE(value);
@ -888,7 +896,7 @@ toast_save_datum(Relation rel, Oid mainoid, int16 attno, Datum value)
static void
toast_delete_datum(Relation rel, Datum value)
{
register varattrib *attr = (varattrib *)value;
register varattrib *attr = (varattrib *) value;
Relation toastrel;
Relation toastidx;
ScanKeyData toastkey;
@ -990,7 +998,7 @@ toast_fetch_datum(varattrib *attr)
memset(chunks_found, 0, numchunks);
memset(chunks_expected, 1, numchunks);
result = (varattrib *)palloc(ressize + VARHDRSZ);
result = (varattrib *) palloc(ressize + VARHDRSZ);
VARATT_SIZEP(result) = ressize + VARHDRSZ;
if (VARATT_IS_COMPRESSED(attr))
VARATT_SIZEP(result) |= VARATT_FLAG_COMPRESSED;
@ -1049,7 +1057,7 @@ toast_fetch_datum(varattrib *attr)
elog(ERROR, "unexpected chunk number %d for toast value %d",
residx,
attr->va_content.va_external.va_valueid);
if (residx < numchunks-1)
if (residx < numchunks - 1)
{
if (chunksize != TOAST_MAX_CHUNK_SIZE)
elog(ERROR, "unexpected chunk size %d in chunk %d for toast value %d",
@ -1072,7 +1080,7 @@ toast_fetch_datum(varattrib *attr)
* Copy the data into proper place in our result
* ----------
*/
memcpy(((char *)VARATT_DATA(result)) + residx * TOAST_MAX_CHUNK_SIZE,
memcpy(((char *) VARATT_DATA(result)) + residx * TOAST_MAX_CHUNK_SIZE,
VARATT_DATA(chunk),
chunksize);
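[Editor's note: toast_fetch_datum() above reassembles an out-of-line value from fixed-size chunks: every chunk except the last must be exactly TOAST_MAX_CHUNK_SIZE bytes, and a chunk's index determines where its bytes land in the result. The standalone sketch below shows just that arithmetic with a made-up chunk size; it is an illustration, not the backend code.]

#include <stdio.h>
#include <string.h>

#define CHUNK_SIZE 8			/* stand-in for TOAST_MAX_CHUNK_SIZE */

/* Split src into CHUNK_SIZE pieces, then copy each back into dst by index. */
static int
chunk_roundtrip(const char *src, size_t len, char *dst)
{
	size_t		numchunks = (len - 1) / CHUNK_SIZE + 1;
	size_t		idx;

	for (idx = 0; idx < numchunks; idx++)
	{
		size_t		chunklen = (idx < numchunks - 1)
			? CHUNK_SIZE				/* all but the last chunk are full */
			: len - idx * CHUNK_SIZE;	/* the last one holds the rest */

		memcpy(dst + idx * CHUNK_SIZE, src + idx * CHUNK_SIZE, chunklen);
	}
	return (int) numchunks;
}

int
main(void)
{
	const char	value[] = "a moderately long toastable value";
	char		copy[sizeof(value)];
	int			n = chunk_roundtrip(value, sizeof(value) - 1, copy);

	copy[sizeof(value) - 1] = '\0';
	printf("%d chunks, reassembled: %s\n", n, copy);
	return 0;
}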

View File

@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/index/Attic/istrat.c,v 1.48 2001/01/24 19:42:48 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/index/Attic/istrat.c,v 1.49 2001/03/22 03:59:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -239,7 +239,7 @@ StrategyTermEvaluate(StrategyTerm term,
break;
case SK_NEGATE:
result = ! DatumGetBool(FunctionCall2(&entry->sk_func,
result = !DatumGetBool(FunctionCall2(&entry->sk_func,
left, right));
break;
@ -249,7 +249,7 @@ StrategyTermEvaluate(StrategyTerm term,
break;
case SK_NEGATE | SK_COMMUTE:
result = ! DatumGetBool(FunctionCall2(&entry->sk_func,
result = !DatumGetBool(FunctionCall2(&entry->sk_func,
right, left));
break;
@ -263,6 +263,7 @@ StrategyTermEvaluate(StrategyTerm term,
return result;
}
#endif
/* ----------------
@ -465,6 +466,7 @@ RelationInvokeStrategy(Relation relation,
}
#endif
/* ----------------
@ -597,9 +599,7 @@ IndexSupportInitialize(IndexStrategy indexStrategy,
}
if (cachesearch)
{
ReleaseSysCache(tuple);
}
else
{
heap_endscan(scan);
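[Editor's note: StrategyTermEvaluate() above dispatches on the SK_NEGATE and SK_COMMUTE flag bits: COMMUTE swaps the operator's arguments and NEGATE inverts its result. A small standalone sketch of that dispatch, using hypothetical flag names rather than the real ScanKey machinery:]

#include <stdio.h>

#define FLAG_NEGATE		0x1		/* invert the operator's result */
#define FLAG_COMMUTE	0x2		/* swap left and right arguments */

typedef int (*cmp_fn) (int left, int right);

static int
int_lt(int left, int right)
{
	return left < right;
}

/* Evaluate a comparison under a NEGATE/COMMUTE flag combination. */
static int
evaluate(cmp_fn op, unsigned int flags, int left, int right)
{
	int			result;

	if (flags & FLAG_COMMUTE)
		result = op(right, left);
	else
		result = op(left, right);

	if (flags & FLAG_NEGATE)
		result = !result;

	return result;
}

int
main(void)
{
	printf("plain:          %d\n", evaluate(int_lt, 0, 3, 5));
	printf("NEGATE:         %d\n", evaluate(int_lt, FLAG_NEGATE, 3, 5));
	printf("COMMUTE:        %d\n", evaluate(int_lt, FLAG_COMMUTE, 3, 5));
	printf("NEGATE|COMMUTE: %d\n",
		   evaluate(int_lt, FLAG_NEGATE | FLAG_COMMUTE, 3, 5));
	return 0;
}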

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtcompare.c,v 1.40 2001/01/24 19:42:48 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtcompare.c,v 1.41 2001/03/22 03:59:14 momjian Exp $
*
* NOTES
*
@ -236,9 +236,10 @@ bttextcmp(PG_FUNCTION_ARGS)
if (res == 0 && VARSIZE(a) != VARSIZE(b))
{
/*
* The two strings are the same in the first len bytes,
* and they are of different lengths.
* The two strings are the same in the first len bytes, and they
* are of different lengths.
*/
if (VARSIZE(a) < VARSIZE(b))
res = -1;
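[Editor's note: the bttextcmp() hunk above spells out the tie-break rule: the common prefix is compared first, and only when it is equal does the shorter value sort before the longer one. A standalone sketch of that ordering on plain byte strings (a hypothetical helper, not the varlena-based original):]

#include <stdio.h>
#include <string.h>

/* Compare two byte strings: common prefix first, then length. */
static int
prefix_then_length_cmp(const char *a, size_t alen,
					   const char *b, size_t blen)
{
	size_t		minlen = (alen < blen) ? alen : blen;
	int			res = memcmp(a, b, minlen);

	if (res != 0)
		return res;
	/* same first minlen bytes: the shorter value sorts first */
	if (alen < blen)
		return -1;
	if (alen > blen)
		return 1;
	return 0;
}

int
main(void)
{
	printf("%d\n", prefix_then_length_cmp("abc", 3, "abcd", 4));	/* < 0 */
	printf("%d\n", prefix_then_length_cmp("abd", 3, "abcd", 4));	/* > 0 */
	printf("%d\n", prefix_then_length_cmp("abc", 3, "abc", 3));		/* = 0 */
	return 0;
}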

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.81 2001/02/07 23:35:33 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.82 2001/03/22 03:59:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -114,8 +114,8 @@ top:
buf = _bt_moveright(rel, buf, natts, itup_scankey, BT_WRITE);
/*
* If we're not allowing duplicates, make sure the key isn't
* already in the index. XXX this belongs somewhere else, likely
* If we're not allowing duplicates, make sure the key isn't already
* in the index. XXX this belongs somewhere else, likely
*/
if (index_is_unique)
{
@ -171,8 +171,8 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
maxoff = PageGetMaxOffsetNumber(page);
/*
* Find first item >= proposed new item. Note we could also get
* a pointer to end-of-page here.
* Find first item >= proposed new item. Note we could also get a
* pointer to end-of-page here.
*/
offset = _bt_binsrch(rel, buf, natts, itup_scankey);
@ -187,24 +187,24 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
BlockNumber nblkno;
/*
* _bt_compare returns 0 for (1,NULL) and (1,NULL) - this is
* how we handle NULLs - and so we must not use _bt_compare
* in real comparison, but only for ordering/finding items on
* pages. - vadim 03/24/97
* _bt_compare returns 0 for (1,NULL) and (1,NULL) - this is how we
* handle NULLs - and so we must not use _bt_compare in real
* comparison, but only for ordering/finding items on pages. -
* vadim 03/24/97
*
* make sure the offset points to an actual key
* before trying to compare it...
* make sure the offset points to an actual key before trying to
* compare it...
*/
if (offset <= maxoff)
{
if (! _bt_isequal(itupdesc, page, offset, natts, itup_scankey))
if (!_bt_isequal(itupdesc, page, offset, natts, itup_scankey))
break; /* we're past all the equal tuples */
/*
* Have to check whether the inserted heap tuple is a deleted one
* (i.e. just moved to another place by vacuum)! We only need to
* do this once, but don't want to do it at all unless
* we see equal tuples, so as not to slow down unequal case.
* Have to check whether the inserted heap tuple is a deleted one (i.e.
* just moved to another place by vacuum)! We only need to do this
* once, but don't want to do it at all unless we see equal
* tuples, so as not to slow down unequal case.
*/
if (chtup)
{
@ -238,6 +238,7 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
/* Tell _bt_doinsert to wait... */
return xwait;
}
/*
* Otherwise we have a definite conflict.
*/
@ -358,16 +359,14 @@ _bt_insertonpg(Relation rel,
*/
if (itemsz > (PageGetPageSize(page) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) / 3 - sizeof(ItemIdData))
elog(ERROR, "btree: index item size %lu exceeds maximum %lu",
(unsigned long)itemsz,
(PageGetPageSize(page) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) /3 - sizeof(ItemIdData));
(unsigned long) itemsz,
(PageGetPageSize(page) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) / 3 - sizeof(ItemIdData));
/*
* Determine exactly where new item will go.
*/
if (afteritem > 0)
{
newitemoff = afteritem + 1;
}
else
{
/*----------
@ -404,10 +403,11 @@ _bt_insertonpg(Relation rel,
lpageop = (BTPageOpaque) PageGetSpecialPointer(page);
movedright = true;
}
/*
* Now we are on the right page, so find the insert position.
* If we moved right at all, we know we should insert at the
* start of the page, else must find the position by searching.
* Now we are on the right page, so find the insert position. If
* we moved right at all, we know we should insert at the start of
* the page, else must find the position by searching.
*/
if (movedright)
newitemoff = P_FIRSTDATAKEY(lpageop);
@ -418,9 +418,9 @@ _bt_insertonpg(Relation rel,
/*
* Do we need to split the page to fit the item on it?
*
* Note: PageGetFreeSpace() subtracts sizeof(ItemIdData) from its
* result, so this comparison is correct even though we appear to
* be accounting only for the item and not for its line pointer.
* Note: PageGetFreeSpace() subtracts sizeof(ItemIdData) from its result,
* so this comparison is correct even though we appear to be
* accounting only for the item and not for its line pointer.
*/
if (PageGetFreeSpace(page) < itemsz)
{
@ -489,10 +489,11 @@ _bt_insertonpg(Relation rel,
if (stack == (BTStack) NULL)
{
elog(DEBUG, "btree: concurrent ROOT page split");
/*
* If root page splitter failed to create new root page
* then old root' btpo_parent still points to metapage.
* We have to fix root page in this case.
* then old root' btpo_parent still points to metapage. We
* have to fix root page in this case.
*/
if (BTreeInvalidParent(lpageop))
{
@ -531,9 +532,9 @@ _bt_insertonpg(Relation rel,
* item! We want to find parent pointing to where we are,
* right ? - vadim 05/27/97
*
* Interestingly, this means we didn't *really* need to stack
* the parent key at all; all we really care about is the
* saved block and offset as a starting point for our search...
* Interestingly, this means we didn't *really* need to stack the
* parent key at all; all we really care about is the saved
* block and offset as a starting point for our search...
*/
ItemPointerSet(&(stack->bts_btitem.bti_itup.t_tid),
bknum, P_HIKEY);
@ -598,10 +599,11 @@ _bt_insertuple(Relation rel, Buffer buf,
XLogRecPtr recptr;
XLogRecData rdata[2];
BTItemData truncitem;
xlrec.target.node = rel->rd_node;
ItemPointerSet(&(xlrec.target.tid), BufferGetBlockNumber(buf), newitemoff);
rdata[0].buffer = InvalidBuffer;
rdata[0].data = (char*)&xlrec;
rdata[0].data = (char *) &xlrec;
rdata[0].len = SizeOfBtreeInsert;
rdata[0].next = &(rdata[1]);
@ -610,12 +612,12 @@ _bt_insertuple(Relation rel, Buffer buf,
{
truncitem = *btitem;
truncitem.bti_itup.t_info = sizeof(BTItemData);
rdata[1].data = (char*)&truncitem;
rdata[1].data = (char *) &truncitem;
rdata[1].len = sizeof(BTItemData);
}
else
{
rdata[1].data = (char*)btitem;
rdata[1].data = (char *) btitem;
rdata[1].len = IndexTupleDSize(btitem->bti_itup) +
(sizeof(BTItemData) - sizeof(IndexTupleData));
}
@ -700,8 +702,8 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
/*
* If the page we're splitting is not the rightmost page at its level
* in the tree, then the first entry on the page is the high key
* for the page. We need to copy that to the right half. Otherwise
* in the tree, then the first entry on the page is the high key for
* the page. We need to copy that to the right half. Otherwise
* (meaning the rightmost page case), all the items on the right half
* will be user data.
*/
@ -812,11 +814,11 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
}
/*
* We have to grab the right sibling (if any) and fix the prev
* pointer there. We are guaranteed that this is deadlock-free
* since no other writer will be holding a lock on that page
* and trying to move left, and all readers release locks on a page
* before trying to fetch its neighbors.
* We have to grab the right sibling (if any) and fix the prev pointer
* there. We are guaranteed that this is deadlock-free since no other
* writer will be holding a lock on that page and trying to move left,
* and all readers release locks on a page before trying to fetch its
* neighbors.
*/
if (!P_RIGHTMOST(ropaque))
@ -856,31 +858,33 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
BlockIdSet(&(xlrec.parentblk), lopaque->btpo_parent);
BlockIdSet(&(xlrec.leftblk), lopaque->btpo_prev);
BlockIdSet(&(xlrec.rightblk), ropaque->btpo_next);
/*
* Direct access to page is not good but faster - we should
* implement some new func in page API.
*/
xlrec.leftlen = ((PageHeader)leftpage)->pd_special -
((PageHeader)leftpage)->pd_upper;
xlrec.leftlen = ((PageHeader) leftpage)->pd_special -
((PageHeader) leftpage)->pd_upper;
rdata[0].buffer = InvalidBuffer;
rdata[0].data = (char*)&xlrec;
rdata[0].data = (char *) &xlrec;
rdata[0].len = SizeOfBtreeSplit;
rdata[0].next = &(rdata[1]);
rdata[1].buffer = InvalidBuffer;
rdata[1].data = (char*)leftpage + ((PageHeader)leftpage)->pd_upper;
rdata[1].data = (char *) leftpage + ((PageHeader) leftpage)->pd_upper;
rdata[1].len = xlrec.leftlen;
rdata[1].next = &(rdata[2]);
rdata[2].buffer = InvalidBuffer;
rdata[2].data = (char*)rightpage + ((PageHeader)rightpage)->pd_upper;
rdata[2].len = ((PageHeader)rightpage)->pd_special -
((PageHeader)rightpage)->pd_upper;
rdata[2].data = (char *) rightpage + ((PageHeader) rightpage)->pd_upper;
rdata[2].len = ((PageHeader) rightpage)->pd_special -
((PageHeader) rightpage)->pd_upper;
rdata[2].next = NULL;
if (!P_RIGHTMOST(ropaque))
{
BTPageOpaque sopaque = (BTPageOpaque) PageGetSpecialPointer(spage);
sopaque->btpo_prev = BufferGetBlockNumber(rbuf);
rdata[2].next = &(rdata[3]);
@ -968,23 +972,23 @@ _bt_findsplitloc(Relation rel,
/* Passed-in newitemsz is MAXALIGNED but does not include line pointer */
newitemsz += sizeof(ItemIdData);
state.newitemsz = newitemsz;
state.non_leaf = ! P_ISLEAF(opaque);
state.non_leaf = !P_ISLEAF(opaque);
state.have_split = false;
/* Total free space available on a btree page, after fixed overhead */
leftspace = rightspace =
PageGetPageSize(page) - sizeof(PageHeaderData) -
MAXALIGN(sizeof(BTPageOpaqueData))
+ sizeof(ItemIdData);
+sizeof(ItemIdData);
/*
* Finding the best possible split would require checking all the possible
* split points, because of the high-key and left-key special cases.
* That's probably more work than it's worth; instead, stop as soon as
* we find a "good-enough" split, where good-enough is defined as an
* imbalance in free space of no more than pagesize/16 (arbitrary...)
* This should let us stop near the middle on most pages, instead of
* plowing to the end.
* Finding the best possible split would require checking all the
* possible split points, because of the high-key and left-key special
* cases. That's probably more work than it's worth; instead, stop as
* soon as we find a "good-enough" split, where good-enough is defined
* as an imbalance in free space of no more than pagesize/16
* (arbitrary...) This should let us stop near the middle on most
* pages, instead of plowing to the end.
*/
goodenough = leftspace / 16;
@ -1024,6 +1028,7 @@ _bt_findsplitloc(Relation rel,
*/
leftfree = leftspace - dataitemstoleft - (int) itemsz;
rightfree = rightspace - (dataitemtotal - dataitemstoleft);
/*
* Will the new item go to left or right of split?
*/
@ -1051,10 +1056,10 @@ _bt_findsplitloc(Relation rel,
}
/*
* I believe it is not possible to fail to find a feasible split,
* but just in case ...
* I believe it is not possible to fail to find a feasible split, but
* just in case ...
*/
if (! state.have_split)
if (!state.have_split)
elog(FATAL, "_bt_findsplitloc: can't find a feasible split point for %s",
RelationGetRelationName(rel));
@ -1071,6 +1076,7 @@ _bt_checksplitloc(FindSplitData *state, OffsetNumber firstright,
int leftfree, int rightfree,
bool newitemonleft, Size firstrightitemsz)
{
/*
* Account for the new item on whichever side it is to be put.
*/
@ -1078,13 +1084,15 @@ _bt_checksplitloc(FindSplitData *state, OffsetNumber firstright,
leftfree -= (int) state->newitemsz;
else
rightfree -= (int) state->newitemsz;
/*
* If we are not on the leaf level, we will be able to discard the
* key data from the first item that winds up on the right page.
* If we are not on the leaf level, we will be able to discard the key
* data from the first item that winds up on the right page.
*/
if (state->non_leaf)
rightfree += (int) firstrightitemsz -
(int) (MAXALIGN(sizeof(BTItemData)) + sizeof(ItemIdData));
/*
* If feasible split point, remember best delta.
*/
@ -1134,10 +1142,11 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access)
maxoff = PageGetMaxOffsetNumber(page);
start = stack->bts_offset;
/*
* _bt_insertonpg set bts_offset to InvalidOffsetNumber in the
* case of concurrent ROOT page split. Also, watch out for
* possibility that page has a high key now when it didn't before.
* _bt_insertonpg set bts_offset to InvalidOffsetNumber in the case of
* concurrent ROOT page split. Also, watch out for possibility that
* page has a high key now when it didn't before.
*/
if (start < P_FIRSTDATAKEY(opaque))
start = P_FIRSTDATAKEY(opaque);
@ -1159,11 +1168,15 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access)
return buf;
}
}
/* by here, the item we're looking for moved right at least one page */
/*
* by here, the item we're looking for moved right at least one
* page
*/
if (P_RIGHTMOST(opaque))
{
_bt_relbuf(rel, buf, access);
return(InvalidBuffer);
return (InvalidBuffer);
}
blkno = opaque->btpo_next;
@ -1236,9 +1249,9 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
rpage = BufferGetPage(rbuf);
/*
* Make sure pages in old root level have valid parent links --- we will
* need this in _bt_insertonpg() if a concurrent root split happens (see
* README).
* Make sure pages in old root level have valid parent links --- we
* will need this in _bt_insertonpg() if a concurrent root split
* happens (see README).
*/
((BTPageOpaque) PageGetSpecialPointer(lpage))->btpo_parent =
((BTPageOpaque) PageGetSpecialPointer(rpage))->btpo_parent =
@ -1264,8 +1277,8 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
pfree(new_item);
/*
* Create downlink item for right page. The key for it is obtained from
* the "high key" position in the left page.
* Create downlink item for right page. The key for it is obtained
* from the "high key" position in the left page.
*/
itemid = PageGetItemId(lpage, P_HIKEY);
itemsz = ItemIdGetLength(itemid);
@ -1293,7 +1306,7 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
xlrec.level = metad->btm_level;
BlockIdSet(&(xlrec.rootblk), rootblknum);
rdata[0].buffer = InvalidBuffer;
rdata[0].data = (char*)&xlrec;
rdata[0].data = (char *) &xlrec;
rdata[0].len = SizeOfBtreeNewroot;
rdata[0].next = &(rdata[1]);
@ -1302,9 +1315,9 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
* implement some new func in page API.
*/
rdata[1].buffer = InvalidBuffer;
rdata[1].data = (char*)rootpage + ((PageHeader) rootpage)->pd_upper;
rdata[1].len = ((PageHeader)rootpage)->pd_special -
((PageHeader)rootpage)->pd_upper;
rdata[1].data = (char *) rootpage + ((PageHeader) rootpage)->pd_upper;
rdata[1].len = ((PageHeader) rootpage)->pd_special -
((PageHeader) rootpage)->pd_upper;
rdata[1].next = NULL;
recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_NEWROOT, rdata);
@ -1325,7 +1338,7 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
/* write and let go of metapage buffer */
_bt_wrtbuf(rel, metabuf);
return(rootbuf);
return (rootbuf);
}
/*
@ -1346,17 +1359,24 @@ _bt_fixroot(Relation rel, Buffer oldrootbuf, bool release)
Page oldrootpage = BufferGetPage(oldrootbuf);
BTPageOpaque oldrootopaque = (BTPageOpaque)
PageGetSpecialPointer(oldrootpage);
Buffer buf, leftbuf, rightbuf;
Page page, leftpage, rightpage;
BTPageOpaque opaque, leftopaque, rightopaque;
Buffer buf,
leftbuf,
rightbuf;
Page page,
leftpage,
rightpage;
BTPageOpaque opaque,
leftopaque,
rightopaque;
OffsetNumber newitemoff;
BTItem btitem, ritem;
BTItem btitem,
ritem;
Size itemsz;
if (! P_LEFTMOST(oldrootopaque) || P_RIGHTMOST(oldrootopaque))
if (!P_LEFTMOST(oldrootopaque) || P_RIGHTMOST(oldrootopaque))
elog(ERROR, "bt_fixroot: not valid old root page");
/* Read right neighbor and create new root page*/
/* Read right neighbor and create new root page */
leftbuf = _bt_getbuf(rel, oldrootopaque->btpo_next, BT_WRITE);
leftpage = BufferGetPage(leftbuf);
leftopaque = (BTPageOpaque) PageGetSpecialPointer(leftpage);
@ -1377,26 +1397,26 @@ _bt_fixroot(Relation rel, Buffer oldrootbuf, bool release)
*
* If concurrent process will split one of pages on this level then it
* will see either btpo_parent == metablock or btpo_parent == rootblk.
* In first case it will give up its locks and walk to the leftmost page
* (oldrootbuf) in _bt_fixup() - ie it will wait for us and let us
* continue. In second case it will try to lock rootbuf keeping its locks
* on buffers we already passed, also waiting for us. If we'll have to
* unlock rootbuf (split it) and that process will have to split page
* of new level we created (level of rootbuf) then it will wait while
* we create upper level. Etc.
* In first case it will give up its locks and walk to the leftmost
* page (oldrootbuf) in _bt_fixup() - ie it will wait for us and let
* us continue. In second case it will try to lock rootbuf keeping its
* locks on buffers we already passed, also waiting for us. If we'll
* have to unlock rootbuf (split it) and that process will have to
* split page of new level we created (level of rootbuf) then it will
* wait while we create upper level. Etc.
*/
while(! P_RIGHTMOST(leftopaque))
while (!P_RIGHTMOST(leftopaque))
{
rightbuf = _bt_getbuf(rel, leftopaque->btpo_next, BT_WRITE);
rightpage = BufferGetPage(rightbuf);
rightopaque = (BTPageOpaque) PageGetSpecialPointer(rightpage);
/*
* Update LSN & StartUpID of child page buffer to ensure that
* it will be written on disk after flushing log record for new
* root creation. Unfortunately, for the moment (?) we do not
* log this operation and so possibly break our rule to log entire
* page content on first after checkpoint modification.
* Update LSN & StartUpID of child page buffer to ensure that it
* will be written on disk after flushing log record for new root
* creation. Unfortunately, for the moment (?) we do not log this
* operation and so possibly break our rule to log entire page
* content on first after checkpoint modification.
*/
HOLD_INTERRUPTS();
rightopaque->btpo_parent = rootblk;
@ -1450,10 +1470,10 @@ _bt_fixroot(Relation rel, Buffer oldrootbuf, bool release)
/*
* Here we hold locks on old root buffer, new root buffer we've
* created with _bt_newroot() - rootbuf, - and buf we've used
* for last insert ops - buf. If rootbuf != buf then we have to
* create at least one more level. And if "release" is TRUE
* then we give up oldrootbuf.
* created with _bt_newroot() - rootbuf, - and buf we've used for last
* insert ops - buf. If rootbuf != buf then we have to create at least
* one more level. And if "release" is TRUE then we give up
* oldrootbuf.
*/
if (release)
_bt_wrtbuf(rel, oldrootbuf);
@ -1461,10 +1481,10 @@ _bt_fixroot(Relation rel, Buffer oldrootbuf, bool release)
if (rootbuf != buf)
{
_bt_wrtbuf(rel, buf);
return(_bt_fixroot(rel, rootbuf, true));
return (_bt_fixroot(rel, rootbuf, true));
}
return(rootbuf);
return (rootbuf);
}
/*
@ -1479,12 +1499,12 @@ _bt_fixtree(Relation rel, BlockNumber blkno)
BTPageOpaque opaque;
BlockNumber pblkno;
for ( ; ; )
for (;;)
{
buf = _bt_getbuf(rel, blkno, BT_READ);
page = BufferGetPage(buf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
if (! P_LEFTMOST(opaque) || P_ISLEAF(opaque))
if (!P_LEFTMOST(opaque) || P_ISLEAF(opaque))
elog(ERROR, "bt_fixtree[%s]: invalid start page (need to recreate index)", RelationGetRelationName(rel));
pblkno = opaque->btpo_parent;
@ -1543,7 +1563,8 @@ _bt_fixlevel(Relation rel, Buffer buf, BlockNumber limit)
Page cpage[3];
BTPageOpaque copaque[3];
BTItem btitem;
int cidx, i;
int cidx,
i;
bool goodbye = false;
char tbuf[BLCKSZ];
@ -1552,7 +1573,7 @@ _bt_fixlevel(Relation rel, Buffer buf, BlockNumber limit)
memmove(tbuf, page, PageGetPageSize(page));
_bt_relbuf(rel, buf, BT_READ);
page = (Page)tbuf;
page = (Page) tbuf;
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
/* Initialize first child data */
@ -1564,20 +1585,21 @@ _bt_fixlevel(Relation rel, Buffer buf, BlockNumber limit)
cbuf[0] = _bt_getbuf(rel, cblkno[0], BT_READ);
cpage[0] = BufferGetPage(cbuf[0]);
copaque[0] = (BTPageOpaque) PageGetSpecialPointer(cpage[0]);
if (P_LEFTMOST(opaque) && ! P_LEFTMOST(copaque[0]))
if (P_LEFTMOST(opaque) && !P_LEFTMOST(copaque[0]))
elog(ERROR, "bt_fixtlevel[%s]: non-leftmost child page of leftmost parent (need to recreate index)", RelationGetRelationName(rel));
/* caller should take care and avoid this */
if (P_RIGHTMOST(copaque[0]))
elog(ERROR, "bt_fixtlevel[%s]: invalid start child (need to recreate index)", RelationGetRelationName(rel));
for ( ; ; )
for (;;)
{
/*
* Read up to 2 more child pages and look for pointers
* to them in *saved* parent page
* Read up to 2 more child pages and look for pointers to them in
* *saved* parent page
*/
coff[1] = coff[2] = InvalidOffsetNumber;
for (cidx = 0; cidx < 2; )
for (cidx = 0; cidx < 2;)
{
cidx++;
cblkno[cidx] = (copaque[cidx - 1])->btpo_next;
@ -1649,7 +1671,7 @@ _bt_fixlevel(Relation rel, Buffer buf, BlockNumber limit)
continue;
}
/* Have to check next page ? */
if ((! P_RIGHTMOST(opaque)) &&
if ((!P_RIGHTMOST(opaque)) &&
coff[i - 1] == PageGetMaxOffsetNumber(page)) /* yes */
{
newbuf = _bt_getbuf(rel, opaque->btpo_next, BT_WRITE);
@ -1720,7 +1742,7 @@ _bt_fixlevel(Relation rel, Buffer buf, BlockNumber limit)
/* copy page with pointer to cblkno[cidx] to temp storage */
memmove(tbuf, page, PageGetPageSize(page));
_bt_relbuf(rel, buf, BT_WRITE);
page = (Page)tbuf;
page = (Page) tbuf;
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
}
@ -1766,12 +1788,13 @@ _bt_fixbranch(Relation rel, BlockNumber lblkno,
BlockNumber blkno = true_stack->bts_blkno;
BTStackData stack;
BTPageOpaque opaque;
Buffer buf, rbuf;
Buffer buf,
rbuf;
Page page;
OffsetNumber offnum;
true_stack = true_stack->bts_parent;
for ( ; ; )
for (;;)
{
buf = _bt_getbuf(rel, blkno, BT_READ);
@ -1779,8 +1802,8 @@ _bt_fixbranch(Relation rel, BlockNumber lblkno,
_bt_fixlevel(rel, buf, rblkno);
/*
* Here parent level should have pointers for both
* lblkno and rblkno and we have to find them.
* Here parent level should have pointers for both lblkno and
* rblkno and we have to find them.
*/
stack.bts_parent = NULL;
stack.bts_blkno = blkno;
@ -1829,10 +1852,10 @@ _bt_fixbranch(Relation rel, BlockNumber lblkno,
}
/*
* Well, we are on the level that was root or nonexistent when
* we started traversing tree down. If btpo_parent is updated
* then we'll use it to continue, else we'll fix/restore upper
* levels entirely.
* Well, we are on the level that was root or nonexistent when we
* started traversing tree down. If btpo_parent is updated then
* we'll use it to continue, else we'll fix/restore upper levels
* entirely.
*/
if (!BTreeInvalidParent(opaque))
{
@ -1878,14 +1901,14 @@ _bt_fixup(Relation rel, Buffer buf)
BTPageOpaque opaque;
BlockNumber blkno;
for ( ; ; )
for (;;)
{
page = BufferGetPage(buf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
/*
* If someone else already created parent pages
* then it's time for _bt_fixtree() to check upper
* levels and fix them, if required.
* If someone else already created parent pages then it's time for
* _bt_fixtree() to check upper levels and fix them, if required.
*/
if (!BTreeInvalidParent(opaque))
{
@ -1904,9 +1927,8 @@ _bt_fixup(Relation rel, Buffer buf)
}
/*
* Ok, we are on the leftmost page, it's write locked
* by us and its btpo_parent points to meta page - time
* for _bt_fixroot().
* Ok, we are on the leftmost page, it's write locked by us and its
* btpo_parent points to meta page - time for _bt_fixroot().
*/
elog(NOTICE, "bt_fixup[%s]: fixing root page", RelationGetRelationName(rel));
buf = _bt_fixroot(rel, buf, true);
@ -1925,16 +1947,16 @@ _bt_getoff(Page page, BlockNumber blkno)
ItemId itemid;
BTItem item;
for ( ; offnum <= maxoff; offnum++)
for (; offnum <= maxoff; offnum++)
{
itemid = PageGetItemId(page, offnum);
item = (BTItem) PageGetItem(page, itemid);
curblkno = ItemPointerGetBlockNumber(&(item->bti_itup.t_tid));
if (curblkno == blkno)
return(offnum);
return (offnum);
}
return(InvalidOffsetNumber);
return (InvalidOffsetNumber);
}
/*
@ -1963,7 +1985,7 @@ _bt_pgaddtup(Relation rel,
BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
BTItemData truncitem;
if (! P_ISLEAF(opaque) && itup_off == P_FIRSTDATAKEY(opaque))
if (!P_ISLEAF(opaque) && itup_off == P_FIRSTDATAKEY(opaque))
{
memcpy(&truncitem, btitem, sizeof(BTItemData));
truncitem.bti_itup.t_info = sizeof(BTItemData);
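
The _bt_getoff() loop above scans a page's items for the entry whose pointer matches a given child block and returns its offset, or InvalidOffsetNumber when nothing matches. A minimal self-contained sketch of that lookup pattern follows, using invented stand-in types rather than the real page API.

/* Illustrative sketch only, not part of the diff: linear scan for the
 * item pointing at a given child block, in the spirit of _bt_getoff().
 * FakeItem, find_child_offset and InvalidOffset are invented names. */
#include <stdio.h>

typedef unsigned int BlockNumber;
#define InvalidOffset 0

typedef struct { BlockNumber child; } FakeItem;

static int
find_child_offset(const FakeItem *items, int nitems, BlockNumber blkno)
{
	/* offsets are conventionally 1-based, as in the real code */
	for (int off = 1; off <= nitems; off++)
		if (items[off - 1].child == blkno)
			return off;
	return InvalidOffset;		/* caller must handle "not found" */
}

int
main(void)
{
	FakeItem	page[] = {{10}, {42}, {7}};

	printf("offset of child 42 = %d\n", find_child_offset(page, 3, 42));
	printf("offset of child 99 = %d\n", find_child_offset(page, 3, 99));
	return 0;
}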


@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.50 2001/02/07 23:35:33 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.51 2001/03/22 03:59:14 momjian Exp $
*
* NOTES
* Postgres btree pages look like ordinary relation pages. The opaque
@ -186,12 +186,12 @@ _bt_getroot(Relation rel, int access)
xlrec.level = 1;
BlockIdSet(&(xlrec.rootblk), rootblkno);
rdata.buffer = InvalidBuffer;
rdata.data = (char*)&xlrec;
rdata.data = (char *) &xlrec;
rdata.len = SizeOfBtreeNewroot;
rdata.next = NULL;
recptr = XLogInsert(RM_BTREE_ID,
XLOG_BTREE_NEWROOT|XLOG_BTREE_LEAF, &rdata);
XLOG_BTREE_NEWROOT | XLOG_BTREE_LEAF, &rdata);
PageSetLSN(rootpage, recptr);
PageSetSUI(rootpage, ThisStartUpID);
@ -212,6 +212,7 @@ _bt_getroot(Relation rel, int access)
}
else
{
/*
* Metadata initialized by someone else. In order to
* guarantee no deadlocks, we have to release the metadata
@ -237,18 +238,19 @@ _bt_getroot(Relation rel, int access)
rootpage = BufferGetPage(rootbuf);
rootopaque = (BTPageOpaque) PageGetSpecialPointer(rootpage);
if (! P_ISROOT(rootopaque))
if (!P_ISROOT(rootopaque))
{
/*
* It happened, but if root page splitter failed to create
* new root page then we'll go in loop trying to call
* _bt_getroot again and again.
* It happened, but if root page splitter failed to create new
* root page then we'll go in loop trying to call _bt_getroot
* again and again.
*/
if (FixBTree)
{
Buffer newrootbuf;
check_parent:;
check_parent:;
if (BTreeInvalidParent(rootopaque)) /* unupdated! */
{
LockBuffer(rootbuf, BUFFER_LOCK_UNLOCK);
@ -266,20 +268,22 @@ check_parent:;
rootopaque = (BTPageOpaque) PageGetSpecialPointer(rootpage);
/* New root might be splitted while changing lock */
if (P_ISROOT(rootopaque))
return(rootbuf);
return (rootbuf);
/* rootbuf is read locked */
goto check_parent;
}
else /* someone else already fixed root */
else
/* someone else already fixed root */
{
LockBuffer(rootbuf, BUFFER_LOCK_UNLOCK);
LockBuffer(rootbuf, BT_READ);
}
}
/*
* Ok, here we have old root page with btpo_parent pointing
* to upper level - check parent page because of there is
* good chance that parent is root page.
* Ok, here we have old root page with btpo_parent pointing to
* upper level - check parent page because of there is good
* chance that parent is root page.
*/
newrootbuf = _bt_getbuf(rel, rootopaque->btpo_parent, BT_READ);
_bt_relbuf(rel, rootbuf, BT_READ);
@ -287,7 +291,7 @@ check_parent:;
rootpage = BufferGetPage(rootbuf);
rootopaque = (BTPageOpaque) PageGetSpecialPointer(rootpage);
if (P_ISROOT(rootopaque))
return(rootbuf);
return (rootbuf);
/* no luck -:( */
}
@ -475,7 +479,7 @@ _bt_pagedel(Relation rel, ItemPointer tid)
xlrec.target.node = rel->rd_node;
xlrec.target.tid = *tid;
rdata[0].buffer = InvalidBuffer;
rdata[0].data = (char*)&xlrec;
rdata[0].data = (char *) &xlrec;
rdata[0].len = SizeOfBtreeDelete;
rdata[0].next = &(rdata[1]);
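
Both _bt_getroot() and _bt_pagedel() above build their WAL records by filling XLogRecData entries and chaining multi-part records through the next field before handing the list to XLogInsert(). A small self-contained sketch of that chaining pattern follows, with simplified stand-in types rather than the real xlog structures.

/* Illustrative sketch only (FakeRecData is an invented stand-in, not the
 * real XLogRecData): stringing several data chunks into one record
 * request, mirroring the rdata[0].next = &(rdata[1]) pattern above. */
#include <stdio.h>
#include <stddef.h>

typedef struct FakeRecData
{
	const char *data;			/* chunk to include in the record */
	size_t		len;
	struct FakeRecData *next;	/* next chunk, or NULL at end of chain */
} FakeRecData;

static size_t
total_record_len(const FakeRecData *rdt)
{
	size_t		len = 0;

	for (; rdt != NULL; rdt = rdt->next)
		len += rdt->len;
	return len;
}

int
main(void)
{
	FakeRecData rdata[2];

	rdata[0].data = "header";
	rdata[0].len = 6;
	rdata[0].next = &rdata[1];	/* chain the second chunk */
	rdata[1].data = "payload";
	rdata[1].len = 7;
	rdata[1].next = NULL;

	printf("record body length = %zu\n", total_record_len(rdata));
	return 0;
}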


@ -12,7 +12,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.78 2001/02/07 23:35:33 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.79 2001/03/22 03:59:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -30,6 +30,7 @@
bool BuildingBtree = false; /* see comment in btbuild() */
bool FastBuild = true; /* use sort/build instead */
/* of insertion build */
@ -56,8 +57,10 @@ btbuild(PG_FUNCTION_ARGS)
Relation index = (Relation) PG_GETARG_POINTER(1);
IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
Node *oldPred = (Node *) PG_GETARG_POINTER(3);
#ifdef NOT_USED
IndexStrategy istrat = (IndexStrategy) PG_GETARG_POINTER(4);
#endif
HeapScanDesc hscan;
HeapTuple htup;
@ -69,9 +72,11 @@ btbuild(PG_FUNCTION_ARGS)
int nhtups,
nitups;
Node *pred = indexInfo->ii_Predicate;
#ifndef OMIT_PARTIAL_INDEX
TupleTable tupleTable;
TupleTableSlot *slot;
#endif
ExprContext *econtext;
InsertIndexResult res = NULL;
@ -80,10 +85,11 @@ btbuild(PG_FUNCTION_ARGS)
bool usefast;
Snapshot snapshot;
TransactionId XmaxRecent;
/*
* spool2 is needed only when the index is an unique index.
* Dead tuples are put into spool2 instead of spool in
* order to avoid uniqueness check.
* spool2 is needed only when the index is an unique index. Dead
* tuples are put into spool2 instead of spool in order to avoid
* uniqueness check.
*/
BTSpool *spool2 = NULL;
bool tupleIsAlive;
@ -155,9 +161,9 @@ btbuild(PG_FUNCTION_ARGS)
if (usefast)
{
spool = _bt_spoolinit(index, indexInfo->ii_Unique);
/*
* Different from spool,the uniqueness isn't checked
* for spool2.
* Different from spool,the uniqueness isn't checked for spool2.
*/
if (indexInfo->ii_Unique)
spool2 = _bt_spoolinit(index, false);
@ -193,6 +199,7 @@ btbuild(PG_FUNCTION_ARGS)
nhtups++;
#ifndef OMIT_PARTIAL_INDEX
/*
* If oldPred != NULL, this is an EXTEND INDEX command, so skip
* this tuple if it was already in the existing partial index
@ -253,8 +260,7 @@ btbuild(PG_FUNCTION_ARGS)
* btree pages - NULLs greater NOT_NULLs and NULL = NULL is TRUE.
* Sure, it's just rule for placing/finding items and no more -
* keytest'll return FALSE for a = 5 for items having 'a' isNULL.
* Look at _bt_compare for how it works.
* - vadim 03/23/97
* Look at _bt_compare for how it works. - vadim 03/23/97
*
* if (itup->t_info & INDEX_NULL_MASK) { pfree(itup); continue; }
*/
@ -271,7 +277,8 @@ btbuild(PG_FUNCTION_ARGS)
{
if (tupleIsAlive || !spool2)
_bt_spool(btitem, spool);
else /* dead tuples are put into spool2 */
else
/* dead tuples are put into spool2 */
{
dead_count++;
_bt_spool(btitem, spool2);
@ -296,9 +303,7 @@ btbuild(PG_FUNCTION_ARGS)
#ifndef OMIT_PARTIAL_INDEX
if (pred != NULL || oldPred != NULL)
{
ExecDropTupleTable(tupleTable, true);
}
#endif /* OMIT_PARTIAL_INDEX */
FreeExprContext(econtext);
@ -408,10 +413,11 @@ btgettuple(PG_FUNCTION_ARGS)
if (ItemPointerIsValid(&(scan->currentItemData)))
{
/*
* Restore scan position using heap TID returned by previous call
* to btgettuple(). _bt_restscan() re-grabs the read lock on
* the buffer, too.
* to btgettuple(). _bt_restscan() re-grabs the read lock on the
* buffer, too.
*/
_bt_restscan(scan);
res = _bt_next(scan, dir);
@ -421,8 +427,8 @@ btgettuple(PG_FUNCTION_ARGS)
/*
* Save heap TID to use it in _bt_restscan. Then release the read
* lock on the buffer so that we aren't blocking other backends.
* NOTE: we do keep the pin on the buffer!
* lock on the buffer so that we aren't blocking other backends. NOTE:
* we do keep the pin on the buffer!
*/
if (res)
{
@ -462,8 +468,10 @@ Datum
btrescan(PG_FUNCTION_ARGS)
{
IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
#ifdef NOT_USED /* XXX surely it's wrong to ignore this? */
bool fromEnd = PG_GETARG_BOOL(1);
#endif
ScanKey scankey = (ScanKey) PG_GETARG_POINTER(2);
ItemPointer iptr;
@ -671,8 +679,8 @@ _bt_restscan(IndexScanDesc scan)
BlockNumber blkno;
/*
* Get back the read lock we were holding on the buffer.
* (We still have a reference-count pin on it, though.)
* Get back the read lock we were holding on the buffer. (We still
* have a reference-count pin on it, though.)
*/
LockBuffer(buf, BT_READ);
@ -694,8 +702,8 @@ _bt_restscan(IndexScanDesc scan)
}
/*
* The item we were on may have moved right due to insertions.
* Find it again.
* The item we were on may have moved right due to insertions. Find it
* again.
*/
for (;;)
{
@ -717,7 +725,8 @@ _bt_restscan(IndexScanDesc scan)
}
/*
* By here, the item we're looking for moved right at least one page
* By here, the item we're looking for moved right at least one
* page
*/
if (P_RIGHTMOST(opaque))
elog(FATAL, "_bt_restscan: my bits moved right off the end of the world!"
@ -742,7 +751,7 @@ _bt_restore_page(Page page, char *from, int len)
Size itemsz;
char *end = from + len;
for ( ; from < end; )
for (; from < end;)
{
memcpy(&btdata, from, sizeof(BTItemData));
itemsz = IndexTupleDSize(btdata.bti_itup) +
@ -766,7 +775,7 @@ btree_xlog_delete(bool redo, XLogRecPtr lsn, XLogRecord *record)
if (!redo || (record->xl_info & XLR_BKP_BLOCK_1))
return;
xlrec = (xl_btree_delete*) XLogRecGetData(record);
xlrec = (xl_btree_delete *) XLogRecGetData(record);
reln = XLogOpenRelation(redo, RM_BTREE_ID, xlrec->target.node);
if (!RelationIsValid(reln))
return;
@ -805,7 +814,7 @@ btree_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
if (redo && (record->xl_info & XLR_BKP_BLOCK_1))
return;
xlrec = (xl_btree_insert*) XLogRecGetData(record);
xlrec = (xl_btree_insert *) XLogRecGetData(record);
reln = XLogOpenRelation(redo, RM_BTREE_ID, xlrec->target.node);
if (!RelationIsValid(reln))
return;
@ -825,7 +834,7 @@ btree_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
UnlockAndReleaseBuffer(buffer);
return;
}
if (PageAddItem(page, (Item)((char*)xlrec + SizeOfBtreeInsert),
if (PageAddItem(page, (Item) ((char *) xlrec + SizeOfBtreeInsert),
record->xl_len - SizeOfBtreeInsert,
ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
LP_USED) == InvalidOffsetNumber)
@ -840,7 +849,7 @@ btree_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
if (XLByteLT(PageGetLSN(page), lsn))
elog(STOP, "btree_insert_undo: bad page LSN");
if (! P_ISLEAF(pageop))
if (!P_ISLEAF(pageop))
{
UnlockAndReleaseBuffer(buffer);
return;
@ -855,7 +864,7 @@ btree_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
static void
btree_xlog_split(bool redo, bool onleft, XLogRecPtr lsn, XLogRecord *record)
{
xl_btree_split *xlrec = (xl_btree_split*) XLogRecGetData(record);
xl_btree_split *xlrec = (xl_btree_split *) XLogRecGetData(record);
Relation reln;
BlockNumber blkno;
Buffer buffer;
@ -892,13 +901,14 @@ btree_xlog_split(bool redo, bool onleft, XLogRecPtr lsn, XLogRecord *record)
pageop->btpo_next = ItemPointerGetBlockNumber(&(xlrec->target.tid));
pageop->btpo_flags = (isleaf) ? BTP_LEAF : 0;
_bt_restore_page(page, (char*)xlrec + SizeOfBtreeSplit, xlrec->leftlen);
_bt_restore_page(page, (char *) xlrec + SizeOfBtreeSplit, xlrec->leftlen);
PageSetLSN(page, lsn);
PageSetSUI(page, ThisStartUpID);
UnlockAndWriteBuffer(buffer);
}
else /* undo */
else
/* undo */
{
if (XLByteLT(PageGetLSN(page), lsn))
elog(STOP, "btree_split_undo: bad left sibling LSN");
@ -929,14 +939,15 @@ btree_xlog_split(bool redo, bool onleft, XLogRecPtr lsn, XLogRecord *record)
pageop->btpo_flags = (isleaf) ? BTP_LEAF : 0;
_bt_restore_page(page,
(char*)xlrec + SizeOfBtreeSplit + xlrec->leftlen,
(char *) xlrec + SizeOfBtreeSplit + xlrec->leftlen,
record->xl_len - SizeOfBtreeSplit - xlrec->leftlen);
PageSetLSN(page, lsn);
PageSetSUI(page, ThisStartUpID);
UnlockAndWriteBuffer(buffer);
}
else /* undo */
else
/* undo */
{
if (XLByteLT(PageGetLSN(page), lsn))
elog(STOP, "btree_split_undo: bad right sibling LSN");
@ -977,7 +988,7 @@ btree_xlog_split(bool redo, bool onleft, XLogRecPtr lsn, XLogRecord *record)
static void
btree_xlog_newroot(bool redo, XLogRecPtr lsn, XLogRecord *record)
{
xl_btree_newroot *xlrec = (xl_btree_newroot*) XLogRecGetData(record);
xl_btree_newroot *xlrec = (xl_btree_newroot *) XLogRecGetData(record);
Relation reln;
Buffer buffer;
Page page;
@ -1011,7 +1022,7 @@ btree_xlog_newroot(bool redo, XLogRecPtr lsn, XLogRecord *record)
if (record->xl_len > SizeOfBtreeNewroot)
_bt_restore_page(page,
(char*)xlrec + SizeOfBtreeNewroot,
(char *) xlrec + SizeOfBtreeNewroot,
record->xl_len - SizeOfBtreeNewroot);
PageSetLSN(page, lsn);
@ -1065,7 +1076,7 @@ btree_undo(XLogRecPtr lsn, XLogRecord *record)
else if (info == XLOG_BTREE_INSERT)
btree_xlog_insert(false, lsn, record);
else if (info == XLOG_BTREE_SPLIT)
btree_xlog_split(false, false, lsn, record);/* new item on the right */
btree_xlog_split(false, false, lsn, record); /* new item on the right */
else if (info == XLOG_BTREE_SPLEFT)
btree_xlog_split(false, true, lsn, record); /* new item on the left */
else if (info == XLOG_BTREE_NEWROOT)
@ -1084,26 +1095,29 @@ out_target(char *buf, xl_btreetid *target)
}
void
btree_desc(char *buf, uint8 xl_info, char* rec)
btree_desc(char *buf, uint8 xl_info, char *rec)
{
uint8 info = xl_info & ~XLR_INFO_MASK;
info &= ~XLOG_BTREE_LEAF;
if (info == XLOG_BTREE_INSERT)
{
xl_btree_insert *xlrec = (xl_btree_insert*) rec;
xl_btree_insert *xlrec = (xl_btree_insert *) rec;
strcat(buf, "insert: ");
out_target(buf, &(xlrec->target));
}
else if (info == XLOG_BTREE_DELETE)
{
xl_btree_delete *xlrec = (xl_btree_delete*) rec;
xl_btree_delete *xlrec = (xl_btree_delete *) rec;
strcat(buf, "delete: ");
out_target(buf, &(xlrec->target));
}
else if (info == XLOG_BTREE_SPLIT || info == XLOG_BTREE_SPLEFT)
{
xl_btree_split *xlrec = (xl_btree_split*) rec;
xl_btree_split *xlrec = (xl_btree_split *) rec;
sprintf(buf + strlen(buf), "split(%s): ",
(info == XLOG_BTREE_SPLIT) ? "right" : "left");
out_target(buf, &(xlrec->target));
@ -1113,7 +1127,8 @@ btree_desc(char *buf, uint8 xl_info, char* rec)
}
else if (info == XLOG_BTREE_NEWROOT)
{
xl_btree_newroot *xlrec = (xl_btree_newroot*) rec;
xl_btree_newroot *xlrec = (xl_btree_newroot *) rec;
sprintf(buf + strlen(buf), "root: node %u/%u; blk %u",
xlrec->node.tblNode, xlrec->node.relNode,
BlockIdGetBlockNumber(&xlrec->rootblk));
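
btbuild() above keeps two spools when building a unique index: live tuples go to spool, dead ones to spool2 so they skip the uniqueness check. A toy self-contained sketch of that routing decision follows, with a bare counter standing in for the real BTSpool.

/* Illustrative sketch only: splitting live and dead tuples between two
 * accumulators, as btbuild() does with spool and spool2 above.
 * FakeSpool and spool_add are invented stand-ins. */
#include <stdio.h>
#include <stdbool.h>

typedef struct { int nitems; } FakeSpool;

static void
spool_add(FakeSpool *spool, int tupleid)
{
	spool->nitems++;			/* the real code buffers the index item */
	(void) tupleid;
}

int
main(void)
{
	FakeSpool	live = {0};		/* checked for uniqueness in the real code */
	FakeSpool	dead = {0};		/* dead tuples bypass the uniqueness check */
	bool		alive[] = {true, true, false, true, false};

	for (int i = 0; i < 5; i++)
	{
		if (alive[i])
			spool_add(&live, i);
		else
			spool_add(&dead, i);
	}
	printf("live=%d dead=%d\n", live.nitems, dead.nitems);
	return 0;
}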


@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.63 2001/01/24 19:42:49 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.64 2001/03/22 03:59:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -45,7 +45,7 @@ _bt_search(Relation rel, int keysz, ScanKey scankey,
*bufP = _bt_getroot(rel, access);
/* If index is empty and access = BT_READ, no root page is created. */
if (! BufferIsValid(*bufP))
if (!BufferIsValid(*bufP))
return (BTStack) NULL;
/* Loop iterates once per level descended in the tree */
@ -79,13 +79,13 @@ _bt_search(Relation rel, int keysz, ScanKey scankey,
par_blkno = BufferGetBlockNumber(*bufP);
/*
* We need to save the bit image of the index entry we chose in the
* parent page on a stack. In case we split the tree, we'll use this
* bit image to figure out what our real parent page is, in case the
* parent splits while we're working lower in the tree. See the paper
* by Lehman and Yao for how this is detected and handled. (We use the
* child link to disambiguate duplicate keys in the index -- Lehman
* and Yao disallow duplicate keys.)
* We need to save the bit image of the index entry we chose in
* the parent page on a stack. In case we split the tree, we'll
* use this bit image to figure out what our real parent page is,
* in case the parent splits while we're working lower in the
* tree. See the paper by Lehman and Yao for how this is detected
* and handled. (We use the child link to disambiguate duplicate
* keys in the index -- Lehman and Yao disallow duplicate keys.)
*/
new_stack = (BTStack) palloc(sizeof(BTStackData));
new_stack->bts_blkno = par_blkno;
@ -98,9 +98,9 @@ _bt_search(Relation rel, int keysz, ScanKey scankey,
*bufP = _bt_getbuf(rel, blkno, BT_READ);
/*
* Race -- the page we just grabbed may have split since we read its
* pointer in the parent. If it has, we may need to move right to its
* new sibling. Do that.
* Race -- the page we just grabbed may have split since we read
* its pointer in the parent. If it has, we may need to move
* right to its new sibling. Do that.
*/
*bufP = _bt_moveright(rel, *bufP, keysz, scankey, BT_READ);
@ -299,7 +299,7 @@ _bt_compare(Relation rel,
* Force result ">" if target item is first data item on an internal
* page --- see NOTE above.
*/
if (! P_ISLEAF(opaque) && offnum == P_FIRSTDATAKEY(opaque))
if (!P_ISLEAF(opaque) && offnum == P_FIRSTDATAKEY(opaque))
return 1;
btitem = (BTItem) PageGetItem(page, PageGetItemId(page, offnum));
@ -458,10 +458,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
_bt_orderkeys(rel, so);
/*
* Quit now if _bt_orderkeys() discovered that the scan keys can
* never be satisfied (eg, x == 1 AND x > 2).
* Quit now if _bt_orderkeys() discovered that the scan keys can never
* be satisfied (eg, x == 1 AND x > 2).
*/
if (! so->qual_ok)
if (!so->qual_ok)
return (RetrieveIndexResult) NULL;
/*
@ -484,17 +484,16 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
break;
strat = _bt_getstrat(rel, attno,
so->keyData[i].sk_procedure);
/*
* Can we use this key as a starting boundary for this attr?
*
* We can use multiple keys if they look like, say, = >= =
* but we have to stop after accepting a > or < boundary.
* We can use multiple keys if they look like, say, = >= = but we
* have to stop after accepting a > or < boundary.
*/
if (strat == strat_total ||
strat == BTEqualStrategyNumber)
{
nKeyIs[keysCount++] = i;
}
else if (ScanDirectionIsBackward(dir) &&
(strat == BTLessStrategyNumber ||
strat == BTLessEqualStrategyNumber))
@ -536,7 +535,11 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
for (i = 0; i < keysCount; i++)
{
j = nKeyIs[i];
/* _bt_orderkeys disallows it, but it's place to add some code later */
/*
* _bt_orderkeys disallows it, but it's place to add some code
* later
*/
if (so->keyData[j].sk_flags & SK_ISNULL)
{
pfree(nKeyIs);
@ -562,7 +565,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
/* don't need to keep the stack around... */
_bt_freestack(stack);
if (! BufferIsValid(buf))
if (!BufferIsValid(buf))
{
/* Only get here if index is completely empty */
ItemPointerSetInvalid(current);
@ -601,6 +604,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
switch (strat_total)
{
case BTLessStrategyNumber:
/*
* Back up one to arrive at last item < scankey
*/
@ -612,6 +616,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
break;
case BTLessEqualStrategyNumber:
/*
* We need to find the last item <= scankey, so step forward
* till we find one > scankey, then step back one.
@ -645,9 +650,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
break;
case BTEqualStrategyNumber:
/*
* Make sure we are on the first equal item; might have to step
* forward if currently at end of page.
* Make sure we are on the first equal item; might have to
* step forward if currently at end of page.
*/
if (offnum > PageGetMaxOffsetNumber(page))
{
@ -662,6 +668,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
result = _bt_compare(rel, keysCount, scankeys, page, offnum);
if (result != 0)
goto nomatches; /* no equal items! */
/*
* If a backward scan was specified, need to start with last
* equal item not first one.
@ -685,6 +692,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
break;
case BTGreaterEqualStrategyNumber:
/*
* We want the first item >= scankey, which is where we are...
* unless we're not anywhere at all...
@ -700,9 +708,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
break;
case BTGreaterStrategyNumber:
/*
* We want the first item > scankey, so make sure we are on
* an item and then step over any equal items.
* We want the first item > scankey, so make sure we are on an
* item and then step over any equal items.
*/
if (offnum > PageGetMaxOffsetNumber(page))
{
@ -850,11 +859,12 @@ _bt_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
*bufP = _bt_getbuf(rel, blkno, BT_READ);
page = BufferGetPage(*bufP);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
/*
* If the adjacent page just split, then we have to walk
* right to find the block that's now adjacent to where
* we were. Because pages only split right, we don't have
* to worry about this failing to terminate.
* right to find the block that's now adjacent to where we
* were. Because pages only split right, we don't have to
* worry about this failing to terminate.
*/
while (opaque->btpo_next != obknum)
{
@ -917,7 +927,7 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir)
*/
buf = _bt_getroot(rel, BT_READ);
if (! BufferIsValid(buf))
if (!BufferIsValid(buf))
{
/* empty index... */
ItemPointerSetInvalid(current);
@ -981,7 +991,8 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir)
Assert(P_RIGHTMOST(opaque));
start = PageGetMaxOffsetNumber(page);
if (start < P_FIRSTDATAKEY(opaque)) /* watch out for empty page */
if (start < P_FIRSTDATAKEY(opaque)) /* watch out for empty
* page */
start = P_FIRSTDATAKEY(opaque);
}
else
@ -995,8 +1006,8 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir)
so->btso_curbuf = buf;
/*
* Left/rightmost page could be empty due to deletions,
* if so step till we find a nonempty page.
* Left/rightmost page could be empty due to deletions, if so step
* till we find a nonempty page.
*/
if (start > maxoff)
{
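
The comments above describe the Lehman and Yao race: the page just read may have split after its pointer was fetched from the parent, so the descent has to move right until it reaches a page that can still contain the scan key. A minimal self-contained sketch of that move-right step follows, with an invented page struct instead of the real buffer API.

/* Illustrative sketch only: the "move right after a concurrent split"
 * step, in the spirit of the race handling above.  FakePage and
 * move_right are invented stand-ins; pages are a linked list here. */
#include <stdio.h>

typedef struct FakePage
{
	int			highkey;		/* upper bound of keys on this page */
	struct FakePage *right;		/* right sibling, or NULL if rightmost */
} FakePage;

static FakePage *
move_right(FakePage *page, int scankey)
{
	/* keep stepping right while this page cannot contain scankey */
	while (page->right != NULL && scankey > page->highkey)
		page = page->right;
	return page;
}

int
main(void)
{
	FakePage	p3 = {100, NULL};
	FakePage	p2 = {50, &p3};
	FakePage	p1 = {20, &p2};	/* pretend p1 just split */

	printf("key 60 lands on page with highkey %d\n",
		   move_right(&p1, 60)->highkey);
	return 0;
}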


@ -35,7 +35,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsort.c,v 1.59 2001/01/24 19:42:49 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsort.c,v 1.60 2001/03/22 03:59:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -73,10 +73,12 @@ typedef struct BTPageState
{
Buffer btps_buf; /* current buffer & page */
Page btps_page;
BTItem btps_minkey; /* copy of minimum key (first item) on page */
BTItem btps_minkey; /* copy of minimum key (first item) on
* page */
OffsetNumber btps_lastoff; /* last item offset loaded */
int btps_level; /* tree level (0 = leaf) */
Size btps_full; /* "full" if less than this much free space */
Size btps_full; /* "full" if less than this much free
* space */
struct BTPageState *btps_next; /* link to parent level, if any */
} BTPageState;
@ -271,7 +273,7 @@ _bt_sortaddtup(Page page,
BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
BTItemData truncitem;
if (! P_ISLEAF(opaque) && itup_off == P_FIRSTKEY)
if (!P_ISLEAF(opaque) && itup_off == P_FIRSTKEY)
{
memcpy(&truncitem, btitem, sizeof(BTItemData));
truncitem.bti_itup.t_info = sizeof(BTItemData);
@ -347,11 +349,12 @@ _bt_buildadd(Relation index, BTPageState *state, BTItem bti)
*/
if (btisz > (PageGetPageSize(npage) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) / 3 - sizeof(ItemIdData))
elog(ERROR, "btree: index item size %lu exceeds maximum %ld",
(unsigned long)btisz,
(PageGetPageSize(npage) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) /3 - sizeof(ItemIdData));
(unsigned long) btisz,
(PageGetPageSize(npage) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) / 3 - sizeof(ItemIdData));
if (pgspc < btisz || pgspc < state->btps_full)
{
/*
* Item won't fit on this page, or we feel the page is full enough
* already. Finish off the page and write it out.
@ -388,9 +391,9 @@ _bt_buildadd(Relation index, BTPageState *state, BTItem bti)
((PageHeader) opage)->pd_lower -= sizeof(ItemIdData);
/*
* Link the old buffer into its parent, using its minimum key.
* If we don't have a parent, we have to create one;
* this adds a new btree level.
* Link the old buffer into its parent, using its minimum key. If
* we don't have a parent, we have to create one; this adds a new
* btree level.
*/
if (state->btps_next == (BTPageState *) NULL)
{
@ -405,8 +408,8 @@ _bt_buildadd(Relation index, BTPageState *state, BTItem bti)
/*
* Save a copy of the minimum key for the new page. We have to
* copy it off the old page, not the new one, in case we are
* not at leaf level.
* copy it off the old page, not the new one, in case we are not
* at leaf level.
*/
state->btps_minkey = _bt_formitem(&(obti->bti_itup));
@ -414,13 +417,13 @@ _bt_buildadd(Relation index, BTPageState *state, BTItem bti)
* Set the sibling links for both pages, and parent links too.
*
* It's not necessary to set the parent link at all, because it's
* only used for handling concurrent root splits, but we may as well
* do it as a debugging aid. Note we set new page's link as well
* as old's, because if the new page turns out to be the last of
* the level, _bt_uppershutdown won't change it. The links may be
* out of date by the time the build finishes, but that's OK; they
* need only point to a left-sibling of the true parent. See the
* README file for more info.
* only used for handling concurrent root splits, but we may as
* well do it as a debugging aid. Note we set new page's link as
* well as old's, because if the new page turns out to be the last
* of the level, _bt_uppershutdown won't change it. The links may
* be out of date by the time the build finishes, but that's OK;
* they need only point to a left-sibling of the true parent. See
* the README file for more info.
*/
{
BTPageOpaque oopaque = (BTPageOpaque) PageGetSpecialPointer(opage);
@ -449,8 +452,8 @@ _bt_buildadd(Relation index, BTPageState *state, BTItem bti)
/*
* If the new item is the first for its page, stash a copy for later.
* Note this will only happen for the first item on a level; on later
* pages, the first item for a page is copied from the prior page
* in the code above.
* pages, the first item for a page is copied from the prior page in
* the code above.
*/
if (last_off == P_HIKEY)
{
@ -493,8 +496,8 @@ _bt_uppershutdown(Relation index, BTPageState *state)
*
* If we're at the top, it's the root, so attach it to the metapage.
* Otherwise, add an entry for it to its parent using its minimum
* key. This may cause the last page of the parent level to split,
* but that's not a problem -- we haven't gotten to it yet.
* key. This may cause the last page of the parent level to
* split, but that's not a problem -- we haven't gotten to it yet.
*/
if (s->btps_next == (BTPageState *) NULL)
{
@ -529,21 +532,28 @@ _bt_load(Relation index, BTSpool *btspool, BTSpool *btspool2)
{
BTPageState *state = NULL;
bool merge = (btspool2 != NULL);
BTItem bti, bti2 = NULL;
bool should_free, should_free2, load1;
BTItem bti,
bti2 = NULL;
bool should_free,
should_free2,
load1;
TupleDesc tupdes = RelationGetDescr(index);
int i, keysz = RelationGetNumberOfAttributes(index);
int i,
keysz = RelationGetNumberOfAttributes(index);
ScanKey indexScanKey = NULL;
if (merge)
{
/*
* Another BTSpool for dead tuples exists.
* Now we have to merge btspool and btspool2.
* Another BTSpool for dead tuples exists. Now we have to merge
* btspool and btspool2.
*/
ScanKey entry;
Datum attrDatum1, attrDatum2;
bool isFirstNull, isSecondNull;
Datum attrDatum1,
attrDatum2;
bool isFirstNull,
isSecondNull;
int32 compare;
/* the preparation of merge */
@ -564,8 +574,8 @@ _bt_load(Relation index, BTSpool *btspool, BTSpool *btspool2)
for (i = 1; i <= keysz; i++)
{
entry = indexScanKey + i - 1;
attrDatum1 = index_getattr((IndexTuple)bti, i, tupdes, &isFirstNull);
attrDatum2 = index_getattr((IndexTuple)bti2, i, tupdes, &isSecondNull);
attrDatum1 = index_getattr((IndexTuple) bti, i, tupdes, &isFirstNull);
attrDatum2 = index_getattr((IndexTuple) bti2, i, tupdes, &isSecondNull);
if (isFirstNull)
{
if (!isSecondNull)
@ -613,7 +623,8 @@ _bt_load(Relation index, BTSpool *btspool, BTSpool *btspool2)
}
_bt_freeskey(indexScanKey);
}
else /* merge is unnecessary */
else
/* merge is unnecessary */
{
while (bti = (BTItem) tuplesort_getindextuple(btspool->sortstate, true, &should_free), bti != (BTItem) NULL)
{
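
_bt_load() above merges the two sorted spools, repeatedly deciding which stream to pull from next (the load1 flag) so the combined output stays in index order. A small self-contained sketch of that two-way merge over plain arrays follows; the arrays stand in for the sort tapes.

/* Illustrative sketch only: order-preserving merge of two sorted
 * streams, in the spirit of the btspool/btspool2 merge above. */
#include <stdio.h>

static void
merge(const int *a, int na, const int *b, int nb)
{
	int			i = 0,
				j = 0;

	while (i < na || j < nb)
	{
		/* load1 means "take from the first stream next" */
		int			load1 = (j >= nb) || (i < na && a[i] <= b[j]);

		if (load1)
			printf("%d ", a[i++]);
		else
			printf("%d ", b[j++]);
	}
	printf("\n");
}

int
main(void)
{
	int			live[] = {1, 4, 9};
	int			dead[] = {2, 3, 10};

	merge(live, 3, dead, 3);	/* prints 1 2 3 4 9 10 */
	return 0;
}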


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtutils.c,v 1.42 2001/01/24 19:42:49 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtutils.c,v 1.43 2001/03/22 03:59:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -240,8 +240,8 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
/*
* Initialize for processing of keys for attr 1.
*
* xform[i] holds a copy of the current scan key of strategy type i+1,
* if any; init[i] is TRUE if we have found such a key for this attr.
* xform[i] holds a copy of the current scan key of strategy type i+1, if
* any; init[i] is TRUE if we have found such a key for this attr.
*/
attno = 1;
map = IndexStrategyGetStrategyMap(RelationGetIndexStrategy(relation),
@ -255,7 +255,7 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
* pass to handle after-last-key processing. Actual exit from the
* loop is at the "break" statement below.
*/
for (i = 0; ; cur++, i++)
for (i = 0;; cur++, i++)
{
if (i < numberOfKeys)
{
@ -263,7 +263,9 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
if (cur->sk_flags & SK_ISNULL)
{
so->qual_ok = false;
/* Quit processing so we don't try to invoke comparison
/*
* Quit processing so we don't try to invoke comparison
* routines on NULLs.
*/
return;
@ -271,8 +273,8 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
}
/*
* If we are at the end of the keys for a particular attr,
* finish up processing and emit the cleaned-up keys.
* If we are at the end of the keys for a particular attr, finish
* up processing and emit the cleaned-up keys.
*/
if (i == numberOfKeys || cur->sk_attno != attno)
{
@ -296,7 +298,7 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
eq = &xform[BTEqualStrategyNumber - 1];
for (j = BTMaxStrategyNumber; --j >= 0;)
{
if (! init[j] ||
if (!init[j] ||
j == (BTEqualStrategyNumber - 1))
continue;
chk = &xform[j];
@ -313,6 +315,7 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
}
else
{
/*
* No "=" for this key, so we're done with required keys
*/
@ -355,8 +358,8 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
* Emit the cleaned-up keys back into the key[] array in the
* correct order. Note we are overwriting our input here!
* It's OK because (a) xform[] is a physical copy of the keys
* we want, (b) we cannot emit more keys than we input, so
* we won't overwrite as-yet-unprocessed keys.
* we want, (b) we cannot emit more keys than we input, so we
* won't overwrite as-yet-unprocessed keys.
*/
for (j = BTMaxStrategyNumber; --j >= 0;)
{
@ -409,7 +412,8 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
if (DatumGetBool(test))
xform[j].sk_argument = cur->sk_argument;
else if (j == (BTEqualStrategyNumber - 1))
so->qual_ok = false; /* key == a && key == b, but a != b */
so->qual_ok = false; /* key == a && key == b, but a !=
* b */
}
else
{
@ -473,16 +477,18 @@ _bt_checkkeys(IndexScanDesc scan, IndexTuple tuple,
if (isNull)
{
/*
* Since NULLs are sorted after non-NULLs, we know we have
* reached the upper limit of the range of values for this
* index attr. On a forward scan, we can stop if this qual
* is one of the "must match" subset. On a backward scan,
* index attr. On a forward scan, we can stop if this qual is
* one of the "must match" subset. On a backward scan,
* however, we should keep going.
*/
if (keysok < so->numberOfRequiredKeys &&
ScanDirectionIsForward(dir))
*continuescan = false;
/*
* In any case, this indextuple doesn't match the qual.
*/
@ -498,9 +504,10 @@ _bt_checkkeys(IndexScanDesc scan, IndexTuple tuple,
if (DatumGetBool(test) == !!(key->sk_flags & SK_NEGATE))
{
/*
* Tuple fails this qual. If it's a required qual, then
* we can conclude no further tuples will pass, either.
* Tuple fails this qual. If it's a required qual, then we
* can conclude no further tuples will pass, either.
*/
if (keysok < so->numberOfRequiredKeys)
*continuescan = false;
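
_bt_orderkeys() above collapses redundant scan keys for each attribute and clears qual_ok when the keys can never all hold, for example equality against two different constants, or an equality that violates an inequality bound. A toy self-contained sketch of that reduction for a single attribute follows, modelling only "=" and ">".

/* Illustrative sketch only: keeping the tightest bounds for one
 * attribute and flagging contradictions, in the spirit of
 * _bt_orderkeys() above.  KeySet and its helpers are invented names. */
#include <stdio.h>
#include <stdbool.h>

typedef struct
{
	bool		have_eq;
	bool		have_gt;
	int			eq_val;
	int			gt_val;			/* keep only the largest ">" bound */
	bool		qual_ok;		/* false once the keys contradict */
} KeySet;

static void
add_eq(KeySet *ks, int val)
{
	if (ks->have_eq && ks->eq_val != val)
		ks->qual_ok = false;	/* x = a AND x = b with a != b */
	ks->have_eq = true;
	ks->eq_val = val;
	if (ks->have_gt && !(val > ks->gt_val))
		ks->qual_ok = false;	/* e.g. x = 1 AND x > 2 */
}

static void
add_gt(KeySet *ks, int val)
{
	if (!ks->have_gt || val > ks->gt_val)
		ks->gt_val = val;
	ks->have_gt = true;
	if (ks->have_eq && !(ks->eq_val > val))
		ks->qual_ok = false;
}

int
main(void)
{
	KeySet		ks = {false, false, 0, 0, true};

	add_eq(&ks, 1);
	add_gt(&ks, 2);				/* x = 1 AND x > 2 */
	printf("qual_ok = %d\n", ks.qual_ok);	/* prints 0 */
	return 0;
}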


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtget.c,v 1.24 2001/01/24 19:42:49 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtget.c,v 1.25 2001/03/22 03:59:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/


@ -15,7 +15,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtproc.c,v 1.31 2001/01/24 19:42:49 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtproc.c,v 1.32 2001/03/22 03:59:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -70,6 +70,7 @@ Datum
rt_box_size(PG_FUNCTION_ARGS)
{
BOX *a = PG_GETARG_BOX_P(0);
/* NB: size is an output argument */
float *size = (float *) PG_GETARG_POINTER(1);
@ -155,13 +156,15 @@ Datum
rt_poly_size(PG_FUNCTION_ARGS)
{
Pointer aptr = PG_GETARG_POINTER(0);
/* NB: size is an output argument */
float *size = (float *) PG_GETARG_POINTER(1);
POLYGON *a;
double xdim,
ydim;
/* Can't just use GETARG because of possibility that input is NULL;
/*
* Can't just use GETARG because of possibility that input is NULL;
* since POLYGON is toastable, GETARG will try to inspect its value
*/
if (aptr == NULL)


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.60 2001/03/07 21:20:26 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.61 2001/03/22 03:59:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -88,8 +88,10 @@ rtbuild(PG_FUNCTION_ARGS)
Relation index = (Relation) PG_GETARG_POINTER(1);
IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
Node *oldPred = (Node *) PG_GETARG_POINTER(3);
#ifdef NOT_USED
IndexStrategy istrat = (IndexStrategy) PG_GETARG_POINTER(4);
#endif
HeapScanDesc hscan;
HeapTuple htup;
@ -101,9 +103,11 @@ rtbuild(PG_FUNCTION_ARGS)
int nhtups,
nitups;
Node *pred = indexInfo->ii_Predicate;
#ifndef OMIT_PARTIAL_INDEX
TupleTable tupleTable;
TupleTableSlot *slot;
#endif
ExprContext *econtext;
InsertIndexResult res = NULL;
@ -171,6 +175,7 @@ rtbuild(PG_FUNCTION_ARGS)
nhtups++;
#ifndef OMIT_PARTIAL_INDEX
/*
* If oldPred != NULL, this is an EXTEND INDEX command, so skip
* this tuple if it was already in the existing partial index
@ -232,9 +237,7 @@ rtbuild(PG_FUNCTION_ARGS)
#ifndef OMIT_PARTIAL_INDEX
if (pred != NULL || oldPred != NULL)
{
ExecDropTupleTable(tupleTable, true);
}
#endif /* OMIT_PARTIAL_INDEX */
FreeExprContext(econtext);
@ -282,8 +285,10 @@ rtinsert(PG_FUNCTION_ARGS)
Datum *datum = (Datum *) PG_GETARG_POINTER(1);
char *nulls = (char *) PG_GETARG_POINTER(2);
ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
#ifdef NOT_USED
Relation heapRel = (Relation) PG_GETARG_POINTER(4);
#endif
InsertIndexResult res;
IndexTuple itup;
@ -564,7 +569,7 @@ rtdosplit(Relation r,
res = (InsertIndexResult) palloc(sizeof(InsertIndexResultData));
/* now insert the new index tuple */
if (*spl_left == maxoff+1)
if (*spl_left == maxoff + 1)
{
if (PageAddItem(left, (Item) itup, IndexTupleSize(itup),
leftoff, LP_USED) == InvalidOffsetNumber)
@ -576,7 +581,7 @@ rtdosplit(Relation r,
}
else
{
Assert(*spl_right == maxoff+1);
Assert(*spl_right == maxoff + 1);
if (PageAddItem(right, (Item) itup, IndexTupleSize(itup),
rightoff, LP_USED) == InvalidOffsetNumber)
elog(ERROR, "rtdosplit: failed to add index item to %s",
@ -665,10 +670,10 @@ rtintinsert(Relation r,
old = (IndexTuple) PageGetItem(p, PageGetItemId(p, stk->rts_child));
/*
* This is a hack. Right now, we force rtree internal keys to be constant
* size. To fix this, need delete the old key and add both left and
* right for the two new pages. The insertion of left may force a
* split if the new left key is bigger than the old key.
* This is a hack. Right now, we force rtree internal keys to be
* constant size. To fix this, need delete the old key and add both
* left and right for the two new pages. The insertion of left may
* force a split if the new left key is bigger than the old key.
*/
if (IndexTupleSize(old) != IndexTupleSize(ltup))
@ -794,9 +799,10 @@ rtpicksplit(Relation r,
right_avail_space;
/*
* First, make sure the new item is not so large that we can't possibly
* fit it on a page, even by itself. (It's sufficient to make this test
* here, since any oversize tuple must lead to a page split attempt.)
* First, make sure the new item is not so large that we can't
* possibly fit it on a page, even by itself. (It's sufficient to
* make this test here, since any oversize tuple must lead to a page
* split attempt.)
*/
newitemsz = IndexTupleTotalSize(itup);
if (newitemsz > RTPageAvailSpace)
@ -804,7 +810,8 @@ rtpicksplit(Relation r,
(unsigned long) newitemsz, (unsigned long) RTPageAvailSpace);
maxoff = PageGetMaxOffsetNumber(page);
newitemoff = OffsetNumberNext(maxoff); /* phony index for new item */
newitemoff = OffsetNumberNext(maxoff); /* phony index for new
* item */
/* Make arrays big enough for worst case, including sentinel */
nbytes = (maxoff + 2) * sizeof(OffsetNumber);
@ -827,8 +834,8 @@ rtpicksplit(Relation r,
item_2_sz = IndexTupleTotalSize(item_2);
/*
* Ignore seed pairs that don't leave room for the new item
* on either split page.
* Ignore seed pairs that don't leave room for the new item on
* either split page.
*/
if (newitemsz + item_1_sz > RTPageAvailSpace &&
newitemsz + item_2_sz > RTPageAvailSpace)
@ -841,8 +848,10 @@ rtpicksplit(Relation r,
PointerGetDatum(&size_union));
inter_d = FunctionCall2(&rtstate->interFn,
datum_alpha, datum_beta);
/* The interFn may return a NULL pointer (not an SQL null!)
* to indicate no intersection. sizeFn must cope with this.
/*
* The interFn may return a NULL pointer (not an SQL null!) to
* indicate no intersection. sizeFn must cope with this.
*/
FunctionCall2(&rtstate->sizeFn, inter_d,
PointerGetDatum(&size_inter));
@ -869,6 +878,7 @@ rtpicksplit(Relation r,
if (firsttime)
{
/*
* There is no possible split except to put the new item on its
* own page. Since we still have to compute the union rectangles,
@ -922,8 +932,8 @@ rtpicksplit(Relation r,
/*
* If we've already decided where to place this item, just put it
* on the correct list. Otherwise, we need to figure out which page
* needs the least enlargement in order to store the item.
* on the correct list. Otherwise, we need to figure out which
* page needs the least enlargement in order to store the item.
*/
if (i == seed_1)
@ -961,12 +971,13 @@ rtpicksplit(Relation r,
PointerGetDatum(&size_beta));
/*
* We prefer the page that shows smaller enlargement of its union area
* (Guttman's algorithm), but we must take care that at least one page
* will still have room for the new item after this one is added.
* We prefer the page that shows smaller enlargement of its union
* area (Guttman's algorithm), but we must take care that at least
* one page will still have room for the new item after this one
* is added.
*
* (We know that all the old items together can fit on one page,
* so we need not worry about any other problem than failing to fit
* (We know that all the old items together can fit on one page, so
* we need not worry about any other problem than failing to fit
* the new item.)
*/
left_feasible = (left_avail_space >= item_1_sz &&
@ -987,7 +998,7 @@ rtpicksplit(Relation r,
else
{
elog(ERROR, "rtpicksplit: failed to find a workable page split");
choose_left = false; /* keep compiler quiet */
choose_left = false;/* keep compiler quiet */
}
if (choose_left)
@ -1211,6 +1222,6 @@ rtree_undo(XLogRecPtr lsn, XLogRecord *record)
}
void
rtree_desc(char *buf, uint8 xl_info, char* rec)
rtree_desc(char *buf, uint8 xl_info, char *rec)
{
}
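
rtpicksplit() above follows Guttman's rule: after the seeds are chosen, each remaining item goes to whichever page's union rectangle would grow the least, provided both pages stay feasible. A minimal self-contained sketch of just the enlargement comparison follows, with a plain box type instead of the real BOX and no feasibility check.

/* Illustrative sketch only: least-enlargement placement of one item
 * between two candidate pages, as discussed in rtpicksplit() above.
 * Rect, area and bounding_union are invented stand-ins. */
#include <stdio.h>

typedef struct
{
	double		xlo, ylo, xhi, yhi;
} Rect;

static double
area(Rect r)
{
	return (r.xhi - r.xlo) * (r.yhi - r.ylo);
}

static Rect
bounding_union(Rect a, Rect b)
{
	Rect		u;

	u.xlo = a.xlo < b.xlo ? a.xlo : b.xlo;
	u.ylo = a.ylo < b.ylo ? a.ylo : b.ylo;
	u.xhi = a.xhi > b.xhi ? a.xhi : b.xhi;
	u.yhi = a.yhi > b.yhi ? a.yhi : b.yhi;
	return u;
}

int
main(void)
{
	Rect		left = {0, 0, 2, 2};
	Rect		right = {10, 10, 12, 12};
	Rect		item = {1, 1, 3, 3};

	double		grow_left = area(bounding_union(left, item)) - area(left);
	double		grow_right = area(bounding_union(right, item)) - area(right);

	/* pick the page whose union grows the least */
	printf("item goes %s\n", grow_left <= grow_right ? "left" : "right");
	return 0;
}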


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.35 2001/01/24 19:42:50 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.36 2001/03/22 03:59:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/


@ -10,20 +10,20 @@
#include "commands/sequence.h"
RmgrData RmgrTable[] = {
{"XLOG", xlog_redo, xlog_undo, xlog_desc},
{"Transaction", xact_redo, xact_undo, xact_desc},
{"Storage", smgr_redo, smgr_undo, smgr_desc},
{"Reserved 3", NULL, NULL, NULL},
{"Reserved 4", NULL, NULL, NULL},
{"Reserved 5", NULL, NULL, NULL},
{"Reserved 6", NULL, NULL, NULL},
{"Reserved 7", NULL, NULL, NULL},
{"Reserved 8", NULL, NULL, NULL},
{"Reserved 9", NULL, NULL, NULL},
{"Heap", heap_redo, heap_undo, heap_desc},
{"Btree", btree_redo, btree_undo, btree_desc},
{"Hash", hash_redo, hash_undo, hash_desc},
{"Rtree", rtree_redo, rtree_undo, rtree_desc},
{"Gist", gist_redo, gist_undo, gist_desc},
{"Sequence", seq_redo, seq_undo, seq_desc}
{"XLOG", xlog_redo, xlog_undo, xlog_desc},
{"Transaction", xact_redo, xact_undo, xact_desc},
{"Storage", smgr_redo, smgr_undo, smgr_desc},
{"Reserved 3", NULL, NULL, NULL},
{"Reserved 4", NULL, NULL, NULL},
{"Reserved 5", NULL, NULL, NULL},
{"Reserved 6", NULL, NULL, NULL},
{"Reserved 7", NULL, NULL, NULL},
{"Reserved 8", NULL, NULL, NULL},
{"Reserved 9", NULL, NULL, NULL},
{"Heap", heap_redo, heap_undo, heap_desc},
{"Btree", btree_redo, btree_undo, btree_desc},
{"Hash", hash_redo, hash_undo, hash_desc},
{"Rtree", rtree_redo, rtree_undo, rtree_desc},
{"Gist", gist_redo, gist_undo, gist_desc},
{"Sequence", seq_redo, seq_undo, seq_desc}
};
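
RmgrTable above is indexed by resource-manager id, so recovery can hand each WAL record to the matching redo, undo, or desc routine. A tiny self-contained sketch of that table-driven dispatch follows, with invented callbacks in place of the real ones.

/* Illustrative sketch only: dispatching through a table of callbacks,
 * mirroring the RmgrTable layout above.  FakeRmgr and the two redo
 * functions are invented stand-ins, not the real resource managers. */
#include <stdio.h>

typedef struct
{
	const char *name;
	void		(*redo) (int recdata);
} FakeRmgr;

static void
heap_like_redo(int recdata)
{
	printf("heap redo of %d\n", recdata);
}

static void
btree_like_redo(int recdata)
{
	printf("btree redo of %d\n", recdata);
}

static const FakeRmgr table[] = {
	{"Heap", heap_like_redo},
	{"Btree", btree_like_redo},
};

int
main(void)
{
	int			rmid = 1;		/* the record names resource manager 1 */

	/* the replay loop indexes the table with the record's rmgr id */
	table[rmid].redo(42);
	return 0;
}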


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/transam/transam.c,v 1.41 2001/03/18 20:18:59 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/transam.c,v 1.42 2001/03/22 03:59:17 momjian Exp $
*
* NOTES
* This file contains the high level access-method interface to the


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/transam/Attic/transsup.c,v 1.28 2001/01/24 19:42:51 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/Attic/transsup.c,v 1.29 2001/03/22 03:59:17 momjian Exp $
*
* NOTES
* This file contains support functions for the high
@ -186,7 +186,7 @@ TransBlockGetXidStatus(Block tblock,
bits8 bit2;
BitIndex offset;
tblock = (Block) ((char*) tblock + sizeof(XLogRecPtr));
tblock = (Block) ((char *) tblock + sizeof(XLogRecPtr));
/* ----------------
* calculate the index into the transaction data where
@ -229,7 +229,7 @@ TransBlockSetXidStatus(Block tblock,
Index index;
BitIndex offset;
tblock = (Block) ((char*) tblock + sizeof(XLogRecPtr));
tblock = (Block) ((char *) tblock + sizeof(XLogRecPtr));
/* ----------------
* calculate the index into the transaction data where


@ -6,7 +6,7 @@
* Copyright (c) 2000, PostgreSQL Global Development Group
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/transam/varsup.c,v 1.37 2001/03/18 20:18:59 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/varsup.c,v 1.38 2001/03/22 03:59:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -32,9 +32,10 @@ VariableCache ShmemVariableCache = NULL;
void
GetNewTransactionId(TransactionId *xid)
{
/*
* During bootstrap initialization, we return the special
* bootstrap transaction id.
* During bootstrap initialization, we return the special bootstrap
* transaction id.
*/
if (AMI_OVERRIDE)
{
@ -60,9 +61,10 @@ GetNewTransactionId(TransactionId *xid)
void
ReadNewTransactionId(TransactionId *xid)
{
/*
* During bootstrap initialization, we return the special
* bootstrap transaction id.
* During bootstrap initialization, we return the special bootstrap
* transaction id.
*/
if (AMI_OVERRIDE)
{
@ -130,10 +132,9 @@ CheckMaxObjectId(Oid assigned_oid)
}
/*
* We have exceeded the logged oid range.
* We should lock the database and kill all other backends
* but we are loading oid's that we can not guarantee are unique
* anyway, so we must rely on the user.
* We have exceeded the logged oid range. We should lock the database
* and kill all other backends but we are loading oid's that we can
* not guarantee are unique anyway, so we must rely on the user.
*/
XLogPutNextOid(assigned_oid + VAR_OID_PREFETCH);
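
CheckMaxObjectId() above bumps the logged OID ceiling by VAR_OID_PREFETCH, so a new ceiling is made durable for a whole range at once rather than for every individual OID. A small self-contained sketch of that prefetch idea follows, with a printf standing in for the WAL write.

/* Illustrative sketch only: handing out ids from a prefetched range and
 * recording a new ceiling only when the range is exhausted, in the
 * spirit of the VAR_OID_PREFETCH handling above.  No real WAL here. */
#include <stdio.h>

#define PREFETCH 8192

static unsigned int next_id = 1;
static unsigned int logged_ceiling = 0;

static unsigned int
get_new_id(void)
{
	if (next_id >= logged_ceiling)
	{
		/* pretend this is the durable "next id may reach X" record */
		logged_ceiling = next_id + PREFETCH;
		printf("log: ceiling advanced to %u\n", logged_ceiling);
	}
	return next_id++;
}

int
main(void)
{
	for (int i = 0; i < 3; i++)
		printf("allocated %u\n", get_new_id());
	return 0;
}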


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.99 2001/03/13 01:17:05 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.100 2001/03/22 03:59:18 momjian Exp $
*
* NOTES
* Transaction aborts can now occur two ways:
@ -222,9 +222,10 @@ int DefaultXactIsoLevel = XACT_READ_COMMITTED;
int XactIsoLevel;
int CommitDelay = 0; /* precommit delay in microseconds */
int CommitSiblings = 5; /* number of concurrent xacts needed to sleep */
int CommitSiblings = 5; /* number of concurrent xacts needed to
* sleep */
static void (*_RollbackFunc)(void*) = NULL;
static void (*_RollbackFunc) (void *) = NULL;
static void *_RollbackData = NULL;
/* ----------------
@ -674,25 +675,26 @@ RecordTransactionCommit()
xlrec.xtime = time(NULL);
rdata.buffer = InvalidBuffer;
rdata.data = (char *)(&xlrec);
rdata.data = (char *) (&xlrec);
rdata.len = SizeOfXactCommit;
rdata.next = NULL;
START_CRIT_SECTION();
/*
* SHOULD SAVE ARRAY OF RELFILENODE-s TO DROP
*/
recptr = XLogInsert(RM_XACT_ID, XLOG_XACT_COMMIT, &rdata);
/*
* Sleep before commit! So we can flush more than one
* commit records per single fsync. (The idea is some other
* backend may do the XLogFlush while we're sleeping. This
* needs work still, because on most Unixen, the minimum
* select() delay is 10msec or more, which is way too long.)
* Sleep before commit! So we can flush more than one commit
* records per single fsync. (The idea is some other backend may
* do the XLogFlush while we're sleeping. This needs work still,
* because on most Unixen, the minimum select() delay is 10msec or
* more, which is way too long.)
*
* We do not sleep if enableFsync is not turned on, nor if there
* are fewer than CommitSiblings other backends with active
* We do not sleep if enableFsync is not turned on, nor if there are
* fewer than CommitSiblings other backends with active
* transactions.
*/
if (CommitDelay > 0 && enableFsync &&
@ -818,7 +820,7 @@ RecordTransactionAbort(void)
xlrec.xtime = time(NULL);
rdata.buffer = InvalidBuffer;
rdata.data = (char *)(&xlrec);
rdata.data = (char *) (&xlrec);
rdata.len = SizeOfXactAbort;
rdata.next = NULL;
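
The commit path above sleeps before flushing only when CommitDelay is set, fsync is enabled, and at least CommitSiblings other backends have transactions open, in the hope that another backend's XLogFlush will cover this commit record as well. A minimal self-contained sketch of just that decision follows, with the sibling count passed in rather than read from shared state.

/* Illustrative sketch only: the precommit-sleep decision, following the
 * CommitDelay/CommitSiblings discussion above.  should_delay_commit is
 * an invented helper, not part of the real transaction code. */
#include <stdio.h>
#include <stdbool.h>

static bool
should_delay_commit(int commit_delay_us, bool fsync_enabled,
					int other_active_xacts, int commit_siblings)
{
	/* sleeping only pays off if an fsync is coming and enough other
	 * transactions are in flight that one of them may flush for us */
	return commit_delay_us > 0 &&
		fsync_enabled &&
		other_active_xacts >= commit_siblings;
}

int
main(void)
{
	printf("%d\n", should_delay_commit(100, true, 7, 5));	/* 1: sleep */
	printf("%d\n", should_delay_commit(100, true, 2, 5));	/* 0: flush now */
	return 0;
}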
@ -896,9 +898,7 @@ AtAbort_Memory(void)
MemoryContextResetAndDeleteChildren(TransactionCommandContext);
}
else
{
MemoryContextSwitchTo(TopMemoryContext);
}
}
@ -1021,6 +1021,7 @@ CurrentXactInProgress(void)
{
return CurrentTransactionState->state == TRANS_INPROGRESS;
}
#endif
/* --------------------------------
@ -1106,7 +1107,7 @@ CommitTransaction(void)
AtCommit_Memory();
AtEOXact_Files();
SharedBufferChanged = false; /* safest place to do it */
SharedBufferChanged = false;/* safest place to do it */
/* ----------------
* done with commit processing, set current transaction
@ -1143,15 +1144,16 @@ AbortTransaction(void)
/*
* Release any spinlocks or buffer context locks we might be holding
* as quickly as possible. (Real locks, however, must be held till
* we finish aborting.) Releasing spinlocks is critical since we
* might try to grab them again while cleaning up!
* as quickly as possible. (Real locks, however, must be held till we
* finish aborting.) Releasing spinlocks is critical since we might
* try to grab them again while cleaning up!
*/
ProcReleaseSpins(NULL);
UnlockBuffers();
/*
* Also clean up any open wait for lock, since the lock manager
* will choke if we try to wait for another lock before doing this.
* Also clean up any open wait for lock, since the lock manager will
* choke if we try to wait for another lock before doing this.
*/
LockWaitCancel();
@ -1203,7 +1205,7 @@ AbortTransaction(void)
AtEOXact_Files();
AtAbort_Locks();
SharedBufferChanged = false; /* safest place to do it */
SharedBufferChanged = false;/* safest place to do it */
/* ----------------
* State remains TRANS_ABORT until CleanupTransaction().
@ -1327,8 +1329,8 @@ StartTransactionCommand(void)
}
/*
* We must switch to TransactionCommandContext before returning.
* This is already done if we called StartTransaction, otherwise not.
* We must switch to TransactionCommandContext before returning. This
* is already done if we called StartTransaction, otherwise not.
*/
Assert(TransactionCommandContext != NULL);
MemoryContextSwitchTo(TransactionCommandContext);
@ -1765,9 +1767,7 @@ xact_redo(XLogRecPtr lsn, XLogRecord *record)
/* SHOULD REMOVE FILES OF ALL DROPPED RELATIONS */
}
else if (info == XLOG_XACT_ABORT)
{
TransactionIdAbort(record->xl_xid);
}
else
elog(STOP, "xact_redo: unknown op code %u", info);
}
@ -1784,13 +1784,13 @@ xact_undo(XLogRecPtr lsn, XLogRecord *record)
}
void
xact_desc(char *buf, uint8 xl_info, char* rec)
xact_desc(char *buf, uint8 xl_info, char *rec)
{
uint8 info = xl_info & ~XLR_INFO_MASK;
if (info == XLOG_XACT_COMMIT)
{
xl_xact_commit *xlrec = (xl_xact_commit*) rec;
xl_xact_commit *xlrec = (xl_xact_commit *) rec;
struct tm *tm = localtime(&xlrec->xtime);
sprintf(buf + strlen(buf), "commit: %04u-%02u-%02u %02u:%02u:%02u",
@ -1799,7 +1799,7 @@ xact_desc(char *buf, uint8 xl_info, char* rec)
}
else if (info == XLOG_XACT_ABORT)
{
xl_xact_abort *xlrec = (xl_xact_abort*) rec;
xl_xact_abort *xlrec = (xl_xact_abort *) rec;
struct tm *tm = localtime(&xlrec->xtime);
sprintf(buf + strlen(buf), "abort: %04u-%02u-%02u %02u:%02u:%02u",
@ -1811,7 +1811,7 @@ xact_desc(char *buf, uint8 xl_info, char* rec)
}
void
XactPushRollback(void (*func) (void *), void* data)
XactPushRollback(void (*func) (void *), void *data)
{
#ifdef XLOG_II
if (_RollbackFunc != NULL)


@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $Id: xid.c,v 1.29 2001/01/24 19:42:51 momjian Exp $
* $Id: xid.c,v 1.30 2001/03/22 03:59:18 momjian Exp $
*
* OLD COMMENTS
* XXX WARNING
@ -49,6 +49,7 @@ Datum
xidout(PG_FUNCTION_ARGS)
{
TransactionId transactionId = PG_GETARG_TRANSACTIONID(0);
/* maximum 32 bit unsigned integer representation takes 10 chars */
char *representation = palloc(11);


@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.62 2001/03/18 20:18:59 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.63 2001/03/22 03:59:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -50,36 +50,37 @@
*/
#define SYNC_METHOD_FSYNC 0
#define SYNC_METHOD_FDATASYNC 1
#define SYNC_METHOD_OPEN 2 /* used for both O_SYNC and O_DSYNC */
#define SYNC_METHOD_OPEN 2 /* used for both O_SYNC and
* O_DSYNC */
#if defined(O_SYNC)
# define OPEN_SYNC_FLAG O_SYNC
#define OPEN_SYNC_FLAG O_SYNC
#else
# if defined(O_FSYNC)
# define OPEN_SYNC_FLAG O_FSYNC
# endif
#if defined(O_FSYNC)
#define OPEN_SYNC_FLAG O_FSYNC
#endif
#endif
#if defined(OPEN_SYNC_FLAG)
# if defined(O_DSYNC) && (O_DSYNC != OPEN_SYNC_FLAG)
# define OPEN_DATASYNC_FLAG O_DSYNC
# endif
#if defined(O_DSYNC) && (O_DSYNC != OPEN_SYNC_FLAG)
#define OPEN_DATASYNC_FLAG O_DSYNC
#endif
#endif
#if defined(OPEN_DATASYNC_FLAG)
# define DEFAULT_SYNC_METHOD_STR "open_datasync"
# define DEFAULT_SYNC_METHOD SYNC_METHOD_OPEN
# define DEFAULT_SYNC_FLAGBIT OPEN_DATASYNC_FLAG
#define DEFAULT_SYNC_METHOD_STR "open_datasync"
#define DEFAULT_SYNC_METHOD SYNC_METHOD_OPEN
#define DEFAULT_SYNC_FLAGBIT OPEN_DATASYNC_FLAG
#else
# if defined(HAVE_FDATASYNC)
# define DEFAULT_SYNC_METHOD_STR "fdatasync"
# define DEFAULT_SYNC_METHOD SYNC_METHOD_FDATASYNC
# define DEFAULT_SYNC_FLAGBIT 0
# else
# define DEFAULT_SYNC_METHOD_STR "fsync"
# define DEFAULT_SYNC_METHOD SYNC_METHOD_FSYNC
# define DEFAULT_SYNC_FLAGBIT 0
# endif
#if defined(HAVE_FDATASYNC)
#define DEFAULT_SYNC_METHOD_STR "fdatasync"
#define DEFAULT_SYNC_METHOD SYNC_METHOD_FDATASYNC
#define DEFAULT_SYNC_FLAGBIT 0
#else
#define DEFAULT_SYNC_METHOD_STR "fsync"
#define DEFAULT_SYNC_METHOD SYNC_METHOD_FSYNC
#define DEFAULT_SYNC_FLAGBIT 0
#endif
#endif
@ -91,11 +92,13 @@
/* User-settable parameters */
int CheckPointSegments = 3;
int XLOGbuffers = 8;
int XLOGfiles = 0; /* how many files to pre-allocate during ckpt */
int XLOGfiles = 0; /* how many files to pre-allocate during
* ckpt */
int XLOG_DEBUG = 0;
char *XLOG_sync_method = NULL;
const char XLOG_sync_method_default[] = DEFAULT_SYNC_METHOD_STR;
char XLOG_archive_dir[MAXPGPATH]; /* null string means delete 'em */
char XLOG_archive_dir[MAXPGPATH]; /* null string means
* delete 'em */
/* these are derived from XLOG_sync_method by assign_xlog_sync_method */
static int sync_method = DEFAULT_SYNC_METHOD;
@ -229,6 +232,7 @@ typedef struct XLogCtlData
XLogwrtResult LogwrtResult;
/* Protected by logwrt_lck: */
XLogCtlWrite Write;
/*
* These values do not change after startup, although the pointed-to
* pages and xlblocks values certainly do. Permission to read/write
@ -384,8 +388,10 @@ static int readFile = -1;
static uint32 readId = 0;
static uint32 readSeg = 0;
static uint32 readOff = 0;
/* Buffer for currently read page (BLCKSZ bytes) */
static char *readBuf = NULL;
/* State information for XLOG reading */
static XLogRecPtr ReadRecPtr;
static XLogRecPtr EndRecPtr;
@ -463,8 +469,8 @@ XLogInsert(RmgrId rmid, uint8 info, XLogRecData *rdata)
}
/*
* In bootstrap mode, we don't actually log anything but XLOG resources;
* return a phony record pointer.
* In bootstrap mode, we don't actually log anything but XLOG
* resources; return a phony record pointer.
*/
if (IsBootstrapProcessingMode() && rmid != RM_XLOG_ID)
{
@ -479,16 +485,17 @@ XLogInsert(RmgrId rmid, uint8 info, XLogRecData *rdata)
* header isn't added into the CRC yet since we don't know the final
* length or info bits quite yet.
*
* We may have to loop back to here if a race condition is detected below.
* We could prevent the race by doing all this work while holding the
* insert spinlock, but it seems better to avoid doing CRC calculations
* while holding the lock. This means we have to be careful about
* modifying the rdata list until we know we aren't going to loop back
* again. The only change we allow ourselves to make earlier is to set
* rdt->data = NULL in list items we have decided we will have to back
* up the whole buffer for. This is OK because we will certainly decide
* the same thing again for those items if we do it over; doing it here
* saves an extra pass over the list later.
* We may have to loop back to here if a race condition is detected
* below. We could prevent the race by doing all this work while
* holding the insert spinlock, but it seems better to avoid doing CRC
* calculations while holding the lock. This means we have to be
* careful about modifying the rdata list until we know we aren't
* going to loop back again. The only change we allow ourselves to
* make earlier is to set rdt->data = NULL in list items we have
* decided we will have to back up the whole buffer for. This is OK
* because we will certainly decide the same thing again for those
* items if we do it over; doing it here saves an extra pass over the
* list later.
*/
begin:;
for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
@ -499,7 +506,7 @@ begin:;
INIT_CRC64(rdata_crc);
len = 0;
for (rdt = rdata; ; )
for (rdt = rdata;;)
{
if (rdt->buffer == InvalidBuffer)
{
@ -528,10 +535,11 @@ begin:;
{
/* OK, put it in this slot */
dtbuf[i] = rdt->buffer;
/*
* XXX We assume page LSN is first data on page
*/
dtbuf_lsn[i] = *((XLogRecPtr*)BufferGetBlock(rdt->buffer));
dtbuf_lsn[i] = *((XLogRecPtr *) BufferGetBlock(rdt->buffer));
if (XLByteLE(dtbuf_lsn[i], RedoRecPtr))
{
crc64 dtcrc;
@ -545,7 +553,7 @@ begin:;
dtbuf_xlg[i].node = BufferGetFileNode(dtbuf[i]);
dtbuf_xlg[i].block = BufferGetBlockNumber(dtbuf[i]);
COMP_CRC64(dtcrc,
(char*) &(dtbuf_xlg[i]) + sizeof(crc64),
(char *) &(dtbuf_xlg[i]) + sizeof(crc64),
sizeof(BkpBlock) - sizeof(crc64));
FIN_CRC64(dtcrc);
dtbuf_xlg[i].crc = dtcrc;
@ -596,9 +604,9 @@ begin:;
S_UNLOCK(&(XLogCtl->info_lck));
/*
* If cache is half filled then try to acquire logwrt lock
* and do LOGWRT work, but only once per XLogInsert call.
* Ignore any fractional blocks in performing this check.
* If cache is half filled then try to acquire logwrt lock and
* do LOGWRT work, but only once per XLogInsert call. Ignore
* any fractional blocks in performing this check.
*/
LogwrtRqst.Write.xrecoff -= LogwrtRqst.Write.xrecoff % BLCKSZ;
if (do_logwrt &&
@ -625,8 +633,9 @@ begin:;
/*
* Check to see if my RedoRecPtr is out of date. If so, may have to
* go back and recompute everything. This can only happen just after a
* checkpoint, so it's better to be slow in this case and fast otherwise.
* go back and recompute everything. This can only happen just after
* a checkpoint, so it's better to be slow in this case and fast
* otherwise.
*/
if (!XLByteEQ(RedoRecPtr, Insert->RedoRecPtr))
{
@ -640,9 +649,10 @@ begin:;
if (dtbuf_bkp[i] == false &&
XLByteLE(dtbuf_lsn[i], RedoRecPtr))
{
/*
* Oops, this buffer now needs to be backed up, but we didn't
* think so above. Start over.
* Oops, this buffer now needs to be backed up, but we
* didn't think so above. Start over.
*/
S_UNLOCK(&(XLogCtl->insert_lck));
END_CRIT_SECTION();
@ -658,8 +668,9 @@ begin:;
* this loop, write_len includes the backup block data.
*
* Also set the appropriate info bits to show which buffers were backed
* up. The i'th XLR_SET_BKP_BLOCK bit corresponds to the i'th distinct
* buffer value (ignoring InvalidBuffer) appearing in the rdata list.
* up. The i'th XLR_SET_BKP_BLOCK bit corresponds to the i'th
* distinct buffer value (ignoring InvalidBuffer) appearing in the
* rdata list.
*/
write_len = len;
for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
@ -671,13 +682,13 @@ begin:;
rdt->next = &(dtbuf_rdt[2 * i]);
dtbuf_rdt[2 * i].data = (char*) &(dtbuf_xlg[i]);
dtbuf_rdt[2 * i].data = (char *) &(dtbuf_xlg[i]);
dtbuf_rdt[2 * i].len = sizeof(BkpBlock);
write_len += sizeof(BkpBlock);
rdt = dtbuf_rdt[2 * i].next = &(dtbuf_rdt[2 * i + 1]);
dtbuf_rdt[2 * i + 1].data = (char*) BufferGetBlock(dtbuf[i]);
dtbuf_rdt[2 * i + 1].data = (char *) BufferGetBlock(dtbuf[i]);
dtbuf_rdt[2 * i + 1].len = BLCKSZ;
write_len += BLCKSZ;
dtbuf_rdt[2 * i + 1].next = NULL;
@ -711,7 +722,7 @@ begin:;
record->xl_rmid = rmid;
/* Now we can finish computing the main CRC */
COMP_CRC64(rdata_crc, (char*) record + sizeof(crc64),
COMP_CRC64(rdata_crc, (char *) record + sizeof(crc64),
SizeOfXLogRecord - sizeof(crc64));
FIN_CRC64(rdata_crc);
record->xl_crc = rdata_crc;
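
The CRC calls above fold in the header bytes that follow the crc field, so the stored checksum never covers itself and can be verified later by redoing the same two passes. A minimal standalone sketch of that layout, with a toy hash standing in for the CRC-64 macros (DemoRecord, sum64 and demo_record_crc are invented names, not anything in xlog.c):

/*
 * Standalone sketch: a record whose first field is its own checksum.
 * The checksum is accumulated over the payload first, then over the
 * header bytes *after* the checksum field, mirroring the
 * COMP_CRC64(..., (char *) record + sizeof(crc64), ...) calls above.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct DemoRecord
{
    uint64_t    crc;        /* must stay first, like the crc64 in the real record */
    uint32_t    len;        /* payload length */
    uint8_t     info;
} DemoRecord;

static uint64_t
sum64(uint64_t acc, const void *data, size_t n)
{
    const uint8_t *p = (const uint8_t *) data;

    while (n-- > 0)
        acc = acc * 1099511628211ULL + *p++;    /* toy FNV-style hash, not CRC-64 */
    return acc;
}

static uint64_t
demo_record_crc(const DemoRecord *rec, const void *payload)
{
    uint64_t    crc = 0;

    /* payload first, then the header bytes after the crc field itself */
    crc = sum64(crc, payload, rec->len);
    crc = sum64(crc, (const char *) rec + sizeof(rec->crc),
                sizeof(DemoRecord) - sizeof(rec->crc));
    return crc;
}

int
main(void)
{
    const char  payload[] = "hello";
    DemoRecord  rec;

    memset(&rec, 0, sizeof(rec));   /* also zeroes padding bytes */
    rec.len = sizeof(payload);
    rec.info = 1;

    rec.crc = demo_record_crc(&rec, payload);
    printf("stored %llu, recheck ok: %d\n",
           (unsigned long long) rec.crc,
           rec.crc == demo_record_crc(&rec, payload));
    return 0;
}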
@ -795,14 +806,15 @@ begin:;
freespace = INSERT_FREESPACE(Insert);
/*
* The recptr I return is the beginning of the *next* record.
* This will be stored as LSN for changed data pages...
* The recptr I return is the beginning of the *next* record. This
* will be stored as LSN for changed data pages...
*/
INSERT_RECPTR(RecPtr, Insert, curridx);
/* Need to update shared LogwrtRqst if some block was filled up */
if (freespace < SizeOfXLogRecord)
updrqst = true; /* curridx is filled and available for writing out */
updrqst = true; /* curridx is filled and available for
* writing out */
else
curridx = PrevBufIdx(curridx);
WriteRqst = XLogCtl->xlblocks[curridx];
@ -850,9 +862,9 @@ AdvanceXLInsertBuffer(void)
LogwrtResult = Insert->LogwrtResult;
/*
* Get ending-offset of the buffer page we need to replace (this may be
* zero if the buffer hasn't been used yet). Fall through if it's already
* written out.
* Get ending-offset of the buffer page we need to replace (this may
* be zero if the buffer hasn't been used yet). Fall through if it's
* already written out.
*/
OldPageRqstPtr = XLogCtl->xlblocks[nextidx];
if (!XLByteLE(OldPageRqstPtr, LogwrtResult.Write))
@ -883,8 +895,8 @@ AdvanceXLInsertBuffer(void)
}
/*
* LogwrtResult lock is busy or we know the page is still dirty.
* Try to acquire logwrt lock and write full blocks.
* LogwrtResult lock is busy or we know the page is still
* dirty. Try to acquire logwrt lock and write full blocks.
*/
if (!TAS(&(XLogCtl->logwrt_lck)))
{
@ -896,9 +908,10 @@ AdvanceXLInsertBuffer(void)
Insert->LogwrtResult = LogwrtResult;
break;
}
/*
* Have to write buffers while holding insert lock.
* This is not good, so only write as much as we absolutely
* Have to write buffers while holding insert lock. This
* is not good, so only write as much as we absolutely
* must.
*/
WriteRqst.Write = OldPageRqstPtr;
@ -933,14 +946,15 @@ AdvanceXLInsertBuffer(void)
}
Insert->curridx = nextidx;
Insert->currpage = (XLogPageHeader) (XLogCtl->pages + nextidx * BLCKSZ);
Insert->currpos = ((char*) Insert->currpage) + SizeOfXLogPHD;
Insert->currpos = ((char *) Insert->currpage) + SizeOfXLogPHD;
/*
* Be sure to re-zero the buffer so that bytes beyond what we've written
* will look like zeroes and not valid XLOG records...
* Be sure to re-zero the buffer so that bytes beyond what we've
* written will look like zeroes and not valid XLOG records...
*/
MemSet((char*) Insert->currpage, 0, BLCKSZ);
MemSet((char *) Insert->currpage, 0, BLCKSZ);
Insert->currpage->xlp_magic = XLOG_PAGE_MAGIC;
/* Insert->currpage->xlp_info = 0; */ /* done by memset */
/* Insert->currpage->xlp_info = 0; *//* done by memset */
Insert->currpage->xlp_sui = ThisStartUpID;
return update_needed;
@ -959,11 +973,15 @@ XLogWrite(XLogwrtRqst WriteRqst)
bool ispartialpage;
bool use_existent;
/* Update local LogwrtResult (caller probably did this already, but...) */
/*
* Update local LogwrtResult (caller probably did this already,
* but...)
*/
LogwrtResult = Write->LogwrtResult;
while (XLByteLT(LogwrtResult.Write, WriteRqst.Write))
{
/*
* Make sure we're not ahead of the insert process. This could
* happen if we're passed a bogus WriteRqst.Write that is past the
@ -979,6 +997,7 @@ XLogWrite(XLogwrtRqst WriteRqst)
if (!XLByteInPrevSeg(LogwrtResult.Write, openLogId, openLogSeg))
{
/*
* Switch to new logfile segment.
*/
@ -1011,11 +1030,12 @@ XLogWrite(XLogwrtRqst WriteRqst)
ControlFile->logSeg = openLogSeg + 1;
ControlFile->time = time(NULL);
UpdateControlFile();
/*
* Signal postmaster to start a checkpoint if it's been too
* long since the last one. (We look at local copy of
* RedoRecPtr which might be a little out of date, but should
* be close enough for this purpose.)
* Signal postmaster to start a checkpoint if it's been
* too long since the last one. (We look at local copy of
* RedoRecPtr which might be a little out of date, but
* should be close enough for this purpose.)
*/
if (IsUnderPostmaster &&
(openLogId != RedoRecPtr.xlogid ||
@ -1056,9 +1076,9 @@ XLogWrite(XLogwrtRqst WriteRqst)
/*
* If we just wrote the whole last page of a logfile segment,
* fsync the segment immediately. This avoids having to go back
* and re-open prior segments when an fsync request comes along later.
* Doing it here ensures that one and only one backend will perform
* this fsync.
* and re-open prior segments when an fsync request comes along
* later. Doing it here ensures that one and only one backend will
* perform this fsync.
*/
if (openLogOff >= XLogSegSize && !ispartialpage)
{
@ -1081,10 +1101,11 @@ XLogWrite(XLogwrtRqst WriteRqst)
if (XLByteLT(LogwrtResult.Flush, WriteRqst.Flush) &&
XLByteLT(LogwrtResult.Flush, LogwrtResult.Write))
{
/*
* Could get here without iterating above loop, in which case
* we might have no open file or the wrong one. However, we do
* not need to fsync more than one file.
* Could get here without iterating above loop, in which case we
* might have no open file or the wrong one. However, we do not
* need to fsync more than one file.
*/
if (sync_method != SYNC_METHOD_OPEN)
{
@ -1110,8 +1131,8 @@ XLogWrite(XLogwrtRqst WriteRqst)
/*
* Update shared-memory status
*
* We make sure that the shared 'request' values do not fall behind
* the 'result' values. This is not absolutely essential, but it saves
* We make sure that the shared 'request' values do not fall behind the
* 'result' values. This is not absolutely essential, but it saves
* some code in a couple of places.
*/
S_LOCK(&(XLogCtl->info_lck));
@ -1163,8 +1184,9 @@ XLogFlush(XLogRecPtr record)
* Since fsync is usually a horribly expensive operation, we try to
* piggyback as much data as we can on each fsync: if we see any more
* data entered into the xlog buffer, we'll write and fsync that too,
* so that the final value of LogwrtResult.Flush is as large as possible.
* This gives us some chance of avoiding another fsync immediately after.
* so that the final value of LogwrtResult.Flush is as large as
* possible. This gives us some chance of avoiding another fsync
* immediately after.
*/
/* initialize to given target; may increase below */
@ -1192,9 +1214,7 @@ XLogFlush(XLogRecPtr record)
uint32 freespace = INSERT_FREESPACE(Insert);
if (freespace < SizeOfXLogRecord) /* buffer is full */
{
WriteRqstPtr = XLogCtl->xlblocks[Insert->curridx];
}
else
{
WriteRqstPtr = XLogCtl->xlblocks[Insert->curridx];
@ -1257,7 +1277,8 @@ XLogFileInit(uint32 log, uint32 seg,
XLogFileName(path, log, seg);
/*
* Try to use existent file (checkpoint maker may have created it already)
* Try to use existent file (checkpoint maker may have created it
* already)
*/
if (*use_existent)
{
@ -1270,14 +1291,14 @@ XLogFileInit(uint32 log, uint32 seg,
log, seg);
}
else
return(fd);
return (fd);
}
/*
* Initialize an empty (all zeroes) segment. NOTE: it is possible that
* another process is doing the same thing. If so, we will end up
* pre-creating an extra log segment. That seems OK, and better than
* holding the spinlock throughout this lengthy process.
* Initialize an empty (all zeroes) segment. NOTE: it is possible
* that another process is doing the same thing. If so, we will end
* up pre-creating an extra log segment. That seems OK, and better
* than holding the spinlock throughout this lengthy process.
*/
snprintf(tmppath, MAXPGPATH, "%s%cxlogtemp.%d",
XLogDir, SEP_CHAR, (int) getpid());
@ -1306,7 +1327,10 @@ XLogFileInit(uint32 log, uint32 seg,
{
int save_errno = errno;
/* If we fail to make the file, delete it to release disk space */
/*
* If we fail to make the file, delete it to release disk
* space
*/
unlink(tmppath);
errno = save_errno;
@ -1336,10 +1360,8 @@ XLogFileInit(uint32 log, uint32 seg,
targseg = seg;
strcpy(targpath, path);
if (! *use_existent)
{
if (!*use_existent)
unlink(targpath);
}
else
{
while ((fd = BasicOpenFile(targpath, O_RDWR | PG_BINARY,
@ -1499,13 +1521,13 @@ RestoreBkpBlocks(XLogRecord *record, XLogRecPtr lsn)
char *blk;
int i;
blk = (char*)XLogRecGetData(record) + record->xl_len;
blk = (char *) XLogRecGetData(record) + record->xl_len;
for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
{
if (!(record->xl_info & XLR_SET_BKP_BLOCK(i)))
continue;
memcpy((char*)&bkpb, blk, sizeof(BkpBlock));
memcpy((char *) &bkpb, blk, sizeof(BkpBlock));
blk += sizeof(BkpBlock);
reln = XLogOpenRelation(true, record->xl_rmid, bkpb.node);
@ -1516,7 +1538,7 @@ RestoreBkpBlocks(XLogRecord *record, XLogRecPtr lsn)
if (BufferIsValid(buffer))
{
page = (Page) BufferGetPage(buffer);
memcpy((char*)page, blk, BLCKSZ);
memcpy((char *) page, blk, BLCKSZ);
PageSetLSN(page, lsn);
PageSetSUI(page, ThisStartUpID);
UnlockAndWriteBuffer(buffer);
@ -1546,7 +1568,7 @@ RecordIsValid(XLogRecord *record, XLogRecPtr recptr, int emode)
/* Check CRC of rmgr data and record header */
INIT_CRC64(crc);
COMP_CRC64(crc, XLogRecGetData(record), len);
COMP_CRC64(crc, (char*) record + sizeof(crc64),
COMP_CRC64(crc, (char *) record + sizeof(crc64),
SizeOfXLogRecord - sizeof(crc64));
FIN_CRC64(crc);
@ -1554,11 +1576,11 @@ RecordIsValid(XLogRecord *record, XLogRecPtr recptr, int emode)
{
elog(emode, "ReadRecord: bad rmgr data CRC in record at %u/%u",
recptr.xlogid, recptr.xrecoff);
return(false);
return (false);
}
/* Check CRCs of backup blocks, if any */
blk = (char*)XLogRecGetData(record) + len;
blk = (char *) XLogRecGetData(record) + len;
for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
{
if (!(record->xl_info & XLR_SET_BKP_BLOCK(i)))
@ -1569,18 +1591,19 @@ RecordIsValid(XLogRecord *record, XLogRecPtr recptr, int emode)
COMP_CRC64(crc, blk + sizeof(crc64),
sizeof(BkpBlock) - sizeof(crc64));
FIN_CRC64(crc);
memcpy((char*)&cbuf, blk, sizeof(crc64)); /* don't assume alignment */
memcpy((char *) &cbuf, blk, sizeof(crc64)); /* don't assume
* alignment */
if (!EQ_CRC64(cbuf, crc))
{
elog(emode, "ReadRecord: bad bkp block %d CRC in record at %u/%u",
i + 1, recptr.xlogid, recptr.xrecoff);
return(false);
return (false);
}
blk += sizeof(BkpBlock) + BLCKSZ;
}
return(true);
return (true);
}
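
RecordIsValid above walks the backup blocks appended after the main record data: for each flagged block there is a fixed-size BkpBlock header followed by a full BLCKSZ page image, and the header is memcpy'd into a local variable because the trailer carries no alignment guarantee. A standalone sketch of that walk under simplified, invented types (DemoBkpHeader, DEMO_BLCKSZ), not the real xlog structs:

/*
 * Standalone sketch: walk a trailer that holds N backup blocks, each laid
 * out as a fixed-size header immediately followed by a fixed-size page
 * image.  The header is copied out before use, so no alignment is assumed.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_BLCKSZ 8192

typedef struct DemoBkpHeader
{
    uint32_t    blkno;      /* which block the image belongs to */
    uint32_t    flags;
} DemoBkpHeader;

static void
walk_backup_blocks(const char *trailer, int nblocks)
{
    const char *blk = trailer;
    int         i;

    for (i = 0; i < nblocks; i++)
    {
        DemoBkpHeader hdr;

        memcpy(&hdr, blk, sizeof(hdr));     /* don't assume alignment */
        blk += sizeof(hdr);

        printf("backup block %d: blkno %u, %d bytes of page image\n",
               i, (unsigned) hdr.blkno, DEMO_BLCKSZ);

        blk += DEMO_BLCKSZ;                 /* skip the page image itself */
    }
}

int
main(void)
{
    static char trailer[2 * (sizeof(DemoBkpHeader) + DEMO_BLCKSZ)];
    DemoBkpHeader hdr = {42, 0};

    memcpy(trailer, &hdr, sizeof(hdr));
    hdr.blkno = 43;
    memcpy(trailer + sizeof(hdr) + DEMO_BLCKSZ, &hdr, sizeof(hdr));

    walk_backup_blocks(trailer, 2);
    return 0;
}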
/*
@ -1609,13 +1632,14 @@ ReadRecord(XLogRecPtr *RecPtr, int emode, char *buffer)
if (readBuf == NULL)
{
/*
* First time through, permanently allocate readBuf. We do it
* this way, rather than just making a static array, for two
* reasons: (1) no need to waste the storage in most instantiations
* of the backend; (2) a static char array isn't guaranteed to
* have any particular alignment, whereas malloc() will provide
* MAXALIGN'd storage.
* reasons: (1) no need to waste the storage in most
* instantiations of the backend; (2) a static char array isn't
* guaranteed to have any particular alignment, whereas malloc()
* will provide MAXALIGN'd storage.
*/
readBuf = (char *) malloc(BLCKSZ);
Assert(readBuf != NULL);
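
The comment explains why readBuf is malloc'd instead of being a static char array: malloc() must return storage aligned for any type, while a char array only gets character alignment unless the compiler happens to give it more. A small standalone check of that point (C11 stdalign.h; nothing here comes from xlog.c):

/*
 * Standalone sketch: compare the alignment of a plain char array with
 * malloc'd storage.  The array may happen to land on an aligned address,
 * but only malloc() guarantees worst-case alignment.
 */
#include <stdalign.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
    static char static_buf[64];
    char       *heap_buf = malloc(64);

    if (heap_buf == NULL)
        return 1;

    printf("worst-case alignment:      %zu\n", alignof(max_align_t));
    printf("static buffer remainder:   %zu\n",
           (size_t) ((uintptr_t) static_buf % alignof(max_align_t)));
    printf("malloc'd buffer remainder: %zu\n",
           (size_t) ((uintptr_t) heap_buf % alignof(max_align_t)));

    free(heap_buf);
    return 0;
}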
@ -1656,7 +1680,7 @@ ReadRecord(XLogRecPtr *RecPtr, int emode, char *buffer)
readFile = XLogFileOpen(readId, readSeg, (emode == LOG));
if (readFile < 0)
goto next_record_is_invalid;
readOff = (uint32) (-1); /* force read to occur below */
readOff = (uint32) (-1);/* force read to occur below */
}
targetPageOff = ((RecPtr->xrecoff % XLogSegSize) / BLCKSZ) * BLCKSZ;
@ -1688,9 +1712,10 @@ ReadRecord(XLogRecPtr *RecPtr, int emode, char *buffer)
record = (XLogRecord *) ((char *) readBuf + RecPtr->xrecoff % BLCKSZ);
got_record:;
/*
* Currently, xl_len == 0 must be bad data, but that might not be
* true forever. See note in XLogInsert.
* Currently, xl_len == 0 must be bad data, but that might not be true
* forever. See note in XLogInsert.
*/
if (record->xl_len == 0)
{
@ -1698,8 +1723,10 @@ got_record:;
RecPtr->xlogid, RecPtr->xrecoff);
goto next_record_is_invalid;
}
/*
* Compute total length of record including any appended backup blocks.
* Compute total length of record including any appended backup
* blocks.
*/
total_len = SizeOfXLogRecord + record->xl_len;
for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
@ -1708,6 +1735,7 @@ got_record:;
continue;
total_len += sizeof(BkpBlock) + BLCKSZ;
}
/*
* Make sure it will fit in buffer (currently, it is mechanically
* impossible for this test to fail, but it seems like a good idea
@ -1774,7 +1802,7 @@ got_record:;
len = BLCKSZ - SizeOfXLogPHD - SizeOfXLogContRecord;
if (contrecord->xl_rem_len > len)
{
memcpy(buffer, (char *)contrecord + SizeOfXLogContRecord, len);
memcpy(buffer, (char *) contrecord + SizeOfXLogContRecord, len);
gotlen += len;
buffer += len;
continue;
@ -1839,14 +1867,16 @@ ValidXLOGHeader(XLogPageHeader hdr, int emode, bool checkSUI)
hdr->xlp_info, readId, readSeg, readOff);
return false;
}
/*
* We disbelieve a SUI less than the previous page's SUI, or more
* than a few counts greater. In theory as many as 512 shutdown
* checkpoint records could appear on a 32K-sized xlog page, so
* that's the most differential there could legitimately be.
* We disbelieve a SUI less than the previous page's SUI, or more than
* a few counts greater. In theory as many as 512 shutdown checkpoint
* records could appear on a 32K-sized xlog page, so that's the most
* differential there could legitimately be.
*
* Note this check can only be applied when we are reading the next page
* in sequence, so ReadRecord passes a flag indicating whether to check.
* in sequence, so ReadRecord passes a flag indicating whether to
* check.
*/
if (checkSUI)
{
@ -1891,8 +1921,10 @@ WriteControlFile(void)
{
int fd;
char buffer[BLCKSZ]; /* need not be aligned */
#ifdef USE_LOCALE
char *localeptr;
#endif
/*
@ -1911,10 +1943,11 @@ WriteControlFile(void)
if (!localeptr)
elog(STOP, "Invalid LC_CTYPE setting");
StrNCpy(ControlFile->lc_ctype, localeptr, LOCALE_NAME_BUFLEN);
/*
* Issue warning notice if initdb'ing in a locale that will not permit
* LIKE index optimization. This is not a clean place to do it, but
* I don't see a better place either...
* LIKE index optimization. This is not a clean place to do it, but I
* don't see a better place either...
*/
if (!locale_is_like_safe())
elog(NOTICE, "Initializing database with %s collation order."
@ -1931,16 +1964,16 @@ WriteControlFile(void)
/* Contents are protected with a CRC */
INIT_CRC64(ControlFile->crc);
COMP_CRC64(ControlFile->crc,
(char*) ControlFile + sizeof(crc64),
(char *) ControlFile + sizeof(crc64),
sizeof(ControlFileData) - sizeof(crc64));
FIN_CRC64(ControlFile->crc);
/*
* We write out BLCKSZ bytes into pg_control, zero-padding the
* excess over sizeof(ControlFileData). This reduces the odds
* of premature-EOF errors when reading pg_control. We'll still
* fail when we check the contents of the file, but hopefully with
* a more specific error than "couldn't read pg_control".
* We write out BLCKSZ bytes into pg_control, zero-padding the excess
* over sizeof(ControlFileData). This reduces the odds of
* premature-EOF errors when reading pg_control. We'll still fail
* when we check the contents of the file, but hopefully with a more
* specific error than "couldn't read pg_control".
*/
if (sizeof(ControlFileData) > BLCKSZ)
elog(STOP, "sizeof(ControlFileData) is too large ... fix xlog.c");
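
The block above writes pg_control as one full BLCKSZ block, zero-padding the space past sizeof(ControlFileData), so a short read later means damage rather than a legitimately smaller file. A standalone sketch of the same idea, with an invented struct and file name:

/*
 * Standalone sketch: the on-disk file is always one full fixed-size block,
 * zero-padded past the struct, so a short read clearly signals corruption.
 * DemoControlData and "demo_control" are made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_BLCKSZ 8192

typedef struct DemoControlData
{
    uint32_t    version;
    uint64_t    checkpoint_loc;
} DemoControlData;

static int
write_control_file(const char *path, const DemoControlData *ctl)
{
    char        buffer[DEMO_BLCKSZ];    /* need not be aligned */
    FILE       *fp;

    memset(buffer, 0, sizeof(buffer));  /* zero-pad the excess */
    memcpy(buffer, ctl, sizeof(*ctl));

    fp = fopen(path, "wb");
    if (fp == NULL)
        return -1;
    if (fwrite(buffer, sizeof(buffer), 1, fp) != 1)
    {
        fclose(fp);
        return -1;
    }
    return fclose(fp);
}

int
main(void)
{
    DemoControlData ctl = {100, 123456};

    return write_control_file("demo_control", &ctl) == 0 ? 0 : 1;
}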
@ -1994,7 +2027,7 @@ ReadControlFile(void)
/* Now check the CRC. */
INIT_CRC64(crc);
COMP_CRC64(crc,
(char*) ControlFile + sizeof(crc64),
(char *) ControlFile + sizeof(crc64),
sizeof(ControlFileData) - sizeof(crc64));
FIN_CRC64(crc);
@ -2002,10 +2035,11 @@ ReadControlFile(void)
elog(STOP, "Invalid CRC in control file");
/*
* Do compatibility checking immediately. We do this here for 2 reasons:
* Do compatibility checking immediately. We do this here for 2
* reasons:
*
* (1) if the database isn't compatible with the backend executable,
* we want to abort before we can possibly do any damage;
* (1) if the database isn't compatible with the backend executable, we
* want to abort before we can possibly do any damage;
*
* (2) this code is executed in the postmaster, so the setlocale() will
* propagate to forked backends, which aren't going to read this file
@ -2043,7 +2077,7 @@ UpdateControlFile(void)
INIT_CRC64(ControlFile->crc);
COMP_CRC64(ControlFile->crc,
(char*) ControlFile + sizeof(crc64),
(char *) ControlFile + sizeof(crc64),
sizeof(ControlFileData) - sizeof(crc64));
FIN_CRC64(ControlFile->crc);
@ -2096,6 +2130,7 @@ XLOGShmemInit(void)
Assert(!found);
memset(XLogCtl, 0, sizeof(XLogCtlData));
/*
* Since XLogCtlData contains XLogRecPtr fields, its sizeof should be
* a multiple of the alignment for same, so no extra alignment padding
@ -2104,9 +2139,10 @@ XLOGShmemInit(void)
XLogCtl->xlblocks = (XLogRecPtr *)
(((char *) XLogCtl) + sizeof(XLogCtlData));
memset(XLogCtl->xlblocks, 0, sizeof(XLogRecPtr) * XLOGbuffers);
/*
* Here, on the other hand, we must MAXALIGN to ensure the page buffers
* have worst-case alignment.
* Here, on the other hand, we must MAXALIGN to ensure the page
* buffers have worst-case alignment.
*/
XLogCtl->pages =
((char *) XLogCtl) + MAXALIGN(sizeof(XLogCtlData) +
@ -2114,8 +2150,8 @@ XLOGShmemInit(void)
memset(XLogCtl->pages, 0, BLCKSZ * XLOGbuffers);
/*
* Do basic initialization of XLogCtl shared data.
* (StartupXLOG will fill in additional info.)
* Do basic initialization of XLogCtl shared data. (StartupXLOG will
* fill in additional info.)
*/
XLogCtl->XLogCacheByte = BLCKSZ * XLOGbuffers;
XLogCtl->XLogCacheBlck = XLOGbuffers - 1;
@ -2180,7 +2216,7 @@ BootStrapXLOG(void)
INIT_CRC64(crc);
COMP_CRC64(crc, &checkPoint, sizeof(checkPoint));
COMP_CRC64(crc, (char*) record + sizeof(crc64),
COMP_CRC64(crc, (char *) record + sizeof(crc64),
SizeOfXLogRecord - sizeof(crc64));
FIN_CRC64(crc);
record->xl_crc = crc;
@ -2246,8 +2282,8 @@ StartupXLOG(void)
/*
* Read control file and check XLOG status looks valid.
*
* Note: in most control paths, *ControlFile is already valid and we
* need not do ReadControlFile() here, but might as well do it to be sure.
* Note: in most control paths, *ControlFile is already valid and we need
* not do ReadControlFile() here, but might as well do it to be sure.
*/
ReadControlFile();
@ -2297,10 +2333,8 @@ StartupXLOG(void)
InRecovery = true; /* force recovery even if SHUTDOWNED */
}
else
{
elog(STOP, "Unable to locate a valid CheckPoint record");
}
}
LastRec = RecPtr = checkPointLoc;
memcpy(&checkPoint, XLogRecGetData(record), sizeof(CheckPoint));
wasShutdown = (record->xl_info == XLOG_CHECKPOINT_SHUTDOWN);
@ -2336,9 +2370,7 @@ StartupXLOG(void)
InRecovery = true;
}
else if (ControlFile->state != DB_SHUTDOWNED)
{
InRecovery = true;
}
/* REDO */
if (InRecovery)
@ -2355,7 +2387,8 @@ StartupXLOG(void)
/* Is REDO required ? */
if (XLByteLT(checkPoint.redo, RecPtr))
record = ReadRecord(&(checkPoint.redo), STOP, buffer);
else /* read past CheckPoint record */
else
/* read past CheckPoint record */
record = ReadRecord(NULL, LOG, buffer);
if (record != NULL)
@ -2411,8 +2444,11 @@ StartupXLOG(void)
XLogCtl->xlblocks[0].xrecoff =
((EndOfLog.xrecoff - 1) / BLCKSZ + 1) * BLCKSZ;
Insert = &XLogCtl->Insert;
/* Tricky point here: readBuf contains the *last* block that the LastRec
* record spans, not the one it starts in, which is what we want.
/*
* Tricky point here: readBuf contains the *last* block that the
* LastRec record spans, not the one it starts in, which is what we
* want.
*/
Assert(readOff == (XLogCtl->xlblocks[0].xrecoff - BLCKSZ) % XLogSegSize);
memcpy((char *) Insert->currpage, readBuf, BLCKSZ);
@ -2458,6 +2494,7 @@ StartupXLOG(void)
if (InRecovery)
{
/*
* In case we had to use the secondary checkpoint, make sure that
* it will still be shown as the secondary checkpoint after this
@ -2639,17 +2676,17 @@ CreateCheckPoint(bool shutdown)
/*
* If this isn't a shutdown, and we have not inserted any XLOG records
* since the start of the last checkpoint, skip the checkpoint. The
* idea here is to avoid inserting duplicate checkpoints when the system
* is idle. That wastes log space, and more importantly it exposes us to
* possible loss of both current and previous checkpoint records if the
* machine crashes just as we're writing the update. (Perhaps it'd make
* even more sense to checkpoint only when the previous checkpoint record
* is in a different xlog page?)
* idea here is to avoid inserting duplicate checkpoints when the
* system is idle. That wastes log space, and more importantly it
* exposes us to possible loss of both current and previous checkpoint
* records if the machine crashes just as we're writing the update.
* (Perhaps it'd make even more sense to checkpoint only when the
* previous checkpoint record is in a different xlog page?)
*
* We have to make two tests to determine that nothing has happened since
* the start of the last checkpoint: current insertion point must match
* the end of the last checkpoint record, and its redo pointer must point
* to itself.
* the start of the last checkpoint: current insertion point must
* match the end of the last checkpoint record, and its redo pointer
* must point to itself.
*/
if (!shutdown)
{
@ -2687,16 +2724,18 @@ CreateCheckPoint(bool shutdown)
freespace = BLCKSZ - SizeOfXLogPHD;
}
INSERT_RECPTR(checkPoint.redo, Insert, Insert->curridx);
/*
* Here we update the shared RedoRecPtr for future XLogInsert calls;
* this must be done while holding the insert lock.
*/
RedoRecPtr = XLogCtl->Insert.RedoRecPtr = checkPoint.redo;
/*
* Get UNDO record ptr - this is oldest of PROC->logRec values.
* We do this while holding insert lock to ensure that we won't miss
* any about-to-commit transactions (UNDO must include all xacts that
* have commits after REDO point).
* Get UNDO record ptr - this is oldest of PROC->logRec values. We do
* this while holding insert lock to ensure that we won't miss any
* about-to-commit transactions (UNDO must include all xacts that have
* commits after REDO point).
*/
checkPoint.undo = GetUndoRecPtr();
@ -2720,8 +2759,8 @@ CreateCheckPoint(bool shutdown)
SpinRelease(OidGenLockId);
/*
* Having constructed the checkpoint record, ensure all shmem disk buffers
* are flushed to disk.
* Having constructed the checkpoint record, ensure all shmem disk
* buffers are flushed to disk.
*/
FlushBufferPool();
@ -2729,7 +2768,7 @@ CreateCheckPoint(bool shutdown)
* Now insert the checkpoint record into XLOG.
*/
rdata.buffer = InvalidBuffer;
rdata.data = (char *)(&checkPoint);
rdata.data = (char *) (&checkPoint);
rdata.len = sizeof(checkPoint);
rdata.next = NULL;
@ -2748,9 +2787,9 @@ CreateCheckPoint(bool shutdown)
elog(STOP, "XLog concurrent activity while data base is shutting down");
/*
* Remember location of prior checkpoint's earliest info.
* Oldest item is redo or undo, whichever is older; but watch out
* for case that undo = 0.
* Remember location of prior checkpoint's earliest info. Oldest item
* is redo or undo, whichever is older; but watch out for case that
* undo = 0.
*/
if (ControlFile->checkPointCopy.undo.xrecoff != 0 &&
XLByteLT(ControlFile->checkPointCopy.undo,
@ -2804,7 +2843,7 @@ XLogPutNextOid(Oid nextOid)
XLogRecData rdata;
rdata.buffer = InvalidBuffer;
rdata.data = (char *)(&nextOid);
rdata.data = (char *) (&nextOid);
rdata.len = sizeof(Oid);
rdata.next = NULL;
(void) XLogInsert(RM_XLOG_ID, XLOG_NEXTOID, &rdata);
@ -2846,9 +2885,7 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
memcpy(&checkPoint, XLogRecGetData(record), sizeof(CheckPoint));
/* In an ONLINE checkpoint, treat the counters like NEXTOID */
if (ShmemVariableCache->nextXid < checkPoint.nextXid)
{
ShmemVariableCache->nextXid = checkPoint.nextXid;
}
if (ShmemVariableCache->nextOid < checkPoint.nextOid)
{
ShmemVariableCache->nextOid = checkPoint.nextOid;
@ -2863,14 +2900,15 @@ xlog_undo(XLogRecPtr lsn, XLogRecord *record)
}
void
xlog_desc(char *buf, uint8 xl_info, char* rec)
xlog_desc(char *buf, uint8 xl_info, char *rec)
{
uint8 info = xl_info & ~XLR_INFO_MASK;
if (info == XLOG_CHECKPOINT_SHUTDOWN ||
info == XLOG_CHECKPOINT_ONLINE)
{
CheckPoint *checkpoint = (CheckPoint*) rec;
CheckPoint *checkpoint = (CheckPoint *) rec;
sprintf(buf + strlen(buf), "checkpoint: redo %u/%u; undo %u/%u; "
"sui %u; xid %u; oid %u; %s",
checkpoint->redo.xlogid, checkpoint->redo.xrecoff,
@ -2923,15 +2961,19 @@ xlog_outrec(char *buf, XLogRecord *record)
bool
check_xlog_sync_method(const char *method)
{
if (strcasecmp(method, "fsync") == 0) return true;
if (strcasecmp(method, "fsync") == 0)
return true;
#ifdef HAVE_FDATASYNC
if (strcasecmp(method, "fdatasync") == 0) return true;
if (strcasecmp(method, "fdatasync") == 0)
return true;
#endif
#ifdef OPEN_SYNC_FLAG
if (strcasecmp(method, "open_sync") == 0) return true;
if (strcasecmp(method, "open_sync") == 0)
return true;
#endif
#ifdef OPEN_DATASYNC_FLAG
if (strcasecmp(method, "open_datasync") == 0) return true;
if (strcasecmp(method, "open_datasync") == 0)
return true;
#endif
return false;
}
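
check_xlog_sync_method above accepts a sync method only if the matching primitive was detected at build time, one #ifdef per option. The same shape in standalone form, with made-up feature macros in place of the real configure symbols:

/*
 * Standalone sketch: a settings string is accepted only when the matching
 * primitive was compiled in.  DEMO_HAVE_* are invented feature macros.
 */
#include <stdbool.h>
#include <stdio.h>
#include <strings.h>            /* strcasecmp */

/* #define DEMO_HAVE_FDATASYNC */
/* #define DEMO_HAVE_OPEN_SYNC */

static bool
demo_check_sync_method(const char *method)
{
    if (strcasecmp(method, "fsync") == 0)
        return true;
#ifdef DEMO_HAVE_FDATASYNC
    if (strcasecmp(method, "fdatasync") == 0)
        return true;
#endif
#ifdef DEMO_HAVE_OPEN_SYNC
    if (strcasecmp(method, "open_sync") == 0)
        return true;
#endif
    return false;
}

int
main(void)
{
    printf("fsync ok:     %d\n", demo_check_sync_method("fsync"));
    printf("fdatasync ok: %d\n", demo_check_sync_method("fdatasync"));
    return 0;
}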
@ -2978,11 +3020,12 @@ assign_xlog_sync_method(const char *method)
if (sync_method != new_sync_method || open_sync_bit != new_sync_bit)
{
/*
* To ensure that no blocks escape unsynced, force an fsync on
* the currently open log segment (if any). Also, if the open
* flag is changing, close the log file so it will be reopened
* (with new flag bit) at next use.
* To ensure that no blocks escape unsynced, force an fsync on the
* currently open log segment (if any). Also, if the open flag is
* changing, close the log file so it will be reopened (with new
* flag bit) at next use.
*/
if (openLogFile >= 0)
{

src/backend/access/transam/xlogutils.c
@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $Header: /cvsroot/pgsql/src/backend/access/transam/xlogutils.c,v 1.14 2001/03/13 01:17:05 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/xlogutils.c,v 1.15 2001/03/22 03:59:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -52,11 +52,11 @@ XLogIsOwnerOfTuple(RelFileNode hnode, ItemPointer iptr,
reln = XLogOpenRelation(false, RM_HEAP_ID, hnode);
if (!RelationIsValid(reln))
return(0);
return (0);
buffer = ReadBuffer(reln, ItemPointerGetBlockNumber(iptr));
if (!BufferIsValid(buffer))
return(0);
return (0);
LockBuffer(buffer, BUFFER_LOCK_SHARE);
page = (Page) BufferGetPage(buffer);
@ -64,13 +64,13 @@ XLogIsOwnerOfTuple(RelFileNode hnode, ItemPointer iptr,
ItemPointerGetOffsetNumber(iptr) > PageGetMaxOffsetNumber(page))
{
UnlockAndReleaseBuffer(buffer);
return(0);
return (0);
}
lp = PageGetItemId(page, ItemPointerGetOffsetNumber(iptr));
if (!ItemIdIsUsed(lp) || ItemIdDeleted(lp))
{
UnlockAndReleaseBuffer(buffer);
return(0);
return (0);
}
htup = (HeapTupleHeader) PageGetItem(page, lp);
@ -79,11 +79,11 @@ XLogIsOwnerOfTuple(RelFileNode hnode, ItemPointer iptr,
if (htup->t_xmin != xid || htup->t_cmin != cid)
{
UnlockAndReleaseBuffer(buffer);
return(-1);
return (-1);
}
UnlockAndReleaseBuffer(buffer);
return(1);
return (1);
}
/*
@ -103,11 +103,11 @@ XLogIsValidTuple(RelFileNode hnode, ItemPointer iptr)
reln = XLogOpenRelation(false, RM_HEAP_ID, hnode);
if (!RelationIsValid(reln))
return(false);
return (false);
buffer = ReadBuffer(reln, ItemPointerGetBlockNumber(iptr));
if (!BufferIsValid(buffer))
return(false);
return (false);
LockBuffer(buffer, BUFFER_LOCK_SHARE);
page = (Page) BufferGetPage(buffer);
@ -115,21 +115,21 @@ XLogIsValidTuple(RelFileNode hnode, ItemPointer iptr)
ItemPointerGetOffsetNumber(iptr) > PageGetMaxOffsetNumber(page))
{
UnlockAndReleaseBuffer(buffer);
return(false);
return (false);
}
if (PageGetSUI(page) != ThisStartUpID)
{
Assert(PageGetSUI(page) < ThisStartUpID);
UnlockAndReleaseBuffer(buffer);
return(true);
return (true);
}
lp = PageGetItemId(page, ItemPointerGetOffsetNumber(iptr));
if (!ItemIdIsUsed(lp) || ItemIdDeleted(lp))
{
UnlockAndReleaseBuffer(buffer);
return(false);
return (false);
}
htup = (HeapTupleHeader) PageGetItem(page, lp);
@ -140,16 +140,16 @@ XLogIsValidTuple(RelFileNode hnode, ItemPointer iptr)
{
if (htup->t_infomask & HEAP_XMIN_INVALID ||
(htup->t_infomask & HEAP_MOVED_IN &&
TransactionIdDidAbort((TransactionId)htup->t_cmin)) ||
TransactionIdDidAbort((TransactionId) htup->t_cmin)) ||
TransactionIdDidAbort(htup->t_xmin))
{
UnlockAndReleaseBuffer(buffer);
return(false);
return (false);
}
}
UnlockAndReleaseBuffer(buffer);
return(true);
return (true);
}
/*
@ -208,13 +208,13 @@ XLogReadBuffer(bool extend, Relation reln, BlockNumber blkno)
}
if (buffer != InvalidBuffer)
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
return(buffer);
return (buffer);
}
buffer = ReadBuffer(reln, blkno);
if (buffer != InvalidBuffer)
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
return(buffer);
return (buffer);
}
/*
@ -239,6 +239,7 @@ static XLogRelDesc *_xlrelarr = NULL;
static Form_pg_class _xlpgcarr = NULL;
static int _xlast = 0;
static int _xlcnt = 0;
#define _XLOG_RELCACHESIZE 512
static void
@ -248,7 +249,7 @@ _xl_init_rel_cache(void)
_xlcnt = _XLOG_RELCACHESIZE;
_xlast = 0;
_xlrelarr = (XLogRelDesc*) malloc(sizeof(XLogRelDesc) * _xlcnt);
_xlrelarr = (XLogRelDesc *) malloc(sizeof(XLogRelDesc) * _xlcnt);
memset(_xlrelarr, 0, sizeof(XLogRelDesc) * _xlcnt);
_xlpgcarr = (Form_pg_class) malloc(sizeof(FormData_pg_class) * _xlcnt);
memset(_xlpgcarr, 0, sizeof(FormData_pg_class) * _xlcnt);
@ -258,7 +259,7 @@ _xl_init_rel_cache(void)
memset(&ctl, 0, (int) sizeof(ctl));
ctl.keysize = sizeof(RelFileNode);
ctl.datasize = sizeof(XLogRelDesc*);
ctl.datasize = sizeof(XLogRelDesc *);
ctl.hash = tag_hash;
_xlrelcache = hash_create(_XLOG_RELCACHESIZE, &ctl,
@ -276,8 +277,8 @@ _xl_remove_hash_entry(XLogRelDesc **edata, Datum dummy)
rdesc->lessRecently->moreRecently = rdesc->moreRecently;
rdesc->moreRecently->lessRecently = rdesc->lessRecently;
hentry = (XLogRelCacheEntry*) hash_search(_xlrelcache,
(char*)&(rdesc->reldata.rd_node), HASH_REMOVE, &found);
hentry = (XLogRelCacheEntry *) hash_search(_xlrelcache,
(char *) &(rdesc->reldata.rd_node), HASH_REMOVE, &found);
if (hentry == NULL)
elog(STOP, "_xl_remove_hash_entry: can't delete from cache");
@ -294,7 +295,7 @@ _xl_remove_hash_entry(XLogRelDesc **edata, Datum dummy)
return;
}
static XLogRelDesc*
static XLogRelDesc *
_xl_new_reldesc(void)
{
XLogRelDesc *res;
@ -303,7 +304,7 @@ _xl_new_reldesc(void)
if (_xlast < _xlcnt)
{
_xlrelarr[_xlast].reldata.rd_rel = &(_xlpgcarr[_xlast]);
return(&(_xlrelarr[_xlast]));
return (&(_xlrelarr[_xlast]));
}
/* reuse */
@ -312,7 +313,7 @@ _xl_new_reldesc(void)
_xl_remove_hash_entry(&res, 0);
_xlast--;
return(res);
return (res);
}
@ -348,8 +349,8 @@ XLogOpenRelation(bool redo, RmgrId rmid, RelFileNode rnode)
XLogRelCacheEntry *hentry;
bool found;
hentry = (XLogRelCacheEntry*)
hash_search(_xlrelcache, (char*)&rnode, HASH_FIND, &found);
hentry = (XLogRelCacheEntry *)
hash_search(_xlrelcache, (char *) &rnode, HASH_FIND, &found);
if (hentry == NULL)
elog(STOP, "XLogOpenRelation: error in cache");
@ -372,8 +373,8 @@ XLogOpenRelation(bool redo, RmgrId rmid, RelFileNode rnode)
res->reldata.rd_lockInfo.lockRelId.relId = rnode.relNode;
res->reldata.rd_node = rnode;
hentry = (XLogRelCacheEntry*)
hash_search(_xlrelcache, (char*)&rnode, HASH_ENTER, &found);
hentry = (XLogRelCacheEntry *)
hash_search(_xlrelcache, (char *) &rnode, HASH_ENTER, &found);
if (hentry == NULL)
elog(STOP, "XLogOpenRelation: can't insert into cache");
@ -385,7 +386,7 @@ XLogOpenRelation(bool redo, RmgrId rmid, RelFileNode rnode)
res->reldata.rd_fd = -1;
res->reldata.rd_fd = smgropen(DEFAULT_SMGR, &(res->reldata),
true /* allow failure */);
true /* allow failure */ );
}
res->moreRecently = &(_xlrelarr[0]);
@ -394,7 +395,7 @@ XLogOpenRelation(bool redo, RmgrId rmid, RelFileNode rnode)
res->lessRecently->moreRecently = res;
if (res->reldata.rd_fd < 0) /* file doesn't exist */
return(NULL);
return (NULL);
return(&(res->reldata));
return (&(res->reldata));
}
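
The relation cache above keeps XLogRelDesc entries in a fixed-size array threaded onto a recently-used list, and _xl_new_reldesc() recycles the least recently used slot once the array fills. A compact standalone sketch of that recycling scheme, using toy integer keys and a linear lookup instead of the hash_search-based table:

/*
 * Standalone sketch: fixed-capacity cache with least-recently-used reuse.
 * DemoRelDesc and demo_open() are invented stand-ins, not xlogutils.c code.
 */
#include <stdio.h>

#define DEMO_CACHESIZE 3

typedef struct DemoRelDesc
{
    int         key;            /* stand-in for the RelFileNode key */
    struct DemoRelDesc *prev;
    struct DemoRelDesc *next;
} DemoRelDesc;

static DemoRelDesc cache[DEMO_CACHESIZE];
static DemoRelDesc head;        /* sentinel: head.next = MRU, head.prev = LRU */
static int  used = 0;

static void
demo_unlink(DemoRelDesc *d)
{
    d->prev->next = d->next;
    d->next->prev = d->prev;
}

static void
demo_push_front(DemoRelDesc *d)
{
    d->next = head.next;
    d->prev = &head;
    head.next->prev = d;
    head.next = d;
}

static DemoRelDesc *
demo_open(int key)
{
    DemoRelDesc *d;

    /* linear lookup stands in for the hash table */
    for (d = head.next; d != &head; d = d->next)
    {
        if (d->key == key)
        {
            demo_unlink(d);
            demo_push_front(d);
            return d;
        }
    }

    if (used < DEMO_CACHESIZE)
        d = &cache[used++];     /* free slot available */
    else
    {
        d = head.prev;          /* reuse the least recently used entry */
        demo_unlink(d);
        printf("evicting key %d\n", d->key);
    }
    d->key = key;
    demo_push_front(d);
    return d;
}

int
main(void)
{
    int         keys[] = {1, 2, 3, 1, 4, 5};
    int         i;

    head.next = head.prev = &head;
    for (i = 0; i < 6; i++)
        printf("open %d -> slot %d\n", keys[i],
               (int) (demo_open(keys[i]) - cache));
    return 0;
}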

src/backend/catalog/aclchk.c
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/aclchk.c,v 1.46 2001/01/24 19:42:51 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/aclchk.c,v 1.47 2001/03/22 03:59:18 momjian Exp $
*
* NOTES
* See acl.h.
@ -250,8 +250,8 @@ aclcheck(char *relname, Acl *acl, AclId id, AclIdType idtype, AclMode mode)
num;
/*
* If ACL is null, default to "OK" --- this should not happen,
* since caller should have inserted appropriate default
* If ACL is null, default to "OK" --- this should not happen, since
* caller should have inserted appropriate default
*/
if (!acl)
{
@ -265,8 +265,8 @@ aclcheck(char *relname, Acl *acl, AclId id, AclIdType idtype, AclMode mode)
/*
* We'll treat the empty ACL like that, too, although this is more
* like an error (i.e., you manually blew away your ACL array) -- the
* system never creates an empty ACL, since there must always be
* a "world" entry in the first slot.
* system never creates an empty ACL, since there must always be a
* "world" entry in the first slot.
*/
if (num < 1)
{

src/backend/catalog/catalog.c
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/catalog.c,v 1.39 2001/01/24 19:42:51 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/catalog.c,v 1.40 2001/03/22 03:59:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/

src/backend/catalog/heap.c
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/heap.c,v 1.160 2001/02/14 21:34:59 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/heap.c,v 1.161 2001/03/22 03:59:19 momjian Exp $
*
*
* INTERFACE ROUTINES
@ -270,7 +270,11 @@ heap_create(char *relname,
if (istemp)
{
/* replace relname of caller with a unique name for a temp relation */
/*
* replace relname of caller with a unique name for a temp
* relation
*/
snprintf(relname, NAMEDATALEN, "pg_temp.%d.%u",
(int) MyProcPid, uniqueId++);
}
@ -738,6 +742,7 @@ AddNewRelationTuple(Relation pg_class_desc,
static void
AddNewRelationType(char *typeName, Oid new_rel_oid, Oid new_type_oid)
{
/*
* The sizes are set to oid size because it makes implementing sets
* MUCH easier, and no one (we hope) uses these fields to figure out
@ -1025,9 +1030,7 @@ RelationRemoveInheritance(Relation relation)
&entry);
while (HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
{
simple_heap_delete(catalogRelation, &tuple->t_self);
}
heap_endscan(scan);
heap_close(catalogRelation, RowExclusiveLock);
@ -1152,8 +1155,8 @@ RelationTruncateIndexes(Oid heapId)
/*
* We have to re-open the heap rel each time through this loop
* because index_build will close it again. We need grab no lock,
* however, because we assume heap_truncate is holding an exclusive
* lock on the heap rel.
* however, because we assume heap_truncate is holding an
* exclusive lock on the heap rel.
*/
heapRelation = heap_open(heapId, NoLock);
@ -1164,8 +1167,8 @@ RelationTruncateIndexes(Oid heapId)
LockRelation(currentIndex, AccessExclusiveLock);
/*
* Drop any buffers associated with this index. If they're
* dirty, they're just dropped without bothering to flush to disk.
* Drop any buffers associated with this index. If they're dirty,
* they're just dropped without bothering to flush to disk.
*/
DropRelationBuffers(currentIndex);
@ -1177,6 +1180,7 @@ RelationTruncateIndexes(Oid heapId)
InitIndexStrategy(indexInfo->ii_NumIndexAttrs,
currentIndex, accessMethodId);
index_build(heapRelation, currentIndex, indexInfo, NULL);
/*
* index_build will close both the heap and index relations (but
* not give up the locks we hold on them).
@ -1981,9 +1985,7 @@ RemoveAttrDefault(Relation rel)
adscan = heap_beginscan(adrel, 0, SnapshotNow, 1, &key);
while (HeapTupleIsValid(tup = heap_getnext(adscan, 0)))
{
simple_heap_delete(adrel, &tup->t_self);
}
heap_endscan(adscan);
heap_close(adrel, RowExclusiveLock);
@ -2005,9 +2007,7 @@ RemoveRelCheck(Relation rel)
rcscan = heap_beginscan(rcrel, 0, SnapshotNow, 1, &key);
while (HeapTupleIsValid(tup = heap_getnext(rcscan, 0)))
{
simple_heap_delete(rcrel, &tup->t_self);
}
heap_endscan(rcscan);
heap_close(rcrel, RowExclusiveLock);
@ -2044,9 +2044,7 @@ RemoveStatistics(Relation rel)
scan = heap_beginscan(pgstatistic, false, SnapshotNow, 1, &key);
while (HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
{
simple_heap_delete(pgstatistic, &tuple->t_self);
}
heap_endscan(scan);
heap_close(pgstatistic, RowExclusiveLock);

src/backend/catalog/index.c
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.142 2001/02/23 09:31:52 inoue Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.143 2001/03/22 03:59:19 momjian Exp $
*
*
* INTERFACE ROUTINES
@ -301,7 +301,8 @@ ConstructTupleDescriptor(Relation heapRelation,
memcpy(to, from, ATTRIBUTE_TUPLE_SIZE);
/*
* Fix the stuff that should not be the same as the underlying attr
* Fix the stuff that should not be the same as the underlying
* attr
*/
to->attnum = i + 1;
@ -311,9 +312,9 @@ ConstructTupleDescriptor(Relation heapRelation,
to->attcacheoff = -1;
/*
* We do not yet have the correct relation OID for the index,
* so just set it invalid for now. InitializeAttributeOids()
* will fix it later.
* We do not yet have the correct relation OID for the index, so
* just set it invalid for now. InitializeAttributeOids() will
* fix it later.
*/
to->attrelid = InvalidOid;
}
@ -1008,9 +1009,7 @@ index_create(char *heapRelationName,
/* XXX shouldn't we close the heap and index rels here? */
}
else
{
index_build(heapRelation, indexRelation, indexInfo, NULL);
}
}
/* ----------------------------------------------------------------
@ -1081,12 +1080,12 @@ index_drop(Oid indexId)
heap_freetuple(tuple);
/*
* Update the pg_class tuple for the owning relation. We are presently
* too lazy to attempt to compute the new correct value of relhasindex
* (the next VACUUM will fix it if necessary). But we must send out a
* shared-cache-inval notice on the owning relation to ensure other
* backends update their relcache lists of indexes. So, unconditionally
* do setRelhasindex(true).
* Update the pg_class tuple for the owning relation. We are
* presently too lazy to attempt to compute the new correct value of
* relhasindex (the next VACUUM will fix it if necessary). But we
* must send out a shared-cache-inval notice on the owning relation to
* ensure other backends update their relcache lists of indexes. So,
* unconditionally do setRelhasindex(true).
*/
setRelhasindex(heapId, true);
@ -1199,7 +1198,7 @@ BuildIndexInfo(HeapTuple indexTuple)
{
ii->ii_NumIndexAttrs = 1;
/* Do a lookup on the function, too */
fmgr_info(indexStruct->indproc, & ii->ii_FuncInfo);
fmgr_info(indexStruct->indproc, &ii->ii_FuncInfo);
}
else
ii->ii_NumIndexAttrs = numKeys;
@ -1326,8 +1325,8 @@ LockClassinfoForUpdate(Oid relid, HeapTuple rtup,
Relation relationRelation;
/*
* NOTE: get and hold RowExclusiveLock on pg_class, because caller will
* probably modify the rel's pg_class tuple later on.
* NOTE: get and hold RowExclusiveLock on pg_class, because caller
* will probably modify the rel's pg_class tuple later on.
*/
relationRelation = heap_openr(RelationRelationName, RowExclusiveLock);
classTuple = SearchSysCache(RELOID, PointerGetDatum(relid),
@ -1513,7 +1512,8 @@ setRelhasindex(Oid relid, bool hasindex)
void
setNewRelfilenode(Relation relation)
{
Relation pg_class, idescs[Num_pg_class_indices];
Relation pg_class,
idescs[Num_pg_class_indices];
Oid newrelfilenode;
bool in_place_update = false;
HeapTupleData lockTupleData;
@ -1577,6 +1577,7 @@ setNewRelfilenode(Relation relation)
/* Make sure the relfilenode change */
CommandCounterIncrement();
}
#endif /* OLD_FILE_NAMING */
/* ----------------
@ -1713,6 +1714,7 @@ UpdateStats(Oid relid, long reltuples)
*/
if (in_place_upd)
{
/*
* At bootstrap time, we don't need to worry about concurrency or
* visibility of changes, so we cheat. Also cheat if REINDEX.
@ -1787,9 +1789,11 @@ DefaultBuild(Relation heapRelation,
long reltuples,
indtuples;
Node *predicate = indexInfo->ii_Predicate;
#ifndef OMIT_PARTIAL_INDEX
TupleTable tupleTable;
TupleTableSlot *slot;
#endif
ExprContext *econtext;
InsertIndexResult insertResult;
@ -1855,6 +1859,7 @@ DefaultBuild(Relation heapRelation,
reltuples++;
#ifndef OMIT_PARTIAL_INDEX
/*
* If oldPred != NULL, this is an EXTEND INDEX command, so skip
* this tuple if it was already in the existing partial index
@ -1906,9 +1911,7 @@ DefaultBuild(Relation heapRelation,
#ifndef OMIT_PARTIAL_INDEX
if (predicate != NULL || oldPred != NULL)
{
ExecDropTupleTable(tupleTable, true);
}
#endif /* OMIT_PARTIAL_INDEX */
FreeExprContext(econtext);
@ -2098,9 +2101,10 @@ reindex_index(Oid indexId, bool force, bool inplace)
if (inplace)
{
/*
* Release any buffers associated with this index. If they're dirty,
* they're just dropped without bothering to flush to disk.
* Release any buffers associated with this index. If they're
* dirty, they're just dropped without bothering to flush to disk.
*/
DropRelationBuffers(iRel);
@ -2164,18 +2168,24 @@ reindex_relation(Oid relid, bool force)
bool old,
reindexed;
bool deactivate_needed, overwrite, upd_pg_class_inplace;
bool deactivate_needed,
overwrite,
upd_pg_class_inplace;
#ifdef OLD_FILE_NAMING
overwrite = upd_pg_class_inplace = deactivate_needed = true;
#else
Relation rel;
overwrite = upd_pg_class_inplace = deactivate_needed = false;
/*
* avoid heap_update() pg_class tuples while processing
* reindex for pg_class.
* avoid heap_update() pg_class tuples while processing reindex for
* pg_class.
*/
if (IsIgnoringSystemIndexes())
upd_pg_class_inplace = true;
/*
* ignore the indexes of the target system relation while processing
* reindex.
@ -2184,10 +2194,10 @@ reindex_relation(Oid relid, bool force)
if (!IsIgnoringSystemIndexes() && IsSystemRelationName(NameStr(rel->rd_rel->relname)))
deactivate_needed = true;
#ifndef ENABLE_REINDEX_NAILED_RELATIONS
/*
* nailed relations are never updated.
* We couldn't keep the consistency between the relation
* descriptors and pg_class tuples.
* nailed relations are never updated. We couldn't keep the
* consistency between the relation descriptors and pg_class tuples.
*/
if (rel->rd_isnailed)
{
@ -2200,9 +2210,10 @@ reindex_relation(Oid relid, bool force)
elog(ERROR, "the target relation %u is nailed", relid);
}
#endif /* ENABLE_REINDEX_NAILED_RELATIONS */
/*
* Shared system indexes must be overwritten because it's
* impossible to update pg_class tuples of all databases.
* Shared system indexes must be overwritten because it's impossible
* to update pg_class tuples of all databases.
*/
if (IsSharedSystemRelationName(NameStr(rel->rd_rel->relname)))
{
@ -2252,24 +2263,27 @@ reindex_relation(Oid relid, bool force)
heap_endscan(scan);
heap_close(indexRelation, AccessShareLock);
if (reindexed)
/*
* Ok,we could use the reindexed indexes of the target
* system relation now.
* Ok,we could use the reindexed indexes of the target system
* relation now.
*/
{
if (deactivate_needed)
{
if (!overwrite && relid == RelOid_pg_class)
{
/*
* For pg_class, relhasindex should be set
* to true here in place.
* For pg_class, relhasindex should be set to true here in
* place.
*/
setRelhasindex(relid, true);
CommandCounterIncrement();
/*
* However the following setRelhasindex()
* is needed to keep consistency with WAL.
* However the following setRelhasindex() is needed to
* keep consistency with WAL.
*/
}
setRelhasindex(relid, true);

src/backend/catalog/indexing.c
@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/indexing.c,v 1.76 2001/01/24 19:42:51 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/indexing.c,v 1.77 2001/03/22 03:59:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/

src/backend/catalog/pg_aggregate.c
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_aggregate.c,v 1.37 2001/01/24 19:42:52 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_aggregate.c,v 1.38 2001/03/22 03:59:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -79,8 +79,8 @@ AggregateCreate(char *aggName,
/*
* Handle the aggregate's base type (input data type). This can be
* specified as 'ANY' for a data-independent transition function,
* such as COUNT(*).
* specified as 'ANY' for a data-independent transition function, such
* as COUNT(*).
*/
basetype = GetSysCacheOid(TYPENAME,
PointerGetDatum(aggbasetypeName),
@ -118,9 +118,7 @@ AggregateCreate(char *aggName,
nargs = 2;
}
else
{
nargs = 1;
}
tup = SearchSysCache(PROCNAME,
PointerGetDatum(aggtransfnName),
Int32GetDatum(nargs),
@ -134,16 +132,17 @@ AggregateCreate(char *aggName,
if (proc->prorettype != transtype)
elog(ERROR, "AggregateCreate: return type of '%s' is not '%s'",
aggtransfnName, aggtranstypeName);
/*
* If the transfn is strict and the initval is NULL, make sure
* input type and transtype are the same (or at least binary-
* compatible), so that it's OK to use the first input value
* as the initial transValue.
* If the transfn is strict and the initval is NULL, make sure input
* type and transtype are the same (or at least binary- compatible),
* so that it's OK to use the first input value as the initial
* transValue.
*/
if (proc->proisstrict && agginitval == NULL)
{
if (basetype != transtype &&
! IS_BINARY_COMPATIBLE(basetype, transtype))
!IS_BINARY_COMPATIBLE(basetype, transtype))
elog(ERROR, "AggregateCreate: must not omit initval when transfn is strict and transtype is not compatible with input type");
}
ReleaseSysCache(tup);
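
The check above backs up the rule stated in the comment: with a strict transition function and no initval, the first input value simply becomes the initial transValue, which is only safe when input type and transition type agree. A toy standalone fold showing that convention (NULL-input skipping is left out; none of this is executor code):

/*
 * Standalone sketch: when the transition function is "strict" and there is
 * no initial value, the first input seeds the running state.  DemoDatum and
 * demo_aggregate() are invented for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

typedef long DemoDatum;

typedef DemoDatum (*DemoTransFn) (DemoDatum state, DemoDatum input);

static DemoDatum
demo_sum(DemoDatum state, DemoDatum input)
{
    return state + input;
}

static DemoDatum
demo_aggregate(DemoTransFn transfn, bool strict, bool have_initval,
               DemoDatum initval, const DemoDatum *inputs, int n)
{
    DemoDatum   state = initval;
    bool        state_set = have_initval;
    int         i;

    for (i = 0; i < n; i++)
    {
        if (!state_set && strict)
        {
            state = inputs[i];  /* first input value seeds the state */
            state_set = true;
            continue;
        }
        state = transfn(state, inputs[i]);
        state_set = true;
    }
    return state;
}

int
main(void)
{
    DemoDatum   vals[] = {1, 2, 3, 4};

    printf("sum = %ld\n",
           (long) demo_aggregate(demo_sum, true, false, 0, vals, 4));
    return 0;
}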
@ -168,6 +167,7 @@ AggregateCreate(char *aggName,
}
else
{
/*
* If no finalfn, aggregate result type is type of the state value
*/

src/backend/catalog/pg_largeobject.c
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_largeobject.c,v 1.7 2001/01/24 19:42:52 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_largeobject.c,v 1.8 2001/03/22 03:59:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -51,7 +51,7 @@ LargeObjectCreate(Oid loid)
*/
for (i = 0; i < Natts_pg_largeobject; i++)
{
values[i] = (Datum)NULL;
values[i] = (Datum) NULL;
nulls[i] = ' ';
}

src/backend/catalog/pg_operator.c
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_operator.c,v 1.55 2001/01/24 19:42:52 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_operator.c,v 1.56 2001/03/22 03:59:20 momjian Exp $
*
* NOTES
* these routines moved here from commands/define.c and somewhat cleaned up.

src/backend/catalog/pg_proc.c
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_proc.c,v 1.53 2001/01/24 19:42:52 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_proc.c,v 1.54 2001/03/22 03:59:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -247,8 +247,8 @@ ProcedureCreate(char *procedureName,
* symbol. Also check for a valid function information record.
*
* We used to perform these checks only when the function was first
* called, but it seems friendlier to verify the library's validity
* at CREATE FUNCTION time.
* called, but it seems friendlier to verify the library's validity at
* CREATE FUNCTION time.
*/
if (languageObjectId == ClanguageId)
@ -355,7 +355,8 @@ checkretval(Oid rettype, List *queryTreeList)
tlist = parse->targetList;
/*
* The last query must be a SELECT if and only if there is a return type.
* The last query must be a SELECT if and only if there is a return
* type.
*/
if (rettype == InvalidOid)
{
@ -375,8 +376,8 @@ checkretval(Oid rettype, List *queryTreeList)
tlistlen = ExecCleanTargetListLength(tlist);
/*
* For base-type returns, the target list should have exactly one entry,
* and its type should agree with what the user declared.
* For base-type returns, the target list should have exactly one
* entry, and its type should agree with what the user declared.
*/
typerelid = typeidTypeRelid(rettype);
if (typerelid == InvalidOid)
@ -397,8 +398,8 @@ checkretval(Oid rettype, List *queryTreeList)
* If the target list is of length 1, and the type of the varnode in
* the target list is the same as the declared return type, this is
* okay. This can happen, for example, where the body of the function
* is 'SELECT (x = func2())', where func2 has the same return type
* as the function that's calling it.
* is 'SELECT (x = func2())', where func2 has the same return type as
* the function that's calling it.
*/
if (tlistlen == 1)
{
@ -408,10 +409,10 @@ checkretval(Oid rettype, List *queryTreeList)
}
/*
* By here, the procedure returns a tuple or set of tuples. This part of
* the typechecking is a hack. We look up the relation that is the
* declared return type, and be sure that attributes 1 .. n in the target
* list match the declared types.
* By here, the procedure returns a tuple or set of tuples. This part
* of the typechecking is a hack. We look up the relation that is the
* declared return type, and be sure that attributes 1 .. n in the
* target list match the declared types.
*/
reln = heap_open(typerelid, AccessShareLock);
relid = reln->rd_id;
@ -436,7 +437,7 @@ checkretval(Oid rettype, List *queryTreeList)
typeidTypeName(rettype),
typeidTypeName(tletype),
typeidTypeName(reln->rd_att->attrs[i]->atttypid),
i+1);
i + 1);
i++;
}

src/backend/catalog/pg_type.c
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_type.c,v 1.59 2001/02/12 20:07:21 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_type.c,v 1.60 2001/03/22 03:59:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/

src/backend/commands/analyze.c
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/analyze.c,v 1.14 2001/02/16 03:16:58 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/analyze.c,v 1.15 2001/03/22 03:59:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -86,9 +86,10 @@ analyze_rel(Oid relid, List *anal_cols2, int MESSAGE_LEVEL)
CommitTransactionCommand();
return;
}
/*
* We can VACUUM ANALYZE any table except pg_statistic.
* see update_relstats
* We can VACUUM ANALYZE any table except pg_statistic. see
* update_relstats
*/
if (strcmp(NameStr(((Form_pg_class) GETSTRUCT(tuple))->relname),
StatisticRelationName) == 0)
@ -104,9 +105,11 @@ analyze_rel(Oid relid, List *anal_cols2, int MESSAGE_LEVEL)
if (!pg_ownercheck(GetUserId(), RelationGetRelationName(onerel),
RELNAME))
{
/* we already did an elog during vacuum
elog(NOTICE, "Skipping \"%s\" --- only table owner can VACUUM it",
RelationGetRelationName(onerel));
/*
* we already did an elog during vacuum elog(NOTICE, "Skipping
* \"%s\" --- only table owner can VACUUM it",
* RelationGetRelationName(onerel));
*/
heap_close(onerel, NoLock);
CommitTransactionCommand();
@ -295,15 +298,16 @@ attr_stats(Relation onerel, int attr_cnt, VacAttrStats *vacattrstats, HeapTuple
stats->nonnull_cnt++;
/*
* If the value is toasted, detoast it to avoid repeated detoastings
* and resultant memory leakage inside the comparison routines.
* If the value is toasted, detoast it to avoid repeated
* detoastings and resultant memory leakage inside the comparison
* routines.
*/
if (!stats->attr->attbyval && stats->attr->attlen == -1)
value = PointerGetDatum(PG_DETOAST_DATUM(origvalue));
else
value = origvalue;
if (! stats->initialized)
if (!stats->initialized)
{
bucketcpy(stats->attr, value, &stats->best, &stats->best_len);
/* best_cnt gets incremented below */
@ -489,22 +493,21 @@ update_attstats(Oid relid, int natts, VacAttrStats *vacattrstats)
{
/*
* empty relation, so put a dummy value in
* attdispersion
* empty relation, so put a dummy value in attdispersion
*/
selratio = 0;
}
else if (stats->null_cnt <= 1 && stats->best_cnt == 1)
{
/*
* looks like we have a unique-key attribute --- flag
* this with special -1.0 flag value.
* looks like we have a unique-key attribute --- flag this
* with special -1.0 flag value.
*
* The correct dispersion is 1.0/numberOfRows, but since
* the relation row count can get updated without
* recomputing dispersion, we want to store a
* "symbolic" value and figure 1.0/numberOfRows on the
* fly.
* The correct dispersion is 1.0/numberOfRows, but since the
* relation row count can get updated without recomputing
* dispersion, we want to store a "symbolic" value and
* figure 1.0/numberOfRows on the fly.
*/
selratio = -1;
}
@ -515,8 +518,7 @@ update_attstats(Oid relid, int natts, VacAttrStats *vacattrstats)
{
/*
* exact result when there are just 1 or 2
* values...
* exact result when there are just 1 or 2 values...
*/
double min_cnt_d = stats->min_cnt,
max_cnt_d = stats->max_cnt,
@ -552,12 +554,12 @@ update_attstats(Oid relid, int natts, VacAttrStats *vacattrstats)
/*
* Create pg_statistic tuples for the relation, if we have
* gathered the right data. del_stats() previously
* deleted all the pg_statistic tuples for the rel, so we
* just have to insert new ones here.
* gathered the right data. del_stats() previously deleted
* all the pg_statistic tuples for the rel, so we just have to
* insert new ones here.
*
* Note analyze_rel() has seen to it that we won't come here
* when vacuuming pg_statistic itself.
* Note analyze_rel() has seen to it that we won't come here when
* vacuuming pg_statistic itself.
*/
if (VacAttrStatsLtGtValid(stats) && stats->initialized)
{
@ -682,6 +684,3 @@ del_stats(Oid relid, int attcnt, int *attnums)
*/
heap_close(pgstatistic, NoLock);
}

src/backend/commands/async.c
@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.76 2001/01/24 19:42:52 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.77 2001/03/22 03:59:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -161,6 +161,7 @@ Async_Notify(char *relname)
/* no point in making duplicate entries in the list ... */
if (!AsyncExistsPendingNotify(relname))
{
/*
* We allocate list memory from the global malloc pool to ensure
* that it will live until we want to use it. This is probably
@ -349,9 +350,7 @@ Async_UnlistenAll()
sRel = heap_beginscan(lRel, 0, SnapshotNow, 1, key);
while (HeapTupleIsValid(lTuple = heap_getnext(sRel, 0)))
{
simple_heap_delete(lRel, &lTuple->t_self);
}
heap_endscan(sRel);
heap_close(lRel, AccessExclusiveLock);
@ -499,6 +498,7 @@ AtCommit_Notify()
*/
if (kill(listenerPID, SIGUSR2) < 0)
{
/*
* Get rid of pg_listener entry if it refers to a PID
* that no longer exists. Presumably, that backend

src/backend/commands/cluster.c
@ -15,7 +15,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.64 2001/01/24 19:42:52 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.65 2001/03/22 03:59:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -75,8 +75,8 @@ cluster(char *oldrelname, char *oldindexname)
StrNCpy(saveoldindexname, oldindexname, NAMEDATALEN);
/*
* We grab exclusive access to the target rel and index for the duration
* of the transaction.
* We grab exclusive access to the target rel and index for the
* duration of the transaction.
*/
OldHeap = heap_openr(saveoldrelname, AccessExclusiveLock);
OIDOldHeap = RelationGetRelid(OldHeap);
@ -154,8 +154,8 @@ copy_heap(Oid OIDOldHeap, char *NewName, bool istemp)
OldHeapDesc = RelationGetDescr(OldHeap);
/*
* Need to make a copy of the tuple descriptor,
* since heap_create_with_catalog modifies it.
* Need to make a copy of the tuple descriptor, since
* heap_create_with_catalog modifies it.
*/
tupdesc = CreateTupleDescCopyConstr(OldHeapDesc);
@ -164,16 +164,15 @@ copy_heap(Oid OIDOldHeap, char *NewName, bool istemp)
allowSystemTableMods);
/*
* Advance command counter so that the newly-created
* relation's catalog tuples will be visible to heap_open.
* Advance command counter so that the newly-created relation's
* catalog tuples will be visible to heap_open.
*/
CommandCounterIncrement();
/*
* If necessary, create a TOAST table for the new relation.
* Note that AlterTableCreateToastTable ends with
* CommandCounterIncrement(), so that the TOAST table will
* be visible for insertion.
* If necessary, create a TOAST table for the new relation. Note that
* AlterTableCreateToastTable ends with CommandCounterIncrement(), so
* that the TOAST table will be visible for insertion.
*/
AlterTableCreateToastTable(NewName, true);
@ -198,12 +197,12 @@ copy_index(Oid OIDOldIndex, Oid OIDNewHeap, char *NewIndexName)
/*
* Create a new index like the old one. To do this I get the info
* from pg_index, and add a new index with a temporary name (that
* will be changed later).
* from pg_index, and add a new index with a temporary name (that will
* be changed later).
*
* NOTE: index_create will cause the new index to be a temp relation
* if its parent table is, so we don't need to do anything special
* for the temp-table case here.
* NOTE: index_create will cause the new index to be a temp relation if
* its parent table is, so we don't need to do anything special for
* the temp-table case here.
*/
Old_pg_index_Tuple = SearchSysCache(INDEXRELID,
ObjectIdGetDatum(OIDOldIndex),
@ -266,13 +265,15 @@ rebuildheap(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex)
LocalHeapTuple.t_datamcxt = NULL;
LocalHeapTuple.t_data = NULL;
heap_fetch(LocalOldHeap, SnapshotNow, &LocalHeapTuple, &LocalBuffer);
if (LocalHeapTuple.t_data != NULL) {
if (LocalHeapTuple.t_data != NULL)
{
/*
* We must copy the tuple because heap_insert() will overwrite
* the commit-status fields of the tuple it's handed, and the
* retrieved tuple will actually be in a disk buffer! Thus,
* the source relation would get trashed, which is bad news
* if we abort later on. (This was a bug in releases thru 7.0)
* the source relation would get trashed, which is bad news if
* we abort later on. (This was a bug in releases thru 7.0)
*/
HeapTuple copiedTuple = heap_copytuple(&LocalHeapTuple);
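/*
 * Sketch only, not from the patch: the comment above explains why
 * rebuildheap() copies each fetched tuple before handing it to heap_insert()
 * -- the fetched tuple lives in a shared disk buffer, and the insert routine
 * scribbles on its header/status fields.  The same defensive-copy pattern in
 * plain C, with a made-up record type standing in for HeapTupleData:
 */
#include <stdlib.h>
#include <string.h>

typedef struct DemoRecord
{
	unsigned int status;		/* field the insert routine would overwrite */
	char		payload[64];
} DemoRecord;

static DemoRecord *
copy_before_insert(const DemoRecord *buffered)
{
	DemoRecord *private_copy = malloc(sizeof(DemoRecord));

	if (private_copy != NULL)
		memcpy(private_copy, buffered, sizeof(DemoRecord));
	return private_copy;		/* caller mutates/inserts this copy, not the buffer */
}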

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/command.c,v 1.122 2001/02/27 22:07:34 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/command.c,v 1.123 2001/03/22 03:59:21 momjian Exp $
*
* NOTES
* The PerformAddAttribute() code, like most of the relation
@ -180,7 +180,7 @@ PerformPortalFetch(char *name,
*/
if (forward)
{
if (! portal->atEnd)
if (!portal->atEnd)
{
ExecutorRun(queryDesc, estate, EXEC_FOR, (long) count);
if (estate->es_processed > 0)
@ -191,7 +191,7 @@ PerformPortalFetch(char *name,
}
else
{
if (! portal->atStart)
if (!portal->atStart)
{
ExecutorRun(queryDesc, estate, EXEC_BACK, (long) count);
if (estate->es_processed > 0)
@ -502,8 +502,8 @@ AlterTableAddColumn(const char *relationName,
heap_close(rel, NoLock);
/*
* Automatically create the secondary relation for TOAST
* if it formerly had no such but now has toastable attributes.
* Automatically create the secondary relation for TOAST if it
* formerly had no such but now has toastable attributes.
*/
CommandCounterIncrement();
AlterTableCreateToastTable(relationName, true);
@ -1106,7 +1106,7 @@ AlterTableAddConstraint(char *relationName,
#endif
/* Disallow ADD CONSTRAINT on views, indexes, sequences, etc */
if (! is_relation(relationName))
if (!is_relation(relationName))
elog(ERROR, "ALTER TABLE ADD CONSTRAINT: %s is not a table",
relationName);
@ -1147,15 +1147,17 @@ AlterTableAddConstraint(char *relationName,
elog(ERROR, "ALTER TABLE: cannot add constraint to a view");
/*
* Scan all of the rows, looking for a false match
* Scan all of the rows, looking for a false
* match
*/
scan = heap_beginscan(rel, false, SnapshotNow, 0, NULL);
AssertState(scan != NULL);
/*
* We need to make a parse state and range table to allow
* us to transformExpr and fix_opids to get a version of
* the expression we can pass to ExecQual
* We need to make a parse state and range
* table to allow us to transformExpr and
* fix_opids to get a version of the
* expression we can pass to ExecQual
*/
pstate = make_parsestate(NULL);
rte = addRangeTableEntry(pstate, relationName, NULL,
@ -1174,14 +1176,16 @@ AlterTableAddConstraint(char *relationName,
name);
/*
* Make sure no outside relations are referred to.
* Make sure no outside relations are referred
* to.
*/
if (length(pstate->p_rtable) != 1)
elog(ERROR, "Only relation '%s' can be referenced in CHECK",
relationName);
/*
* Might as well try to reduce any constant expressions.
* Might as well try to reduce any constant
* expressions.
*/
expr = eval_const_expressions(expr);
@ -1197,15 +1201,15 @@ AlterTableAddConstraint(char *relationName,
econtext = MakeExprContext(slot, CurrentMemoryContext);
/*
* Scan through the rows now, checking the expression
* at each row.
* Scan through the rows now, checking the
* expression at each row.
*/
while (HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
{
ExecStoreTuple(tuple, slot, InvalidBuffer, false);
if (!ExecQual(qual, econtext, true))
{
successful=false;
successful = false;
break;
}
ResetExprContext(econtext);
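/*
 * Illustrative sketch, not patch code: the block above validates a new CHECK
 * constraint by scanning every existing row and bailing out on the first one
 * that fails ExecQual.  The same shape with stand-in types (Row and the
 * predicate callback are ours; the real code works on HeapTuples and an
 * executor qual):
 */
#include <stddef.h>

typedef struct Row Row;			/* opaque stand-in for a heap tuple */
typedef int (*check_pred) (const Row *row, void *arg);

static int
rows_satisfy_check(const Row *const *rows, size_t nrows,
				   check_pred pred, void *arg)
{
	size_t		i;

	for (i = 0; i < nrows; i++)
	{
		if (!pred(rows[i], arg))
			return 0;			/* first violation: reject the constraint */
	}
	return 1;					/* all rows passed: constraint can be added */
}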
@ -1221,10 +1225,12 @@ AlterTableAddConstraint(char *relationName,
heap_close(rel, NoLock);
elog(ERROR, "AlterTableAddConstraint: rejected due to CHECK constraint %s", name);
}
/*
* Call AddRelationRawConstraints to do the real adding --
* It duplicates some of the above, but does not check the
* validity of the constraint against tuples already in
* Call AddRelationRawConstraints to do the
* real adding -- It duplicates some of the
* above, but does not check the validity of
* the constraint against tuples already in
* the table.
*/
AddRelationRawConstraints(rel, NIL, constlist);
@ -1241,7 +1247,8 @@ AlterTableAddConstraint(char *relationName,
case T_FkConstraint:
{
FkConstraint *fkconstraint = (FkConstraint *) newConstraint;
Relation rel, pkrel;
Relation rel,
pkrel;
HeapScanDesc scan;
HeapTuple tuple;
Trigger trig;
@ -1279,7 +1286,10 @@ AlterTableAddConstraint(char *relationName,
elog(ERROR, "referencing table \"%s\" not a relation",
relationName);
/* First we check for limited correctness of the constraint */
/*
* First we check for limited correctness of the
* constraint
*/
rel_attrs = pkrel->rd_att->attrs;
indexoidlist = RelationGetIndexList(pkrel);
@ -1302,24 +1312,30 @@ AlterTableAddConstraint(char *relationName,
{
List *attrl;
/* Make sure this index has the same number of keys -- It obviously
* won't match otherwise. */
/*
* Make sure this index has the same number of
* keys -- It obviously won't match otherwise.
*/
for (i = 0; i < INDEX_MAX_KEYS && indexStruct->indkey[i] != 0; i++);
if (i!=length(fkconstraint->pk_attrs))
found=false;
else {
if (i != length(fkconstraint->pk_attrs))
found = false;
else
{
/* go through the fkconstraint->pk_attrs list */
foreach(attrl, fkconstraint->pk_attrs)
{
Ident *attr=lfirst(attrl);
Ident *attr = lfirst(attrl);
found = false;
for (i = 0; i < INDEX_MAX_KEYS && indexStruct->indkey[i] != 0; i++)
{
int pkattno = indexStruct->indkey[i];
if (pkattno>0)
if (pkattno > 0)
{
char *name = NameStr(rel_attrs[pkattno-1]->attname);
if (strcmp(name, attr->name)==0)
char *name = NameStr(rel_attrs[pkattno - 1]->attname);
if (strcmp(name, attr->name) == 0)
{
found = true;
break;
@ -1344,18 +1360,24 @@ AlterTableAddConstraint(char *relationName,
heap_close(pkrel, NoLock);
rel_attrs = rel->rd_att->attrs;
if (fkconstraint->fk_attrs!=NIL) {
if (fkconstraint->fk_attrs != NIL)
{
List *fkattrs;
Ident *fkattr;
found = false;
foreach(fkattrs, fkconstraint->fk_attrs) {
foreach(fkattrs, fkconstraint->fk_attrs)
{
int count;
found = false;
fkattr=lfirst(fkattrs);
for (count = 0; count < rel->rd_att->natts; count++) {
fkattr = lfirst(fkattrs);
for (count = 0; count < rel->rd_att->natts; count++)
{
char *name = NameStr(rel->rd_att->attrs[count]->attname);
if (strcmp(name, fkattr->name)==0) {
if (strcmp(name, fkattr->name) == 0)
{
found = true;
break;
}
@ -1396,7 +1418,7 @@ AlterTableAddConstraint(char *relationName,
Ident *fk_at = lfirst(list);
trig.tgargs[count] = fk_at->name;
count+=2;
count += 2;
}
count = 5;
foreach(list, fkconstraint->pk_attrs)
@ -1404,9 +1426,9 @@ AlterTableAddConstraint(char *relationName,
Ident *pk_at = lfirst(list);
trig.tgargs[count] = pk_at->name;
count+=2;
count += 2;
}
trig.tgnargs = count-1;
trig.tgnargs = count - 1;
scan = heap_beginscan(rel, false, SnapshotNow, 0, NULL);
AssertState(scan != NULL);
@ -1472,7 +1494,7 @@ AlterTableOwner(const char *relationName, const char *newOwnerName)
/*
* first check that we are a superuser
*/
if (! superuser())
if (!superuser())
elog(ERROR, "ALTER TABLE: permission denied");
/*
@ -1618,7 +1640,7 @@ AlterTableCreateToastTable(const char *relationName, bool silent)
/*
* Check to see whether the table actually needs a TOAST table.
*/
if (! needs_toast_table(rel))
if (!needs_toast_table(rel))
{
if (silent)
{
@ -1652,10 +1674,11 @@ AlterTableCreateToastTable(const char *relationName, bool silent)
"chunk_data",
BYTEAOID,
-1, 0, false);
/*
* Ensure that the toast table doesn't itself get toasted,
* or we'll be toast :-(. This is essential for chunk_data because
* type bytea is toastable; hit the other two just to be sure.
* Ensure that the toast table doesn't itself get toasted, or we'll be
* toast :-(. This is essential for chunk_data because type bytea is
* toastable; hit the other two just to be sure.
*/
tupdesc->attrs[0]->attstorage = 'p';
tupdesc->attrs[1]->attstorage = 'p';

View File

@ -7,7 +7,7 @@
* Copyright (c) 1999, PostgreSQL Global Development Group
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/comment.c,v 1.26 2001/01/23 04:32:21 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/comment.c,v 1.27 2001/03/22 03:59:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.134 2001/03/14 21:47:50 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.135 2001/03/22 03:59:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -76,6 +76,7 @@ static StringInfoData attribute_buf;
#ifdef MULTIBYTE
static int client_encoding;
static int server_encoding;
#endif
@ -285,6 +286,7 @@ DoCopy(char *relname, bool binary, bool oids, bool from, bool pipe,
elog(ERROR, "You must have Postgres superuser privilege to do a COPY "
"directly to or from a file. Anyone can COPY to stdout or "
"from stdin. Psql's \\copy command also works for anyone.");
/*
* This restriction is unfortunate, but necessary until the frontend
* COPY protocol is redesigned to be binary-safe...
@ -344,8 +346,8 @@ DoCopy(char *relname, bool binary, bool oids, bool from, bool pipe,
mode_t oumask; /* Pre-existing umask value */
/*
* Prevent write to relative path ... too easy to shoot oneself
* in the foot by overwriting a database file ...
* Prevent write to relative path ... too easy to shoot
* oneself in the foot by overwriting a database file ...
*/
if (filename[0] != '/')
elog(ERROR, "Relative path not allowed for server side"
@ -408,7 +410,10 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp,
attr_count = rel->rd_att->natts;
attr = rel->rd_att->attrs;
/* For binary copy we really only need isvarlena, but compute it all... */
/*
* For binary copy we really only need isvarlena, but compute it
* all...
*/
out_functions = (FmgrInfo *) palloc(attr_count * sizeof(FmgrInfo));
elements = (Oid *) palloc(attr_count * sizeof(Oid));
isvarlena = (bool *) palloc(attr_count * sizeof(bool));
@ -507,10 +512,12 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp,
}
else
{
/*
* If we have a toasted datum, forcibly detoast it to avoid
* memory leakage inside the type's output routine (or
* for binary case, because we must output untoasted value).
* If we have a toasted datum, forcibly detoast it to
* avoid memory leakage inside the type's output routine
* (or for binary case, because we must output untoasted
* value).
*/
if (isvarlena[i])
value = PointerGetDatum(PG_DETOAST_DATUM(origvalue));
@ -552,8 +559,9 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp,
Datum datumBuf;
/*
* We need this horsing around because we don't know
* how shorter data values are aligned within a Datum.
* We need this horsing around because we don't
* know how shorter data values are aligned within
* a Datum.
*/
store_att_byval(&datumBuf, value, fld_size);
CopySendData(&datumBuf,
@ -622,8 +630,8 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp,
/*
* We need a ResultRelInfo so we can use the regular executor's
* index-entry-making machinery. (There used to be a huge amount
* of code here that basically duplicated execUtils.c ...)
* index-entry-making machinery. (There used to be a huge amount of
* code here that basically duplicated execUtils.c ...)
*/
resultRelInfo = makeNode(ResultRelInfo);
resultRelInfo->ri_RangeTableIndex = 1; /* dummy */
@ -673,7 +681,7 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp,
if (CopyGetEof(fp))
elog(ERROR, "COPY BINARY: bogus file header (missing flags)");
file_has_oids = (tmp & (1 << 16)) != 0;
tmp &= ~ (1 << 16);
tmp &= ~(1 << 16);
if ((tmp >> 16) != 0)
elog(ERROR, "COPY BINARY: unrecognized critical flags in header");
/* Header extension length */
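/*
 * Standalone sketch of the flag handling just above (names and return codes
 * are ours): in the COPY BINARY header, bit 16 of the flag word means "OIDs
 * present"; after stripping it, any remaining bit in the upper half is a
 * critical flag this reader does not understand, so the file is rejected,
 * while the low 16 bits are tolerated.
 */
#include <stdint.h>

static int
decode_copy_flags(uint32_t flags, int *file_has_oids)
{
	*file_has_oids = (flags & (1 << 16)) != 0;
	flags &= ~(1 << 16);		/* strip the one bit we understand */
	if ((flags >> 16) != 0)
		return -1;				/* unrecognized critical flags */
	return 0;
}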
@ -794,7 +802,7 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp,
continue; /* it's NULL; nulls[i] already set */
if (fld_size != attr[i]->attlen)
elog(ERROR, "COPY BINARY: sizeof(field %d) is %d, expected %d",
i+1, (int) fld_size, (int) attr[i]->attlen);
i + 1, (int) fld_size, (int) attr[i]->attlen);
if (fld_size == -1)
{
/* varlena field */
@ -833,8 +841,9 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp,
Datum datumBuf;
/*
* We need this horsing around because we don't know
* how shorter data values are aligned within a Datum.
* We need this horsing around because we don't
* know how shorter data values are aligned within
* a Datum.
*/
Assert(fld_size > 0 && fld_size <= sizeof(Datum));
CopyGetData(&datumBuf, fld_size, fp);
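/*
 * Aside on the "horsing around" comments above, not code from the patch:
 * short fixed-width fields are staged through a Datum-sized scratch variable
 * because we cannot assume where (or how well aligned) a 1/2/4-byte value
 * sits inside a Datum or inside the raw input stream.  The alignment half of
 * that argument in miniature -- copy bytes into an aligned temporary instead
 * of casting the stream pointer:
 */
#include <stdint.h>
#include <string.h>

static uint16_t
read_uint16_aligned(const unsigned char *stream)
{
	uint16_t	tmp;

	memcpy(&tmp, stream, sizeof(tmp));	/* never *(const uint16_t *) stream */
	return tmp;
}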
@ -1163,6 +1172,7 @@ CopyAttributeOut(FILE *fp, char *server_string, char *delim)
char *string_start;
int mblen;
int i;
#endif
#ifdef MULTIBYTE
@ -1182,7 +1192,7 @@ CopyAttributeOut(FILE *fp, char *server_string, char *delim)
#endif
#ifdef MULTIBYTE
for (; (mblen = (server_encoding == client_encoding? 1 : pg_encoding_mblen(client_encoding, string))) &&
for (; (mblen = (server_encoding == client_encoding ? 1 : pg_encoding_mblen(client_encoding, string))) &&
((c = *string) != '\0'); string += mblen)
#else
for (; (c = *string) != '\0'; string++)

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/creatinh.c,v 1.72 2001/01/24 19:42:52 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/creatinh.c,v 1.73 2001/03/22 03:59:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -152,8 +152,8 @@ DefineRelation(CreateStmt *stmt, char relkind)
/*
* Open the new relation and acquire exclusive lock on it. This isn't
* really necessary for locking out other backends (since they can't
* see the new rel anyway until we commit), but it keeps the lock manager
* from complaining about deadlock risks.
* see the new rel anyway until we commit), but it keeps the lock
* manager from complaining about deadlock risks.
*/
rel = heap_openr(relname, AccessExclusiveLock);
@ -255,11 +255,11 @@ change_varattnos_walker(Node *node, const AttrNumber *newattno)
if (var->varlevelsup == 0 && var->varno == 1)
{
/*
* ??? the following may be a problem when the
* node is multiply referenced though
* stringToNode() doesn't create such a node
* currently.
* ??? the following may be a problem when the node is
* multiply referenced though stringToNode() doesn't create
* such a node currently.
*/
Assert(newattno[var->varattno - 1] > 0);
var->varattno = newattno[var->varattno - 1];
@ -373,9 +373,12 @@ MergeAttributes(List *schema, List *supers, bool istemp,
AttrNumber attrno;
TupleDesc tupleDesc;
TupleConstr *constr;
AttrNumber *newattno, *partialAttidx;
AttrNumber *newattno,
*partialAttidx;
Node *expr;
int i, attidx, attno_exist;
int i,
attidx,
attno_exist;
relation = heap_openr(name, AccessShareLock);
@ -385,7 +388,8 @@ MergeAttributes(List *schema, List *supers, bool istemp,
if (!istemp && is_temp_rel_name(name))
elog(ERROR, "CREATE TABLE: cannot inherit from temp relation \"%s\"", name);
/* We should have an UNDER permission flag for this, but for now,
/*
* We should have an UNDER permission flag for this, but for now,
* demand that creator of a child table own the parent.
*/
if (!pg_ownercheck(GetUserId(), name, RELNAME))
@ -397,14 +401,15 @@ MergeAttributes(List *schema, List *supers, bool istemp,
/* allocate a new attribute number table and initialize */
newattno = (AttrNumber *) palloc(tupleDesc->natts * sizeof(AttrNumber));
for (i = 0; i < tupleDesc->natts; i++)
newattno [i] = 0;
newattno[i] = 0;
/*
* searching and storing order are different.
* another table is needed.
* searching and storing order are different. another table is
* needed.
*/
partialAttidx = (AttrNumber *) palloc(tupleDesc->natts * sizeof(AttrNumber));
for (i = 0; i < tupleDesc->natts; i++)
partialAttidx [i] = 0;
partialAttidx[i] = 0;
constr = tupleDesc->constr;
attidx = 0;

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/dbcommands.c,v 1.73 2001/01/24 19:42:52 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/dbcommands.c,v 1.74 2001/03/22 03:59:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -84,10 +84,10 @@ createdb(const char *dbname, const char *dbpath,
/*
* Check for db name conflict. There is a race condition here, since
* another backend could create the same DB name before we commit.
* However, holding an exclusive lock on pg_database for the whole time
* we are copying the source database doesn't seem like a good idea,
* so accept possibility of race to create. We will check again after
* we grab the exclusive lock.
* However, holding an exclusive lock on pg_database for the whole
* time we are copying the source database doesn't seem like a good
* idea, so accept possibility of race to create. We will check again
* after we grab the exclusive lock.
*/
if (get_db_info(dbname, NULL, NULL, NULL, NULL, NULL, NULL))
elog(ERROR, "CREATE DATABASE: database \"%s\" already exists", dbname);
@ -102,9 +102,10 @@ createdb(const char *dbname, const char *dbpath,
&src_istemplate, &src_lastsysoid, src_dbpath))
elog(ERROR, "CREATE DATABASE: template \"%s\" does not exist",
dbtemplate);
/*
* Permission check: to copy a DB that's not marked datistemplate,
* you must be superuser or the owner thereof.
* Permission check: to copy a DB that's not marked datistemplate, you
* must be superuser or the owner thereof.
*/
if (!src_istemplate)
{
@ -112,6 +113,7 @@ createdb(const char *dbname, const char *dbpath,
elog(ERROR, "CREATE DATABASE: permission to copy \"%s\" denied",
dbtemplate);
}
/*
* Determine physical path of source database
*/
@ -134,13 +136,15 @@ createdb(const char *dbname, const char *dbpath,
encoding = src_encoding;
/*
* Preassign OID for pg_database tuple, so that we can compute db path.
* Preassign OID for pg_database tuple, so that we can compute db
* path.
*/
dboid = newoid();
/*
* Compute nominal location (where we will try to access the database),
* and resolve alternate physical location if one is specified.
* Compute nominal location (where we will try to access the
* database), and resolve alternate physical location if one is
* specified.
*/
nominal_loc = GetDatabasePath(dboid);
alt_loc = resolve_alt_dbpath(dbpath, dboid);
@ -155,8 +159,8 @@ createdb(const char *dbname, const char *dbpath,
/*
* Force dirty buffers out to disk, to ensure source database is
* up-to-date for the copy. (We really only need to flush buffers
* for the source database...)
* up-to-date for the copy. (We really only need to flush buffers for
* the source database...)
*/
BufferSync();
@ -231,7 +235,8 @@ createdb(const char *dbname, const char *dbpath,
tuple = heap_formtuple(pg_database_dsc, new_record, new_record_nulls);
tuple->t_data->t_oid = dboid; /* override heap_insert's OID selection */
tuple->t_data->t_oid = dboid; /* override heap_insert's OID
* selection */
heap_insert(pg_database_rel, tuple);
@ -311,8 +316,8 @@ dropdb(const char *dbname)
elog(ERROR, "DROP DATABASE: permission denied");
/*
* Disallow dropping a DB that is marked istemplate. This is just
* to prevent people from accidentally dropping template0 or template1;
* Disallow dropping a DB that is marked istemplate. This is just to
* prevent people from accidentally dropping template0 or template1;
* they can do so if they're really determined ...
*/
if (db_istemplate)
@ -338,6 +343,7 @@ dropdb(const char *dbname)
tup = heap_getnext(pgdbscan, 0);
if (!HeapTupleIsValid(tup))
{
/*
* This error should never come up since the existence of the
* database is checked earlier
@ -481,10 +487,10 @@ get_user_info(Oid use_sysid, bool *use_super, bool *use_createdb)
static char *
resolve_alt_dbpath(const char * dbpath, Oid dboid)
resolve_alt_dbpath(const char *dbpath, Oid dboid)
{
const char * prefix;
char * ret;
const char *prefix;
char *ret;
size_t len;
if (dbpath == NULL || dbpath[0] == '\0')
@ -502,7 +508,8 @@ resolve_alt_dbpath(const char * dbpath, Oid dboid)
else
{
/* must be environment variable */
char * var = getenv(dbpath);
char *var = getenv(dbpath);
if (!var)
elog(ERROR, "Postmaster environment variable '%s' not set", dbpath);
if (var[0] != '/')
@ -519,7 +526,7 @@ resolve_alt_dbpath(const char * dbpath, Oid dboid)
static bool
remove_dbdirs(const char * nominal_loc, const char * alt_loc)
remove_dbdirs(const char *nominal_loc, const char *alt_loc)
{
const char *target_dir;
char buf[MAXPGPATH + 100];
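/*
 * Sketch of the alternate-location logic in resolve_alt_dbpath() above, not
 * the function itself: an empty spec means "no alternate path", a spec
 * beginning with '/' is used directly, and anything else names a postmaster
 * environment variable whose value must itself be absolute.  The directory
 * layout appended here and the NULL-instead-of-error behaviour are
 * assumptions made for the sketch.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *
resolve_alt_path(const char *spec, unsigned int dboid)
{
	const char *prefix;
	char	   *ret;

	if (spec == NULL || spec[0] == '\0')
		return NULL;			/* no alternate location requested */
	if (spec[0] == '/')
		prefix = spec;			/* absolute path given literally */
	else
	{
		prefix = getenv(spec);	/* otherwise an environment variable */
		if (prefix == NULL || prefix[0] != '/')
			return NULL;		/* real code raises an error here */
	}
	ret = malloc(strlen(prefix) + 32);
	if (ret != NULL)
		sprintf(ret, "%s/base/%u", prefix, dboid);
	return ret;
}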

View File

@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.52 2001/02/12 20:07:21 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.53 2001/03/22 03:59:22 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@ -70,7 +70,7 @@ case_translate_language_name(const char *input, char *output)
--------------------------------------------------------------------------*/
int i;
for (i = 0; i < NAMEDATALEN-1 && input[i]; ++i)
for (i = 0; i < NAMEDATALEN - 1 && input[i]; ++i)
output[i] = tolower((unsigned char) input[i]);
output[i] = '\0';
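/*
 * The loop above (case_translate_language_name) written out as a standalone
 * helper, purely for illustration: copy at most N - 1 bytes, lower-casing as
 * we go, and always NUL-terminate.  NAMEBUF stands in for NAMEDATALEN.
 */
#include <ctype.h>

#define NAMEBUF 32

static void
downcase_name(const char *input, char *output)
{
	int			i;

	for (i = 0; i < NAMEBUF - 1 && input[i]; ++i)
		output[i] = (char) tolower((unsigned char) input[i]);
	output[i] = '\0';
}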
@ -217,21 +217,26 @@ void
CreateFunction(ProcedureStmt *stmt, CommandDest dest)
{
char *probin_str;
/* pathname of executable file that executes this function, if any */
char *prosrc_str;
/* SQL that executes this function, if any */
char *prorettype;
/* Type of return value (or member of set of values) from function */
char languageName[NAMEDATALEN];
/*
* name of language of function, with case adjusted: "C",
* "internal", "sql", etc.
* name of language of function, with case adjusted: "C", "internal",
* "sql", etc.
*/
bool returnsSet;
/* The function returns a set of values, as opposed to a singleton. */
/*
@ -380,14 +385,14 @@ DefineOperator(char *oprName,
{
typeName1 = defGetString(defel);
if (IsA(defel->arg, TypeName)
&& ((TypeName *) defel->arg)->setof)
&&((TypeName *) defel->arg)->setof)
elog(ERROR, "setof type not implemented for leftarg");
}
else if (strcasecmp(defel->defname, "rightarg") == 0)
{
typeName2 = defGetString(defel);
if (IsA(defel->arg, TypeName)
&& ((TypeName *) defel->arg)->setof)
&&((TypeName *) defel->arg)->setof)
elog(ERROR, "setof type not implemented for rightarg");
}
else if (strcasecmp(defel->defname, "procedure") == 0)
@ -478,8 +483,8 @@ DefineAggregate(char *aggName, List *parameters)
DefElem *defel = (DefElem *) lfirst(pl);
/*
* sfunc1, stype1, and initcond1 are accepted as obsolete spellings
* for sfunc, stype, initcond.
* sfunc1, stype1, and initcond1 are accepted as obsolete
* spellings for sfunc, stype, initcond.
*/
if (strcasecmp(defel->defname, "sfunc") == 0)
transfuncName = defGetString(defel);
@ -543,13 +548,13 @@ DefineType(char *typeName, List *parameters)
char delimiter = DEFAULT_TYPDELIM;
char *shadow_type;
List *pl;
char alignment = 'i'; /* default alignment */
char alignment = 'i';/* default alignment */
char storage = 'p'; /* default storage in TOAST */
/*
* Type names must be one character shorter than other names,
* allowing room to create the corresponding array type name with
* prepended "_".
* Type names must be one character shorter than other names, allowing
* room to create the corresponding array type name with prepended
* "_".
*/
if (strlen(typeName) > (NAMEDATALEN - 2))
{
@ -699,7 +704,9 @@ defGetString(DefElem *def)
return str;
}
case T_Float:
/* T_Float values are kept in string form, so this type cheat
/*
* T_Float values are kept in string form, so this type cheat
* works (and doesn't risk losing precision)
*/
return strVal(def->arg);

View File

@ -5,7 +5,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994-5, Regents of the University of California
*
* $Header: /cvsroot/pgsql/src/backend/commands/explain.c,v 1.64 2001/01/27 01:41:19 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/explain.c,v 1.65 2001/03/22 03:59:22 momjian Exp $
*
*/

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.45 2001/02/23 09:26:14 inoue Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.46 2001/03/22 03:59:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -118,9 +118,9 @@ DefineIndex(char *heapRelationName,
accessMethodName);
/*
* XXX Hardwired hacks to check for limitations on supported index types.
* We really ought to be learning this info from entries in the pg_am
* table, instead of having it wired in here!
* XXX Hardwired hacks to check for limitations on supported index
* types. We really ought to be learning this info from entries in the
* pg_am table, instead of having it wired in here!
*/
if (unique && accessMethodId != BTREE_AM_OID)
elog(ERROR, "DefineIndex: unique indices are only available with the btree access method");
@ -161,7 +161,8 @@ DefineIndex(char *heapRelationName,
elog(ERROR, "Existing indexes are inactive. REINDEX first");
/*
* Prepare arguments for index_create, primarily an IndexInfo structure
* Prepare arguments for index_create, primarily an IndexInfo
* structure
*/
indexInfo = makeNode(IndexInfo);
indexInfo->ii_Predicate = (Node *) cnfPred;
@ -415,7 +416,7 @@ FuncIndexArgs(IndexInfo *indexInfo,
* has exact-match or binary-compatible input types.
* ----------------
*/
if (! func_get_detail(funcIndex->name, nargs, argTypes,
if (!func_get_detail(funcIndex->name, nargs, argTypes,
&funcid, &rettype, &retset, &true_typeids))
func_error("DefineIndex", funcIndex->name, nargs, argTypes, NULL);
@ -425,7 +426,7 @@ FuncIndexArgs(IndexInfo *indexInfo,
for (i = 0; i < nargs; i++)
{
if (argTypes[i] != true_typeids[i] &&
! IS_BINARY_COMPATIBLE(argTypes[i], true_typeids[i]))
!IS_BINARY_COMPATIBLE(argTypes[i], true_typeids[i]))
func_error("DefineIndex", funcIndex->name, nargs, argTypes,
"Index function must be binary-compatible with table datatype");
}
@ -439,7 +440,7 @@ FuncIndexArgs(IndexInfo *indexInfo,
indexInfo->ii_FuncOid = funcid;
/* Need to do the fmgr function lookup now, too */
fmgr_info(funcid, & indexInfo->ii_FuncInfo);
fmgr_info(funcid, &indexInfo->ii_FuncInfo);
}
static void
@ -515,8 +516,8 @@ GetAttrOpClass(IndexElem *attribute, Oid attrType,
attribute->class);
/*
* Assume the opclass is supported by this index access method
* if we can find at least one relevant entry in pg_amop.
* Assume the opclass is supported by this index access method if we
* can find at least one relevant entry in pg_amop.
*/
ScanKeyEntryInitialize(&entry[0], 0,
Anum_pg_amop_amopid,
@ -530,7 +531,7 @@ GetAttrOpClass(IndexElem *attribute, Oid attrType,
relation = heap_openr(AccessMethodOperatorRelationName, AccessShareLock);
scan = heap_beginscan(relation, false, SnapshotNow, 2, entry);
if (! HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
if (!HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
elog(ERROR, "DefineIndex: opclass \"%s\" not supported by access method \"%s\"",
attribute->class, accessMethodName);
@ -540,17 +541,18 @@ GetAttrOpClass(IndexElem *attribute, Oid attrType,
heap_close(relation, AccessShareLock);
/*
* Make sure the operators associated with this opclass actually accept
* the column data type. This prevents possible coredumps caused by
* user errors like applying text_ops to an int4 column. We will accept
* an opclass as OK if the operator's input datatype is binary-compatible
* with the actual column datatype. Note we assume that all the operators
* associated with an opclass accept the same datatypes, so checking the
* first one we happened to find in the table is sufficient.
* Make sure the operators associated with this opclass actually
* accept the column data type. This prevents possible coredumps
* caused by user errors like applying text_ops to an int4 column. We
* will accept an opclass as OK if the operator's input datatype is
* binary-compatible with the actual column datatype. Note we assume
* that all the operators associated with an opclass accept the same
* datatypes, so checking the first one we happened to find in the
* table is sufficient.
*
* If the opclass was the default for the datatype, assume we can skip
* this check --- that saves a few cycles in the most common case.
* If pg_opclass is wrong then we're probably screwed anyway...
* this check --- that saves a few cycles in the most common case. If
* pg_opclass is wrong then we're probably screwed anyway...
*/
if (doTypeCheck)
{
@ -564,7 +566,7 @@ GetAttrOpClass(IndexElem *attribute, Oid attrType,
optup->oprright : optup->oprleft;
if (attrType != opInputType &&
! IS_BINARY_COMPATIBLE(attrType, opInputType))
!IS_BINARY_COMPATIBLE(attrType, opInputType))
elog(ERROR, "DefineIndex: opclass \"%s\" does not accept datatype \"%s\"",
attribute->class, typeidTypeName(attrType));
ReleaseSysCache(tuple);
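/*
 * Aside, not patch code: both checks above (function-argument types and the
 * opclass operator's input type) accept a type if it matches exactly or is
 * "binary compatible".  A self-contained stand-in for that acceptance test;
 * IS_BINARY_COMPATIBLE is a backend macro, and the pair of OIDs below is
 * only an example of the idea, not an authoritative compatibility table.
 */
typedef unsigned int Oid_t;

static int
binary_compatible(Oid_t a, Oid_t b)
{
	/* pretend types 25 and 1043 share an on-disk representation */
	return (a == 25 && b == 1043) || (a == 1043 && b == 25);
}

static int
type_is_acceptable(Oid_t actual, Oid_t expected)
{
	return actual == expected || binary_compatible(actual, expected);
}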
@ -752,18 +754,18 @@ ReindexDatabase(const char *dbname, bool force, bool all)
elog(ERROR, "REINDEX DATABASE: Can be executed only on the currently open database.");
/*
* We cannot run inside a user transaction block; if we were
* inside a transaction, then our commit- and
* start-transaction-command calls would not have the intended effect!
* We cannot run inside a user transaction block; if we were inside a
* transaction, then our commit- and start-transaction-command calls
* would not have the intended effect!
*/
if (IsTransactionBlock())
elog(ERROR, "REINDEX DATABASE cannot run inside a BEGIN/END block");
/*
* Create a memory context that will survive forced transaction commits
* we do below. Since it is a child of QueryContext, it will go away
* eventually even if we suffer an error; there's no need for special
* abort cleanup logic.
* Create a memory context that will survive forced transaction
* commits we do below. Since it is a child of QueryContext, it will
* go away eventually even if we suffer an error; there's no need for
* special abort cleanup logic.
*/
private_context = AllocSetContextCreate(QueryContext,
"ReindexDatabase",

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/remove.c,v 1.59 2001/01/24 19:42:52 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/remove.c,v 1.60 2001/03/22 03:59:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/rename.c,v 1.55 2001/01/24 19:42:52 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/rename.c,v 1.56 2001/03/22 03:59:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -189,15 +189,15 @@ renamerel(const char *oldrelname, const char *newrelname)
newrelname);
/*
* Check for renaming a temp table, which only requires altering
* the temp-table mapping, not the underlying table.
* Check for renaming a temp table, which only requires altering the
* temp-table mapping, not the underlying table.
*/
if (rename_temp_relation(oldrelname, newrelname))
return; /* all done... */
/*
* Instead of using heap_openr(), do it the hard way, so that we
* can rename indexes as well as regular relations.
* Instead of using heap_openr(), do it the hard way, so that we can
* rename indexes as well as regular relations.
*/
targetrelation = RelationNameGetRelation(oldrelname);
@ -219,8 +219,9 @@ renamerel(const char *oldrelname, const char *newrelname)
heap_close(targetrelation, NoLock);
/*
* Flush the relcache entry (easier than trying to change it at exactly
* the right instant). It'll get rebuilt on next access to relation.
* Flush the relcache entry (easier than trying to change it at
* exactly the right instant). It'll get rebuilt on next access to
* relation.
*
* XXX What if relation is myxactonly?
*
@ -244,8 +245,8 @@ renamerel(const char *oldrelname, const char *newrelname)
elog(ERROR, "renamerel: relation \"%s\" exists", newrelname);
/*
* Update pg_class tuple with new relname. (Scribbling on reltup
* is OK because it's a copy...)
* Update pg_class tuple with new relname. (Scribbling on reltup is
* OK because it's a copy...)
*/
StrNCpy(NameStr(((Form_pg_class) GETSTRUCT(reltup))->relname),
newrelname, NAMEDATALEN);

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/sequence.c,v 1.51 2001/03/07 21:20:26 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/sequence.c,v 1.52 2001/03/22 03:59:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -140,7 +140,7 @@ DefineSequence(CreateSeqStmt *seq)
case SEQ_COL_LOG:
typnam->name = "int4";
coldef->colname = "log_cnt";
value[i - 1] = Int32GetDatum((int32)1);
value[i - 1] = Int32GetDatum((int32) 1);
break;
case SEQ_COL_CYCLE:
typnam->name = "char";
@ -311,7 +311,7 @@ nextval(PG_FUNCTION_ARGS)
xlrec.node = elm->rel->rd_node;
rdata[0].buffer = InvalidBuffer;
rdata[0].data = (char*)&xlrec;
rdata[0].data = (char *) &xlrec;
rdata[0].len = sizeof(xl_seq_rec);
rdata[0].next = &(rdata[1]);
@ -319,12 +319,12 @@ nextval(PG_FUNCTION_ARGS)
seq->is_called = 't';
seq->log_cnt = 0;
rdata[1].buffer = InvalidBuffer;
rdata[1].data = (char*)page + ((PageHeader) page)->pd_upper;
rdata[1].len = ((PageHeader)page)->pd_special -
((PageHeader)page)->pd_upper;
rdata[1].data = (char *) page + ((PageHeader) page)->pd_upper;
rdata[1].len = ((PageHeader) page)->pd_special -
((PageHeader) page)->pd_upper;
rdata[1].next = NULL;
recptr = XLogInsert(RM_SEQ_ID, XLOG_SEQ_LOG|XLOG_NO_TRAN, rdata);
recptr = XLogInsert(RM_SEQ_ID, XLOG_SEQ_LOG | XLOG_NO_TRAN, rdata);
PageSetLSN(page, recptr);
PageSetSUI(page, ThisStartUpID);
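/*
 * Illustrative sketch, not from the patch: the rdata[] entries above
 * describe one WAL record as a chain of (pointer, length) segments -- the
 * fixed xl_seq_rec header plus the live span of the sequence page between
 * pd_upper and pd_special -- so the log layer can gather them without an
 * intermediate copy.  A stand-in segment type and gather routine, just to
 * show the shape of that interface:
 */
#include <stddef.h>
#include <string.h>

typedef struct LogSegment
{
	const char *data;
	size_t		len;
	const struct LogSegment *next;
} LogSegment;

static size_t
gather_record(char *dst, size_t dstlen, const LogSegment *seg)
{
	size_t		total = 0;

	for (; seg != NULL; seg = seg->next)
	{
		if (total + seg->len > dstlen)
			return 0;			/* caller's buffer too small */
		memcpy(dst + total, seg->data, seg->len);
		total += seg->len;
	}
	return total;				/* bytes assembled into one record image */
}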
@ -409,7 +409,8 @@ do_setval(char *seqname, int32 next, bool iscalled)
/* save info in local cache */
elm->last = next; /* last returned number */
elm->cached = next; /* last cached number (forget cached values) */
elm->cached = next; /* last cached number (forget cached
* values) */
START_CRIT_SECTION();
{
@ -420,7 +421,7 @@ do_setval(char *seqname, int32 next, bool iscalled)
xlrec.node = elm->rel->rd_node;
rdata[0].buffer = InvalidBuffer;
rdata[0].data = (char*)&xlrec;
rdata[0].data = (char *) &xlrec;
rdata[0].len = sizeof(xl_seq_rec);
rdata[0].next = &(rdata[1]);
@ -428,12 +429,12 @@ do_setval(char *seqname, int32 next, bool iscalled)
seq->is_called = 't';
seq->log_cnt = 0;
rdata[1].buffer = InvalidBuffer;
rdata[1].data = (char*)page + ((PageHeader) page)->pd_upper;
rdata[1].len = ((PageHeader)page)->pd_special -
((PageHeader)page)->pd_upper;
rdata[1].data = (char *) page + ((PageHeader) page)->pd_upper;
rdata[1].len = ((PageHeader) page)->pd_special -
((PageHeader) page)->pd_upper;
rdata[1].next = NULL;
recptr = XLogInsert(RM_SEQ_ID, XLOG_SEQ_LOG|XLOG_NO_TRAN, rdata);
recptr = XLogInsert(RM_SEQ_ID, XLOG_SEQ_LOG | XLOG_NO_TRAN, rdata);
PageSetLSN(page, recptr);
PageSetSUI(page, ThisStartUpID);
@ -511,6 +512,7 @@ get_seq_name(text *seqin)
else
{
seqname = rawname;
/*
* It's important that this match the identifier downcasing code
* used by backend/parser/scan.l.
@ -752,7 +754,8 @@ get_param(DefElem *def)
return -1;
}
void seq_redo(XLogRecPtr lsn, XLogRecord *record)
void
seq_redo(XLogRecPtr lsn, XLogRecord *record)
{
uint8 info = record->xl_info & ~XLR_INFO_MASK;
Relation reln;
@ -760,7 +763,7 @@ void seq_redo(XLogRecPtr lsn, XLogRecord *record)
Page page;
char *item;
Size itemsz;
xl_seq_rec *xlrec = (xl_seq_rec*) XLogRecGetData(record);
xl_seq_rec *xlrec = (xl_seq_rec *) XLogRecGetData(record);
sequence_magic *sm;
if (info != XLOG_SEQ_LOG)
@ -781,10 +784,10 @@ void seq_redo(XLogRecPtr lsn, XLogRecord *record)
sm = (sequence_magic *) PageGetSpecialPointer(page);
sm->magic = SEQ_MAGIC;
item = (char*)xlrec + sizeof(xl_seq_rec);
item = (char *) xlrec + sizeof(xl_seq_rec);
itemsz = record->xl_len - sizeof(xl_seq_rec);
itemsz = MAXALIGN(itemsz);
if (PageAddItem(page, (Item)item, itemsz,
if (PageAddItem(page, (Item) item, itemsz,
FirstOffsetNumber, LP_USED) == InvalidOffsetNumber)
elog(STOP, "seq_redo: failed to add item to page");
@ -795,14 +798,16 @@ void seq_redo(XLogRecPtr lsn, XLogRecord *record)
return;
}
void seq_undo(XLogRecPtr lsn, XLogRecord *record)
void
seq_undo(XLogRecPtr lsn, XLogRecord *record)
{
}
void seq_desc(char *buf, uint8 xl_info, char* rec)
void
seq_desc(char *buf, uint8 xl_info, char *rec)
{
uint8 info = xl_info & ~XLR_INFO_MASK;
xl_seq_rec *xlrec = (xl_seq_rec*) rec;
xl_seq_rec *xlrec = (xl_seq_rec *) rec;
if (info == XLOG_SEQ_LOG)
strcat(buf, "log: ");

View File

@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/trigger.c,v 1.88 2001/03/14 21:50:32 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/trigger.c,v 1.89 2001/03/22 03:59:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -87,7 +87,9 @@ CreateTrigger(CreateTrigStmt *stmt)
constrrelid = InvalidOid;
else
{
/* NoLock is probably sufficient here, since we're only
/*
* NoLock is probably sufficient here, since we're only
* interested in getting the relation's OID...
*/
rel = heap_openr(stmt->constrrelname, NoLock);
@ -211,7 +213,7 @@ CreateTrigger(CreateTrigStmt *stmt)
foreach(le, stmt->args)
{
char *ar = ((Value*) lfirst(le))->val.str;
char *ar = ((Value *) lfirst(le))->val.str;
len += strlen(ar) + 4;
for (; *ar; ar++)
@ -224,7 +226,7 @@ CreateTrigger(CreateTrigStmt *stmt)
args[0] = '\0';
foreach(le, stmt->args)
{
char *s = ((Value*) lfirst(le))->val.str;
char *s = ((Value *) lfirst(le))->val.str;
char *d = args + strlen(args);
while (*s)
@ -577,7 +579,8 @@ RelationBuildTriggers(Relation relation)
DatumGetCString(DirectFunctionCall1(nameout,
NameGetDatum(&pg_trigger->tgname))));
build->tgfoid = pg_trigger->tgfoid;
build->tgfunc.fn_oid = InvalidOid; /* mark FmgrInfo as uninitialized */
build->tgfunc.fn_oid = InvalidOid; /* mark FmgrInfo as
* uninitialized */
build->tgtype = pg_trigger->tgtype;
build->tgenabled = pg_trigger->tgenabled;
build->tgisconstraint = pg_trigger->tgisconstraint;
@ -841,17 +844,17 @@ ExecCallTriggerFunc(Trigger *trigger,
MemoryContext oldContext;
/*
* Fmgr lookup info is cached in the Trigger structure,
* so that we need not repeat the lookup on every call.
* Fmgr lookup info is cached in the Trigger structure, so that we
* need not repeat the lookup on every call.
*/
if (trigger->tgfunc.fn_oid == InvalidOid)
fmgr_info(trigger->tgfoid, &trigger->tgfunc);
/*
* Do the function evaluation in the per-tuple memory context,
* so that leaked memory will be reclaimed once per tuple.
* Note in particular that any new tuple created by the trigger function
* will live till the end of the tuple cycle.
* Do the function evaluation in the per-tuple memory context, so that
* leaked memory will be reclaimed once per tuple. Note in particular
* that any new tuple created by the trigger function will live till
* the end of the tuple cycle.
*/
oldContext = MemoryContextSwitchTo(per_tuple_context);
@ -868,8 +871,8 @@ ExecCallTriggerFunc(Trigger *trigger,
MemoryContextSwitchTo(oldContext);
/*
* Trigger protocol allows function to return a null pointer,
* but NOT to set the isnull result flag.
* Trigger protocol allows function to return a null pointer, but NOT
* to set the isnull result flag.
*/
if (fcinfo.isnull)
elog(ERROR, "ExecCallTriggerFunc: function %u returned NULL",
@ -915,9 +918,7 @@ ExecARInsertTriggers(EState *estate, Relation rel, HeapTuple trigtuple)
if (rel->trigdesc->n_after_row[TRIGGER_EVENT_INSERT] > 0 ||
rel->trigdesc->n_after_row[TRIGGER_EVENT_UPDATE] > 0 ||
rel->trigdesc->n_after_row[TRIGGER_EVENT_DELETE] > 0)
{
DeferredTriggerSaveEvent(rel, TRIGGER_EVENT_INSERT, NULL, trigtuple);
}
}
bool
@ -1240,10 +1241,11 @@ deferredTriggerCheckState(Oid tgoid, int32 itemstate)
static void
deferredTriggerAddEvent(DeferredTriggerEvent event)
{
/*
* Since the event list could grow quite long, we keep track of the
* list tail and append there, rather than just doing a stupid "lappend".
* This avoids O(N^2) behavior for large numbers of events.
* list tail and append there, rather than just doing a stupid
* "lappend". This avoids O(N^2) behavior for large numbers of events.
*/
event->dte_next = NULL;
if (deftrig_event_tail == NULL)
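/*
 * The comment above is the whole point of this sketch (which is ours, not
 * patch code): appending through a remembered tail pointer costs O(1) per
 * event, whereas re-walking the list for every append -- "a stupid lappend"
 * -- makes N appends cost O(N^2).  Event and the head/tail pair are
 * stand-ins for the deferred-trigger structures.
 */
#include <stddef.h>

typedef struct Event
{
	struct Event *next;
	/* ... event payload ... */
} Event;

typedef struct EventList
{
	Event	   *head;
	Event	   *tail;
} EventList;

static void
event_list_append(EventList *list, Event *ev)
{
	ev->next = NULL;
	if (list->tail == NULL)
		list->head = ev;		/* list was empty */
	else
		list->tail->next = ev;
	list->tail = ev;
}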

View File

@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $Header: /cvsroot/pgsql/src/backend/commands/user.c,v 1.73 2001/01/24 19:42:53 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/user.c,v 1.74 2001/03/22 03:59:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -603,6 +603,7 @@ DropUser(DropUserStmt *stmt)
}
heap_endscan(scan);
heap_close(pg_rel, AccessExclusiveLock);
/*
* Advance command counter so that later iterations of this loop
* will see the changes already made. This is essential if, for

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.187 2001/03/14 08:40:57 inoue Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.188 2001/03/22 03:59:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -120,9 +120,9 @@ vacuum(char *vacrel, bool verbose, bool analyze, List *anal_cols)
/*
* Create special memory context for cross-transaction storage.
*
* Since it is a child of QueryContext, it will go away eventually
* even if we suffer an error; there's no need for special abort
* cleanup logic.
* Since it is a child of QueryContext, it will go away eventually even
* if we suffer an error; there's no need for special abort cleanup
* logic.
*/
vac_context = AllocSetContextCreate(QueryContext,
"Vacuum",
@ -215,8 +215,8 @@ vacuum_shutdown()
/*
* Clean up working storage --- note we must do this after
* StartTransactionCommand, else we might be trying to delete
* the active context!
* StartTransactionCommand, else we might be trying to delete the
* active context!
*/
MemoryContextDelete(vac_context);
vac_context = NULL;
@ -360,10 +360,10 @@ vacuum_rel(Oid relid)
{
Relation onerel;
LockRelId onerelid;
VacPageListData vacuum_pages; /* List of pages to vacuum and/or clean
* indices */
VacPageListData fraged_pages; /* List of pages with space enough for
* re-using */
VacPageListData vacuum_pages; /* List of pages to vacuum and/or
* clean indices */
VacPageListData fraged_pages; /* List of pages with space enough
* for re-using */
Relation *Irel;
int32 nindices,
i;
@ -412,9 +412,9 @@ vacuum_rel(Oid relid)
/*
* Get a session-level exclusive lock too. This will protect our
* exclusive access to the relation across multiple transactions,
* so that we can vacuum the relation's TOAST table (if any) secure
* in the knowledge that no one is diddling the parent relation.
* exclusive access to the relation across multiple transactions, so
* that we can vacuum the relation's TOAST table (if any) secure in
* the knowledge that no one is diddling the parent relation.
*
* NOTE: this cannot block, even if someone else is waiting for access,
* because the lock manager knows that both lock requests are from the
@ -459,9 +459,10 @@ vacuum_rel(Oid relid)
else
vacrelstats->hasindex = false;
#ifdef NOT_USED
/*
* reindex in VACUUM is dangerous under WAL.
* ifdef out until it becomes safe.
* reindex in VACUUM is dangerous under WAL. ifdef out until it
* becomes safe.
*/
if (reindex)
{
@ -506,6 +507,7 @@ vacuum_rel(Oid relid)
}
else
{
/*
* Flush dirty pages out to disk. We must do this even if we
* didn't do anything else, because we want to ensure that all
@ -537,11 +539,11 @@ vacuum_rel(Oid relid)
CommitTransactionCommand();
/*
* If the relation has a secondary toast one, vacuum that too
* while we still hold the session lock on the master table.
* We don't need to propagate "analyze" to it, because the toaster
* always uses hardcoded index access and statistics are
* totally unimportant for toast relations
* If the relation has a secondary toast one, vacuum that too while we
* still hold the session lock on the master table. We don't need to
* propagate "analyze" to it, because the toaster always uses
* hardcoded index access and statistics are totally unimportant for
* toast relations
*/
if (toast_relid != InvalidOid)
vacuum_rel(toast_relid);
@ -964,8 +966,8 @@ Re-using: Free/Avail. Space %lu/%lu; EndEmpty/Avail. Pages %u/%u. %s",
nblocks, changed_pages, vacuum_pages->num_pages, empty_pages,
new_pages, num_tuples, tups_vacuumed,
nkeep, vacrelstats->num_vtlinks, ncrash,
nunused, (unsigned long)min_tlen, (unsigned long)max_tlen,
(unsigned long)free_size, (unsigned long)usable_free_size,
nunused, (unsigned long) min_tlen, (unsigned long) max_tlen,
(unsigned long) free_size, (unsigned long) usable_free_size,
empty_end_pages, fraged_pages->num_pages,
show_rusage(&ru0));
@ -1142,8 +1144,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/*
* If this (chain) tuple is moved by me already then I
* have to check is it in vacpage or not - i.e. is it moved
* while cleaning this page or some previous one.
* have to check is it in vacpage or not - i.e. is it
* moved while cleaning this page or some previous one.
*/
if (tuple.t_data->t_infomask & HEAP_MOVED_OFF)
{
@ -1232,8 +1234,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
* xaction and this tuple is already deleted by
* me. Actually, upper part of chain should be
* removed and seems that this should be handled
* in scan_heap(), but it's not implemented at
* the moment and so we just stop shrinking here.
* in scan_heap(), but it's not implemented at the
* moment and so we just stop shrinking here.
*/
ReleaseBuffer(Cbuf);
pfree(vtmove);
@ -1256,8 +1258,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
{
/*
* if to_vacpage no longer has enough free space to be
* useful, remove it from fraged_pages list
* if to_vacpage no longer has enough free space
* to be useful, remove it from fraged_pages list
*/
if (to_vacpage != NULL &&
!enough_space(to_vacpage, vacrelstats->min_tlen))
@ -1460,21 +1462,22 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
*
* NOTE: a nasty bug used to lurk here. It is possible
* for the source and destination pages to be the same
* (since this tuple-chain member can be on a page lower
* than the one we're currently processing in the outer
* loop). If that's true, then after vacuum_page() the
* source tuple will have been moved, and tuple.t_data
* will be pointing at garbage. Therefore we must do
* everything that uses tuple.t_data BEFORE this step!!
* (since this tuple-chain member can be on a page
* lower than the one we're currently processing in
* the outer loop). If that's true, then after
* vacuum_page() the source tuple will have been
* moved, and tuple.t_data will be pointing at
* garbage. Therefore we must do everything that uses
* tuple.t_data BEFORE this step!!
*
* This path is different from the other callers of
* vacuum_page, because we have already incremented the
* vacpage's offsets_used field to account for the
* vacuum_page, because we have already incremented
* the vacpage's offsets_used field to account for the
* tuple(s) we expect to move onto the page. Therefore
* vacuum_page's check for offsets_used == 0 is
* wrong. But since that's a good debugging check for
* all other callers, we work around it here rather
* than remove it.
* vacuum_page's check for offsets_used == 0 is wrong.
* But since that's a good debugging check for all
* other callers, we work around it here rather than
* remove it.
*/
if (!PageIsEmpty(ToPage) && vtmove[ti].cleanVpd)
{
@ -1498,7 +1501,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
if (newoff == InvalidOffsetNumber)
{
elog(STOP, "moving chain: failed to add item with len = %lu to page %u",
(unsigned long)tuple_len, destvacpage->blkno);
(unsigned long) tuple_len, destvacpage->blkno);
}
newitemid = PageGetItemId(ToPage, newoff);
pfree(newtup.t_data);
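/*
 * Sketch of the "nasty bug" note above, not patch code: when the source and
 * destination can be the same page, everything still needed from the old
 * tuple must be read out *before* the call that compacts the page, or the
 * pointer will be left aiming at garbage.  All names below are stand-ins;
 * in the real code the compactor is vacuum_page().
 */
typedef struct DemoPage DemoPage;	/* opaque stand-in for a heap page */

typedef struct DemoItem
{
	int			key;
	int			len;
} DemoItem;

static void
compact_page(DemoPage *page)
{
	(void) page;				/* pretend to shuffle/overwrite items here */
}

static int
move_item(DemoPage *src, DemoPage *dst, const DemoItem *item)
{
	DemoItem	saved = *item;	/* capture what we need BEFORE compacting */

	if (src == dst)
		compact_page(dst);		/* may clobber the storage behind "item" */
	return saved.key + saved.len;	/* safe: we kept a private copy */
}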
@ -1526,7 +1529,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/*
* Set new tuple's t_ctid pointing to itself for last
* tuple in chain, and to next tuple in chain otherwise.
* tuple in chain, and to next tuple in chain
* otherwise.
*/
if (!ItemPointerIsValid(&Ctid))
newtup.t_data->t_ctid = newtup.t_self;
@ -1552,13 +1556,15 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
if (Irel != (Relation *) NULL)
{
/*
* XXX using CurrentMemoryContext here means
* intra-vacuum memory leak for functional indexes.
* Should fix someday.
* intra-vacuum memory leak for functional
* indexes. Should fix someday.
*
* XXX This code fails to handle partial indexes!
* Probably should change it to use ExecOpenIndices.
* Probably should change it to use
* ExecOpenIndices.
*/
for (i = 0; i < nindices; i++)
{
@ -1653,7 +1659,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
{
elog(STOP, "\
failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)",
(unsigned long)tuple_len, cur_page->blkno, (unsigned long)cur_page->free,
(unsigned long) tuple_len, cur_page->blkno, (unsigned long) cur_page->free,
cur_page->offsets_used, cur_page->offsets_free);
}
newitemid = PageGetItemId(ToPage, newoff);
@ -1698,13 +1704,13 @@ failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)"
/* insert index' tuples if needed */
if (Irel != (Relation *) NULL)
{
/*
* XXX using CurrentMemoryContext here means
* intra-vacuum memory leak for functional indexes.
* Should fix someday.
* XXX using CurrentMemoryContext here means intra-vacuum
* memory leak for functional indexes. Should fix someday.
*
* XXX This code fails to handle partial indexes!
* Probably should change it to use ExecOpenIndices.
* XXX This code fails to handle partial indexes! Probably
* should change it to use ExecOpenIndices.
*/
for (i = 0; i < nindices; i++)
{
@ -1803,14 +1809,15 @@ failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)"
if (num_moved > 0)
{
/*
* We have to commit our tuple movings before we truncate the
* relation. Ideally we should do Commit/StartTransactionCommand
* here, relying on the session-level table lock to protect our
* exclusive access to the relation. However, that would require
* a lot of extra code to close and re-open the relation, indices,
* etc. For now, a quick hack: record status of current transaction
* as committed, and continue.
* etc. For now, a quick hack: record status of current
* transaction as committed, and continue.
*/
RecordTransactionCommit();
}
@ -1907,7 +1914,7 @@ failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)"
vacpage->offsets_free > 0)
{
char unbuf[BLCKSZ];
OffsetNumber *unused = (OffsetNumber*)unbuf;
OffsetNumber *unused = (OffsetNumber *) unbuf;
int uncnt;
buf = ReadBuffer(onerel, vacpage->blkno);
@ -1943,8 +1950,9 @@ failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)"
uncnt = PageRepairFragmentation(page, unused);
{
XLogRecPtr recptr;
recptr = log_heap_clean(onerel, buf, (char*)unused,
(char*)(&(unused[uncnt])) - (char*)unused);
recptr = log_heap_clean(onerel, buf, (char *) unused,
(char *) (&(unused[uncnt])) - (char *) unused);
PageSetLSN(page, recptr);
PageSetSUI(page, ThisStartUpID);
}
@ -1962,9 +1970,9 @@ failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)"
/*
* Flush dirty pages out to disk. We do this unconditionally, even if
* we don't need to truncate, because we want to ensure that all tuples
* have correct on-row commit status on disk (see bufmgr.c's comments
* for FlushRelationBuffers()).
* we don't need to truncate, because we want to ensure that all
* tuples have correct on-row commit status on disk (see bufmgr.c's
* comments for FlushRelationBuffers()).
*/
i = FlushRelationBuffers(onerel, blkno);
if (i < 0)
@ -2005,8 +2013,7 @@ vacuum_heap(VRelStats *vacrelstats, Relation onerel, VacPageList vacuum_pages)
int i;
nblocks = vacuum_pages->num_pages;
nblocks -= vacuum_pages->empty_end_pages; /* nothing to do with
* them */
nblocks -= vacuum_pages->empty_end_pages; /* nothing to do with them */
for (i = 0, vacpage = vacuum_pages->pagedesc; i < nblocks; i++, vacpage++)
{
@ -2022,9 +2029,9 @@ vacuum_heap(VRelStats *vacrelstats, Relation onerel, VacPageList vacuum_pages)
/*
* Flush dirty pages out to disk. We do this unconditionally, even if
* we don't need to truncate, because we want to ensure that all tuples
* have correct on-row commit status on disk (see bufmgr.c's comments
* for FlushRelationBuffers()).
* we don't need to truncate, because we want to ensure that all
* tuples have correct on-row commit status on disk (see bufmgr.c's
* comments for FlushRelationBuffers()).
*/
Assert(vacrelstats->num_pages >= vacuum_pages->empty_end_pages);
nblocks = vacrelstats->num_pages - vacuum_pages->empty_end_pages;
@ -2042,7 +2049,8 @@ vacuum_heap(VRelStats *vacrelstats, Relation onerel, VacPageList vacuum_pages)
vacrelstats->num_pages, nblocks);
nblocks = smgrtruncate(DEFAULT_SMGR, onerel, nblocks);
Assert(nblocks >= 0);
vacrelstats->num_pages = nblocks; /* set new number of blocks */
vacrelstats->num_pages = nblocks; /* set new number of
* blocks */
}
}
@ -2054,7 +2062,7 @@ static void
vacuum_page(Relation onerel, Buffer buffer, VacPage vacpage)
{
char unbuf[BLCKSZ];
OffsetNumber *unused = (OffsetNumber*)unbuf;
OffsetNumber *unused = (OffsetNumber *) unbuf;
int uncnt;
Page page = BufferGetPage(buffer);
ItemId itemid;
@ -2072,8 +2080,9 @@ vacuum_page(Relation onerel, Buffer buffer, VacPage vacpage)
uncnt = PageRepairFragmentation(page, unused);
{
XLogRecPtr recptr;
recptr = log_heap_clean(onerel, buffer, (char*)unused,
(char*)(&(unused[uncnt])) - (char*)unused);
recptr = log_heap_clean(onerel, buffer, (char *) unused,
(char *) (&(unused[uncnt])) - (char *) unused);
PageSetLSN(page, recptr);
PageSetSUI(page, ThisStartUpID);
}
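/*
 * Small aside, not patch code: both callers above park the list of unused
 * line pointers in a page-sized scratch buffer and pass log_heap_clean()
 * the array plus its size in bytes, computed by pointer arithmetic over the
 * filled prefix.  The same length calculation spelled out with a stand-in
 * OffsetNumber type:
 */
#include <stddef.h>

typedef unsigned short OffsetNumber_t;

static size_t
unused_array_bytes(const OffsetNumber_t *unused, int uncnt)
{
	/* identical to (size_t) uncnt * sizeof(OffsetNumber_t) */
	return (size_t) ((const char *) &unused[uncnt] - (const char *) unused);
}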

View File

@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/variable.c,v 1.45 2001/01/24 19:42:53 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/variable.c,v 1.46 2001/03/22 03:59:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -453,6 +453,7 @@ parse_DefaultXactIsoLevel(char *value)
{
#if 0
TransactionState s = CurrentTransactionState;
#endif
if (value == NULL)
@ -701,25 +702,24 @@ reset_server_encoding(void)
void
SetPGVariable(const char *name, const char *value)
{
char *mvalue = value ? pstrdup(value) : ((char*) NULL);
char *mvalue = value ? pstrdup(value) : ((char *) NULL);
/*
* Special cases ought to be removed and handled separately
* by TCOP
* Special cases ought to be removed and handled separately by TCOP
*/
if (strcasecmp(name, "datestyle")==0)
if (strcasecmp(name, "datestyle") == 0)
parse_date(mvalue);
else if (strcasecmp(name, "timezone")==0)
else if (strcasecmp(name, "timezone") == 0)
parse_timezone(mvalue);
else if (strcasecmp(name, "DefaultXactIsoLevel")==0)
else if (strcasecmp(name, "DefaultXactIsoLevel") == 0)
parse_DefaultXactIsoLevel(mvalue);
else if (strcasecmp(name, "XactIsoLevel")==0)
else if (strcasecmp(name, "XactIsoLevel") == 0)
parse_XactIsoLevel(mvalue);
else if (strcasecmp(name, "client_encoding")==0)
else if (strcasecmp(name, "client_encoding") == 0)
parse_client_encoding(mvalue);
else if (strcasecmp(name, "server_encoding")==0)
else if (strcasecmp(name, "server_encoding") == 0)
parse_server_encoding(mvalue);
else if (strcasecmp(name, "random_seed")==0)
else if (strcasecmp(name, "random_seed") == 0)
parse_random_seed(mvalue);
else
SetConfigOption(name, value, superuser() ? PGC_SUSET : PGC_USERSET);
@ -732,23 +732,24 @@ SetPGVariable(const char *name, const char *value)
void
GetPGVariable(const char *name)
{
if (strcasecmp(name, "datestyle")==0)
if (strcasecmp(name, "datestyle") == 0)
show_date();
else if (strcasecmp(name, "timezone")==0)
else if (strcasecmp(name, "timezone") == 0)
show_timezone();
else if (strcasecmp(name, "DefaultXactIsoLevel")==0)
else if (strcasecmp(name, "DefaultXactIsoLevel") == 0)
show_DefaultXactIsoLevel();
else if (strcasecmp(name, "XactIsoLevel")==0)
else if (strcasecmp(name, "XactIsoLevel") == 0)
show_XactIsoLevel();
else if (strcasecmp(name, "client_encoding")==0)
else if (strcasecmp(name, "client_encoding") == 0)
show_client_encoding();
else if (strcasecmp(name, "server_encoding")==0)
else if (strcasecmp(name, "server_encoding") == 0)
show_server_encoding();
else if (strcasecmp(name, "random_seed")==0)
else if (strcasecmp(name, "random_seed") == 0)
show_random_seed();
else
{
const char * val = GetConfigOption(name);
const char *val = GetConfigOption(name);
elog(NOTICE, "%s is %s", name, val);
}
}
@ -756,19 +757,19 @@ GetPGVariable(const char *name)
void
ResetPGVariable(const char *name)
{
if (strcasecmp(name, "datestyle")==0)
if (strcasecmp(name, "datestyle") == 0)
reset_date();
else if (strcasecmp(name, "timezone")==0)
else if (strcasecmp(name, "timezone") == 0)
reset_timezone();
else if (strcasecmp(name, "DefaultXactIsoLevel")==0)
else if (strcasecmp(name, "DefaultXactIsoLevel") == 0)
reset_DefaultXactIsoLevel();
else if (strcasecmp(name, "XactIsoLevel")==0)
else if (strcasecmp(name, "XactIsoLevel") == 0)
reset_XactIsoLevel();
else if (strcasecmp(name, "client_encoding")==0)
else if (strcasecmp(name, "client_encoding") == 0)
reset_client_encoding();
else if (strcasecmp(name, "server_encoding")==0)
else if (strcasecmp(name, "server_encoding") == 0)
reset_server_encoding();
else if (strcasecmp(name, "random_seed")==0)
else if (strcasecmp(name, "random_seed") == 0)
reset_random_seed();
else
SetConfigOption(name, NULL, superuser() ? PGC_SUSET : PGC_USERSET);
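
SetPGVariable, GetPGVariable and ResetPGVariable in the hunks above all dispatch on the variable name with a chain of strcasecmp() == 0 tests and fall through to the generic GUC routine. A cut-down, self-contained sketch of that pattern is below; the handler names and the generic fallback are hypothetical stand-ins, not the backend's parse_date()/SetConfigOption().

#include <stdio.h>
#include <strings.h>            /* strcasecmp */

/* hypothetical handlers standing in for parse_date(), parse_timezone(), ... */
static void
handle_datestyle(const char *v)
{
    printf("datestyle := %s\n", v);
}

static void
handle_timezone(const char *v)
{
    printf("timezone := %s\n", v);
}

static void
handle_generic(const char *name, const char *v)
{
    /* SetConfigOption() plays this role in the real code */
    printf("generic option %s := %s\n", name, v);
}

static void
set_variable(const char *name, const char *value)
{
    /* case-insensitive dispatch, mirroring SetPGVariable above */
    if (strcasecmp(name, "datestyle") == 0)
        handle_datestyle(value);
    else if (strcasecmp(name, "timezone") == 0)
        handle_timezone(value);
    else
        handle_generic(name, value);
}

int
main(void)
{
    set_variable("DateStyle", "ISO");
    set_variable("some_other_option", "42");
    return 0;
}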

View File

@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $Id: view.c,v 1.53 2001/01/24 19:42:53 momjian Exp $
* $Id: view.c,v 1.54 2001/03/22 03:59:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -57,7 +57,7 @@ DefineVirtualRelation(char *relname, List *tlist)
TargetEntry *entry = lfirst(t);
Resdom *res = entry->resdom;
if (! res->resjunk)
if (!res->resjunk)
{
char *resname = res->resname;
char *restypename = typeidTypeName(res->restype);
@ -118,9 +118,9 @@ MakeRetrieveViewRuleName(char *viewName)
snprintf(buf, buflen, "_RET%s", viewName);
/* clip to less than NAMEDATALEN bytes, if necessary */
#ifdef MULTIBYTE
maxlen = pg_mbcliplen(buf, strlen(buf), NAMEDATALEN-1);
maxlen = pg_mbcliplen(buf, strlen(buf), NAMEDATALEN - 1);
#else
maxlen = NAMEDATALEN-1;
maxlen = NAMEDATALEN - 1;
#endif
if (maxlen < buflen)
buf[maxlen] = '\0';
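
The MakeRetrieveViewRuleName hunk above builds the view's ON SELECT rule name as "_RET" plus the view name and clips it to NAMEDATALEN - 1 bytes (using pg_mbcliplen for a multibyte-safe clip when MULTIBYTE is defined). Below is a single-byte-only sketch of the same shape; NAMEDATALEN is defined locally with the then-current default of 32, purely for illustration.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NAMEDATALEN 32          /* historical default; illustrative only */

/* build "_RET<viewName>", truncated to NAMEDATALEN - 1 bytes */
static char *
make_ret_rule_name(const char *viewName)
{
    size_t      buflen = strlen(viewName) + 5;  /* "_RET" + name + '\0' */
    char       *buf = malloc(buflen);
    size_t      maxlen;

    if (buf == NULL)
        return NULL;
    snprintf(buf, buflen, "_RET%s", viewName);

    /* single-byte clip; the real code uses pg_mbcliplen under MULTIBYTE */
    maxlen = NAMEDATALEN - 1;
    if (maxlen < strlen(buf))
        buf[maxlen] = '\0';

    return buf;
}

int
main(void)
{
    char       *name = make_ret_rule_name("a_rather_long_view_name_that_overflows");

    if (name != NULL)
    {
        printf("%s (%zu bytes)\n", name, strlen(name));
        free(name);
    }
    return 0;
}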
@ -211,12 +211,12 @@ UpdateRangeTableOfViewParse(char *viewName, Query *viewParse)
*rt_entry2;
/*
* Make a copy of the given parsetree. It's not so much that we
* don't want to scribble on our input, it's that the parser has
* a bad habit of outputting multiple links to the same subtree
* for constructs like BETWEEN, and we mustn't have OffsetVarNodes
* increment the varno of a Var node twice. copyObject will expand
* any multiply-referenced subtree into multiple copies.
* Make a copy of the given parsetree. It's not so much that we don't
* want to scribble on our input, it's that the parser has a bad habit
* of outputting multiple links to the same subtree for constructs
* like BETWEEN, and we mustn't have OffsetVarNodes increment the
* varno of a Var node twice. copyObject will expand any
* multiply-referenced subtree into multiple copies.
*/
viewParse = (Query *) copyObject(viewParse);
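
The rewrapped comment above explains why UpdateRangeTableOfViewParse copies the whole parse tree first: the parser can emit multiple links to one subtree (e.g. for BETWEEN), and an in-place pass such as OffsetVarNodes must not bump the same Var node twice. A toy standalone sketch of that hazard and the deep-copy fix, using a hypothetical two-node "tree" rather than real parse nodes:

#include <stdio.h>
#include <stdlib.h>

/* toy stand-in for a Var node carrying a range-table index */
typedef struct Var
{
    int         varno;
} Var;

/* toy expression that, like BETWEEN, may reference the same subtree twice */
typedef struct Expr
{
    Var        *left;
    Var        *right;
} Expr;

/* the in-place pass: bump every referenced varno by 'offset' */
static void
offset_varnodes(Expr *e, int offset)
{
    e->left->varno += offset;
    e->right->varno += offset;
}

/* deep copy of one node, as copyObject would do for the whole tree */
static Var *
copy_var(const Var *v)
{
    Var        *c = malloc(sizeof(Var));

    c->varno = v->varno;
    return c;
}

int
main(void)
{
    Var         shared = {1};
    Expr        aliased = {&shared, &shared};   /* both links hit one node */
    Expr        copied = {copy_var(&shared), copy_var(&shared)};

    offset_varnodes(&aliased, 2);
    offset_varnodes(&copied, 2);

    /* aliased: the shared node got bumped twice (1 -> 5); copied: 3 and 3 */
    printf("aliased: %d %d, copied: %d %d\n",
           aliased.left->varno, aliased.right->varno,
           copied.left->varno, copied.right->varno);

    free(copied.left);
    free(copied.right);
    return 0;
}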
@ -261,6 +261,7 @@ UpdateRangeTableOfViewParse(char *viewName, Query *viewParse)
void
DefineView(char *viewName, Query *viewParse)
{
/*
* Create the "view" relation NOTE: if it already exists, the xact
* will be aborted.
@ -295,9 +296,10 @@ DefineView(char *viewName, Query *viewParse)
void
RemoveView(char *viewName)
{
/*
* We just have to drop the relation; the associated rules will
* be cleaned up automatically.
* We just have to drop the relation; the associated rules will be
* cleaned up automatically.
*/
heap_drop_with_catalog(viewName, allowSystemTableMods);
}

View File

@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $Id: execAmi.c,v 1.56 2001/01/24 19:42:53 momjian Exp $
* $Id: execAmi.c,v 1.57 2001/03/22 03:59:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execJunk.c,v 1.25 2001/01/29 00:39:17 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execJunk.c,v 1.26 2001/03/22 03:59:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -265,6 +265,7 @@ ExecInitJunkFilter(List *targetList, TupleDesc tupType)
void
ExecFreeJunkFilter(JunkFilter *junkfilter)
{
/*
* Since the junkfilter is inside its own context, we just have to
* delete the context and we're set.

View File

@ -27,7 +27,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.138 2001/01/29 00:39:18 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.139 2001/03/22 03:59:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -287,6 +287,7 @@ ExecutorEnd(QueryDesc *queryDesc, EState *estate)
static void
ExecCheckQueryPerms(CmdType operation, Query *parseTree, Plan *plan)
{
/*
* Check RTEs in the query's primary rangetable.
*/
@ -405,12 +406,13 @@ ExecCheckRTEPerms(RangeTblEntry *rte, CmdType operation)
relName = rte->relname;
/*
* userid to check as: current user unless we have a setuid indication.
* userid to check as: current user unless we have a setuid
* indication.
*
* Note: GetUserId() is presently fast enough that there's no harm
* in calling it separately for each RTE. If that stops being true,
* we could call it once in ExecCheckQueryPerms and pass the userid
* down from there. But for now, no need for the extra clutter.
* Note: GetUserId() is presently fast enough that there's no harm in
* calling it separately for each RTE. If that stops being true, we
* could call it once in ExecCheckQueryPerms and pass the userid down
* from there. But for now, no need for the extra clutter.
*/
userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
@ -426,6 +428,7 @@ ExecCheckRTEPerms(RangeTblEntry *rte, CmdType operation)
if (rte->checkForWrite)
{
/*
* Note: write access in a SELECT context means SELECT FOR UPDATE.
* Right now we don't distinguish that from true update as far as
@ -519,6 +522,7 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
if (resultRelations != NIL)
{
/*
* Multiple result relations (due to inheritance)
* parseTree->resultRelations identifies them all
@ -541,8 +545,10 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
}
else
{
/*
* Single result relation identified by parseTree->resultRelation
* Single result relation identified by
* parseTree->resultRelation
*/
numResultRelations = 1;
resultRelInfos = (ResultRelInfo *) palloc(sizeof(ResultRelInfo));
@ -559,6 +565,7 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
}
else
{
/*
* if no result relation, then set state appropriately
*/
@ -616,10 +623,10 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
tupType = ExecGetTupType(plan); /* tuple descriptor */
/*
* Initialize the junk filter if needed. SELECT and INSERT queries need
* a filter if there are any junk attrs in the tlist. UPDATE and
* DELETE always need one, since there's always a junk 'ctid' attribute
* present --- no need to look first.
* Initialize the junk filter if needed. SELECT and INSERT queries
* need a filter if there are any junk attrs in the tlist. UPDATE and
* DELETE always need one, since there's always a junk 'ctid'
* attribute present --- no need to look first.
*/
{
bool junk_filter_needed = false;
@ -650,11 +657,12 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
if (junk_filter_needed)
{
/*
* If there are multiple result relations, each one needs
* its own junk filter. Note this is only possible for
* UPDATE/DELETE, so we can't be fooled by some needing
* a filter and some not.
* If there are multiple result relations, each one needs its
* own junk filter. Note this is only possible for
* UPDATE/DELETE, so we can't be fooled by some needing a
* filter and some not.
*/
if (parseTree->resultRelations != NIL)
{
@ -678,6 +686,7 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
resultRelInfo++;
subplans = lnext(subplans);
}
/*
* Set active junkfilter too; at this point ExecInitAppend
* has already selected an active result relation...
@ -750,10 +759,10 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
CommandCounterIncrement();
/*
* If necessary, create a TOAST table for the into relation.
* Note that AlterTableCreateToastTable ends with
* CommandCounterIncrement(), so that the TOAST table will
* be visible for insertion.
* If necessary, create a TOAST table for the into
* relation. Note that AlterTableCreateToastTable ends
* with CommandCounterIncrement(), so that the TOAST table
* will be visible for insertion.
*/
AlterTableCreateToastTable(intoName, true);
@ -817,9 +826,8 @@ initResultRelInfo(ResultRelInfo *resultRelInfo,
/*
* If there are indices on the result relation, open them and save
* descriptors in the result relation info, so that we can add new
* index entries for the tuples we add/update. We need not do
* this for a DELETE, however, since deletion doesn't affect
* indexes.
* index entries for the tuples we add/update. We need not do this
* for a DELETE, however, since deletion doesn't affect indexes.
*/
if (resultRelationDesc->rd_rel->relhasindex &&
operation != CMD_DELETE)
@ -857,8 +865,8 @@ EndPlan(Plan *plan, EState *estate)
estate->es_tupleTable = NULL;
/*
* close the result relation(s) if any, but hold locks
* until xact commit. Also clean up junkfilters if present.
* close the result relation(s) if any, but hold locks until xact
* commit. Also clean up junkfilters if present.
*/
resultRelInfo = estate->es_result_relations;
for (i = estate->es_num_result_relations; i > 0; i--)
@ -1227,11 +1235,12 @@ ExecAppend(TupleTableSlot *slot,
if (newtuple != tuple) /* modified by Trigger(s) */
{
/*
* Insert modified tuple into tuple table slot, replacing the
* original. We assume that it was allocated in per-tuple
* memory context, and therefore will go away by itself.
* The tuple table slot should not try to clear it.
* memory context, and therefore will go away by itself. The
* tuple table slot should not try to clear it.
*/
ExecStoreTuple(newtuple, slot, InvalidBuffer, false);
tuple = newtuple;
@ -1411,11 +1420,12 @@ ExecReplace(TupleTableSlot *slot,
if (newtuple != tuple) /* modified by Trigger(s) */
{
/*
* Insert modified tuple into tuple table slot, replacing the
* original. We assume that it was allocated in per-tuple
* memory context, and therefore will go away by itself.
* The tuple table slot should not try to clear it.
* memory context, and therefore will go away by itself. The
* tuple table slot should not try to clear it.
*/
ExecStoreTuple(newtuple, slot, InvalidBuffer, false);
tuple = newtuple;
@ -1469,10 +1479,10 @@ lreplace:;
/*
* Note: instead of having to update the old index tuples associated
* with the heap tuple, all we do is form and insert new index
* tuples. This is because replaces are actually deletes and inserts
* and index tuple deletion is done automagically by the vacuum
* daemon. All we do is insert new index tuples. -cim 9/27/89
* with the heap tuple, all we do is form and insert new index tuples.
* This is because replaces are actually deletes and inserts and index
* tuple deletion is done automagically by the vacuum daemon. All we
* do is insert new index tuples. -cim 9/27/89
*/
/*
@ -1525,8 +1535,8 @@ ExecRelCheck(ResultRelInfo *resultRelInfo,
}
/*
* We will use the EState's per-tuple context for evaluating constraint
* expressions (creating it if it's not already there).
* We will use the EState's per-tuple context for evaluating
* constraint expressions (creating it if it's not already there).
*/
econtext = GetPerTupleExprContext(estate);
@ -1568,10 +1578,10 @@ ExecConstraints(char *caller, ResultRelInfo *resultRelInfo,
for (attrChk = 1; attrChk <= natts; attrChk++)
{
if (rel->rd_att->attrs[attrChk-1]->attnotnull &&
if (rel->rd_att->attrs[attrChk - 1]->attnotnull &&
heap_attisnull(tuple, attrChk))
elog(ERROR, "%s: Fail to add null value in not null attribute %s",
caller, NameStr(rel->rd_att->attrs[attrChk-1]->attname));
caller, NameStr(rel->rd_att->attrs[attrChk - 1]->attname));
}
}
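
The last execMain.c hunk above is the NOT NULL check in ExecConstraints: walk attributes 1..natts and complain when a column is declared attnotnull but the incoming tuple is NULL there. Here is a self-contained sketch of the same loop over hypothetical descriptor and tuple structs (not the real TupleDesc/HeapTuple API); it also treats attribute numbers beyond the tuple's stored count as NULL, matching heap_attisnull's convention.

#include <stdbool.h>
#include <stdio.h>

/* hypothetical per-column descriptor, standing in for Form_pg_attribute */
typedef struct ColDesc
{
    const char *attname;
    bool        attnotnull;
} ColDesc;

/* hypothetical tuple: only the leading natts attributes are stored */
typedef struct Tuple
{
    int         natts;
    const bool *isnull;         /* isnull[i] true when column i+1 is NULL */
} Tuple;

/* returns the name of the first violated NOT NULL column, or NULL if none */
static const char *
check_not_null(const ColDesc *cols, int natts, const Tuple *tuple)
{
    int         attrChk;

    for (attrChk = 1; attrChk <= natts; attrChk++)
    {
        if (cols[attrChk - 1].attnotnull &&
            (attrChk > tuple->natts || tuple->isnull[attrChk - 1]))
            return cols[attrChk - 1].attname;
    }
    return NULL;
}

int
main(void)
{
    ColDesc     cols[2] = {{"id", true}, {"note", false}};
    bool        nulls[2] = {true, false};   /* "id" is NULL: must be rejected */
    Tuple       t = {2, nulls};
    const char *bad = check_not_null(cols, 2, &t);

    if (bad)
        printf("cannot add null value in not null attribute %s\n", bad);
    return 0;
}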

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execQual.c,v 1.83 2001/01/29 00:39:18 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execQual.c,v 1.84 2001/03/22 03:59:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -112,10 +112,11 @@ ExecEvalArrayRef(ArrayRef *arrayRef,
econtext,
isNull,
isDone));
/*
* If refexpr yields NULL, result is always NULL, for now anyway.
* (This means you cannot assign to an element or slice of an array
* that's NULL; it'll just stay NULL.)
* (This means you cannot assign to an element or slice of an
* array that's NULL; it'll just stay NULL.)
*/
if (*isNull)
return (Datum) NULL;
@ -147,7 +148,7 @@ ExecEvalArrayRef(ArrayRef *arrayRef,
/* If any index expr yields NULL, result is NULL or source array */
if (*isNull)
{
if (! isAssignment || array_source == NULL)
if (!isAssignment || array_source == NULL)
return (Datum) NULL;
*isNull = false;
return PointerGetDatum(array_source);
@ -166,10 +167,14 @@ ExecEvalArrayRef(ArrayRef *arrayRef,
econtext,
isNull,
NULL));
/* If any index expr yields NULL, result is NULL or source array */
/*
* If any index expr yields NULL, result is NULL or source
* array
*/
if (*isNull)
{
if (! isAssignment || array_source == NULL)
if (!isAssignment || array_source == NULL)
return (Datum) NULL;
*isNull = false;
return PointerGetDatum(array_source);
@ -189,9 +194,10 @@ ExecEvalArrayRef(ArrayRef *arrayRef,
econtext,
isNull,
NULL);
/*
* For now, can't cope with inserting NULL into an array,
* so make it a no-op per discussion above...
* For now, can't cope with inserting NULL into an array, so make
* it a no-op per discussion above...
*/
if (*isNull)
{
@ -601,10 +607,12 @@ ExecEvalFuncArgs(FunctionCachePtr fcache,
if (thisArgIsDone != ExprSingleResult)
{
/*
* We allow only one argument to have a set value; we'd need
* much more complexity to keep track of multiple set arguments
* (cf. ExecTargetList) and it doesn't seem worth it.
* much more complexity to keep track of multiple set
* arguments (cf. ExecTargetList) and it doesn't seem worth
* it.
*/
if (argIsDone != ExprSingleResult)
elog(ERROR, "Functions and operators can take only one set argument");
@ -639,8 +647,8 @@ ExecMakeFunctionResult(FunctionCachePtr fcache,
/*
* arguments is a list of expressions to evaluate before passing to
* the function manager. We skip the evaluation if it was already
* done in the previous call (ie, we are continuing the evaluation
* of a set-valued function). Otherwise, collect the current argument
* done in the previous call (ie, we are continuing the evaluation of
* a set-valued function). Otherwise, collect the current argument
* values into fcache->fcinfo.
*/
if (fcache->fcinfo.nargs > 0 && !fcache->argsValid)
@ -664,6 +672,7 @@ ExecMakeFunctionResult(FunctionCachePtr fcache,
*/
if (fcache->func.fn_retset || fcache->hasSetArg)
{
/*
* We need to return a set result. Complain if caller not ready
* to accept one.
@ -672,15 +681,16 @@ ExecMakeFunctionResult(FunctionCachePtr fcache,
elog(ERROR, "Set-valued function called in context that cannot accept a set");
/*
* This loop handles the situation where we have both a set argument
* and a set-valued function. Once we have exhausted the function's
* value(s) for a particular argument value, we have to get the next
* argument value and start the function over again. We might have
* to do it more than once, if the function produces an empty result
* set for a particular input value.
* This loop handles the situation where we have both a set
* argument and a set-valued function. Once we have exhausted the
* function's value(s) for a particular argument value, we have to
* get the next argument value and start the function over again.
* We might have to do it more than once, if the function produces
* an empty result set for a particular input value.
*/
for (;;)
{
/*
* If function is strict, and there are any NULL arguments,
* skip calling the function (at least for this set of args).
@ -716,13 +726,15 @@ ExecMakeFunctionResult(FunctionCachePtr fcache,
if (*isDone != ExprEndResult)
{
/*
* Got a result from current argument. If function itself
* returns set, flag that we want to reuse current argument
* values on next call.
* returns set, flag that we want to reuse current
* argument values on next call.
*/
if (fcache->func.fn_retset)
fcache->argsValid = true;
/*
* Make sure we say we are returning a set, even if the
* function itself doesn't return sets.
@ -762,11 +774,12 @@ ExecMakeFunctionResult(FunctionCachePtr fcache,
}
else
{
/*
* Non-set case: much easier.
*
* If function is strict, and there are any NULL arguments,
* skip calling the function and return NULL.
* If function is strict, and there are any NULL arguments, skip
* calling the function and return NULL.
*/
if (fcache->func.fn_strict)
{
@ -852,9 +865,9 @@ ExecEvalFunc(Expr *funcClause,
FunctionCachePtr fcache;
/*
* we extract the oid of the function associated with the func node and
* then pass the work onto ExecMakeFunctionResult which evaluates the
* arguments and returns the result of calling the function on the
* we extract the oid of the function associated with the func node
* and then pass the work onto ExecMakeFunctionResult which evaluates
* the arguments and returns the result of calling the function on the
* evaluated arguments.
*
* this is nearly identical to the ExecEvalOper code.
@ -915,7 +928,7 @@ ExecEvalNot(Expr *notclause, ExprContext *econtext, bool *isNull)
* evaluation of 'not' is simple.. expr is false, then return 'true'
* and vice versa.
*/
return BoolGetDatum(! DatumGetBool(expr_value));
return BoolGetDatum(!DatumGetBool(expr_value));
}
/* ----------------------------------------------------------------
@ -999,7 +1012,7 @@ ExecEvalAnd(Expr *andExpr, ExprContext *econtext, bool *isNull)
*/
if (*isNull)
AnyNull = true; /* remember we got a null */
else if (! DatumGetBool(clause_value))
else if (!DatumGetBool(clause_value))
return clause_value;
}
@ -1359,7 +1372,7 @@ ExecQual(List *qual, ExprContext *econtext, bool resultForNull)
}
else
{
if (! DatumGetBool(expr_value))
if (!DatumGetBool(expr_value))
{
result = false; /* definitely FALSE */
break;
@ -1408,14 +1421,12 @@ ExecCleanTargetListLength(List *targetlist)
if (curTle->resdom != NULL)
{
if (! curTle->resdom->resjunk)
if (!curTle->resdom->resjunk)
len++;
}
else
{
len += curTle->fjoin->fj_nNodes;
}
}
return len;
}
@ -1440,6 +1451,7 @@ ExecTargetList(List *targetlist,
ExprDoneCond *isDone)
{
MemoryContext oldContext;
#define NPREALLOCDOMAINS 64
char nullsArray[NPREALLOCDOMAINS];
bool fjIsNullArray[NPREALLOCDOMAINS];
@ -1484,10 +1496,11 @@ ExecTargetList(List *targetlist,
* we have a really large targetlist. otherwise we use the stack.
*
* We also allocate a bool array that is used to hold fjoin result state,
* and another array that holds the isDone status for each targetlist item.
* The isDone status is needed so that we can iterate, generating multiple
* tuples, when one or more tlist items return sets. (We expect the caller
* to call us again if we return *isDone = ExprMultipleResult.)
* and another array that holds the isDone status for each targetlist
* item. The isDone status is needed so that we can iterate,
* generating multiple tuples, when one or more tlist items return
* sets. (We expect the caller to call us again if we return *isDone
* = ExprMultipleResult.)
*/
if (nodomains > NPREALLOCDOMAINS)
{
@ -1554,8 +1567,10 @@ ExecTargetList(List *targetlist,
ExecEvalFjoin(tle, econtext, fjIsNull, isDone);
/* XXX this is wrong, but since fjoin code is completely broken
* anyway, I'm not going to worry about it now --- tgl 8/23/00
/*
* XXX this is wrong, but since fjoin code is completely
* broken anyway, I'm not going to worry about it now --- tgl
* 8/23/00
*/
if (isDone && *isDone == ExprEndResult)
{
@ -1594,6 +1609,7 @@ ExecTargetList(List *targetlist,
if (haveDoneSets)
{
/*
* note: can't get here unless we verified isDone != NULL
*/
@ -1601,7 +1617,8 @@ ExecTargetList(List *targetlist,
{
/*
* all sets are done, so report that tlist expansion is complete.
* all sets are done, so report that tlist expansion is
* complete.
*/
*isDone = ExprEndResult;
MemoryContextSwitchTo(oldContext);
@ -1644,10 +1661,11 @@ ExecTargetList(List *targetlist,
}
}
}
/*
* If we cannot make a tuple because some sets are empty,
* we still have to cycle the nonempty sets to completion,
* else resources will not be released from subplans etc.
* If we cannot make a tuple because some sets are empty, we
* still have to cycle the nonempty sets to completion, else
* resources will not be released from subplans etc.
*/
if (*isDone == ExprEndResult)
{

View File

@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execScan.c,v 1.15 2001/01/24 19:42:54 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execScan.c,v 1.16 2001/03/22 03:59:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/

View File

@ -15,7 +15,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execTuples.c,v 1.46 2001/01/29 00:39:18 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execTuples.c,v 1.47 2001/03/22 03:59:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -422,7 +422,7 @@ ExecClearTuple(TupleTableSlot *slot) /* slot in which to store tuple */
slot->val = (HeapTuple) NULL;
slot->ttc_shouldFree = true; /* probably useless code... */
slot->ttc_shouldFree = true;/* probably useless code... */
/* ----------------
* Drop the pin on the referenced buffer, if there is one.
@ -541,11 +541,13 @@ ExecInitExtraTupleSlot(EState *estate)
TupleTableSlot *
ExecInitNullTupleSlot(EState *estate, TupleDesc tupType)
{
TupleTableSlot* slot = ExecInitExtraTupleSlot(estate);
TupleTableSlot *slot = ExecInitExtraTupleSlot(estate);
/*
* Since heap_getattr() will treat attributes beyond a tuple's t_natts
* as being NULL, we can make an all-nulls tuple just by making it be of
* zero length. However, the slot descriptor must match the real tupType.
* as being NULL, we can make an all-nulls tuple just by making it be
* of zero length. However, the slot descriptor must match the real
* tupType.
*/
HeapTuple nullTuple;
Datum values[1];
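
The execTuples.c hunk above relies on heap_getattr treating any attribute number beyond a tuple's t_natts as NULL, so an "all-nulls" tuple can be built with zero attributes while the slot keeps the real descriptor. The toy illustration below shows that convention with a hypothetical getattr over a minimal tuple struct; it is not the backend's heap-tuple layout.

#include <stdbool.h>
#include <stdio.h>

/* hypothetical minimal tuple: only the leading t_natts attributes exist */
typedef struct MiniTuple
{
    int         t_natts;
    const int  *values;
} MiniTuple;

/* returns the attribute value; *isnull is set when attnum lies beyond t_natts */
static int
mini_getattr(const MiniTuple *tup, int attnum, bool *isnull)
{
    if (attnum > tup->t_natts)
    {
        *isnull = true;         /* beyond stored attributes: read as NULL */
        return 0;
    }
    *isnull = false;
    return tup->values[attnum - 1];
}

int
main(void)
{
    MiniTuple   null_tuple = {0, NULL};     /* zero-length: every attribute is NULL */
    bool        isnull;
    int         attnum;

    for (attnum = 1; attnum <= 3; attnum++)
    {
        mini_getattr(&null_tuple, attnum, &isnull);
        printf("att %d is %s\n", attnum, isnull ? "NULL" : "not NULL");
    }
    return 0;
}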

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execUtils.c,v 1.73 2001/01/29 00:39:19 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execUtils.c,v 1.74 2001/03/22 03:59:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -148,6 +148,7 @@ ExecAssignExprContext(EState *estate, CommonState *commonstate)
econtext->ecxt_innertuple = NULL;
econtext->ecxt_outertuple = NULL;
econtext->ecxt_per_query_memory = CurrentMemoryContext;
/*
* Create working memory for expression evaluation in this context.
*/
@ -184,14 +185,16 @@ MakeExprContext(TupleTableSlot *slot,
econtext->ecxt_innertuple = NULL;
econtext->ecxt_outertuple = NULL;
econtext->ecxt_per_query_memory = queryContext;
/*
* We make the temporary context a child of current working context,
* not of the specified queryContext. This seems reasonable but I'm
* not totally sure about it...
*
* Expression contexts made via this routine typically don't live long
* enough to get reset, so specify a minsize of 0. That avoids alloc'ing
* any memory in the common case where expr eval doesn't use any.
* enough to get reset, so specify a minsize of 0. That avoids
* alloc'ing any memory in the common case where expr eval doesn't use
* any.
*/
econtext->ecxt_per_tuple_memory =
AllocSetContextCreate(CurrentMemoryContext,
@ -467,7 +470,7 @@ ExecOpenIndices(ResultRelInfo *resultRelInfo)
resultRelInfo->ri_NumIndices = 0;
/* checks for disabled indexes */
if (! RelationGetForm(resultRelation)->relhasindex)
if (!RelationGetForm(resultRelation)->relhasindex)
return;
if (IsIgnoringSystemIndexes() &&
IsSystemRelationName(RelationGetRelationName(resultRelation)))
@ -635,8 +638,9 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
heapDescriptor = RelationGetDescr(heapRelation);
/*
* We will use the EState's per-tuple context for evaluating predicates
* and functional-index functions (creating it if it's not already there).
* We will use the EState's per-tuple context for evaluating
* predicates and functional-index functions (creating it if it's not
* already there).
*/
econtext = GetPerTupleExprContext(estate);
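
Several execUtils.c hunks above (ExecAssignExprContext, MakeExprContext, ExecInsertIndexTuples) lean on one discipline: per-tuple allocations go into a dedicated child memory context that is reset wholesale between tuples instead of being pfree'd piece by piece. The sketch below is a deliberately tiny stand-in arena, not the real AllocSet/MemoryContext API, just to show that reset-per-tuple shape.

#include <stdio.h>
#include <stdlib.h>

/* toy bump arena standing in for a per-tuple memory context */
typedef struct Arena
{
    char       *base;
    size_t      size;
    size_t      used;
} Arena;

static void
arena_init(Arena *a, size_t size)
{
    a->base = malloc(size);
    a->size = size;
    a->used = 0;
}

static void *
arena_alloc(Arena *a, size_t n)
{
    void       *p;

    if (a->base == NULL || a->used + n > a->size)
        return NULL;            /* toy: no growth */
    p = a->base + a->used;
    a->used += n;
    return p;
}

/* "resetting" releases everything allocated since the last reset, in O(1) */
static void
arena_reset(Arena *a)
{
    a->used = 0;
}

int
main(void)
{
    Arena       per_tuple;
    int         tuple;

    arena_init(&per_tuple, 1024);

    for (tuple = 0; tuple < 3; tuple++)
    {
        /* expression-evaluation scratch space for this tuple */
        char       *scratch = arena_alloc(&per_tuple, 100);

        if (scratch != NULL)
            scratch[0] = '\0';
        printf("tuple %d used %zu bytes\n", tuple, per_tuple.used);
        arena_reset(&per_tuple);    /* one cheap reset, no per-object frees */
    }
    free(per_tuple.base);
    return 0;
}

The payoff, as the rewrapped MakeExprContext comment notes, is that short-lived contexts can start with a minimum size of zero and never allocate anything at all when expression evaluation needs no scratch memory.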

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/functions.c,v 1.43 2001/01/29 00:39:19 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/functions.c,v 1.44 2001/03/22 03:59:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -235,9 +235,7 @@ init_sql_fcache(FmgrInfo *finfo)
nargs * sizeof(Oid));
}
else
{
argOidVect = (Oid *) NULL;
}
tmp = SysCacheGetAttr(PROCOID,
procedureTuple,
@ -346,8 +344,8 @@ copy_function_result(SQLFunctionCachePtr fcache,
return resultSlot; /* no need to copy result */
/*
* If first time through, we have to initialize the funcSlot's
* tuple descriptor.
* If first time through, we have to initialize the funcSlot's tuple
* descriptor.
*/
if (funcSlot->ttc_tupleDescriptor == NULL)
{
@ -415,12 +413,14 @@ postquel_execute(execution_state *es,
/*
* If we are supposed to return a tuple, we return the tuple slot
* pointer converted to Datum. If we are supposed to return a simple
* value, then project out the first attribute of the result tuple
* (ie, take the first result column of the final SELECT).
* pointer converted to Datum. If we are supposed to return a
* simple value, then project out the first attribute of the
* result tuple (ie, take the first result column of the final
* SELECT).
*/
if (fcache->returnsTuple)
{
/*
* XXX do we need to remove junk attrs from the result tuple?
* Probably OK to leave them, as long as they are at the end.
@ -434,6 +434,7 @@ postquel_execute(execution_state *es,
1,
resSlot->ttc_tupleDescriptor,
&(fcinfo->isnull));
/*
* Note: if result type is pass-by-reference then we are
* returning a pointer into the tuple copied by

Some files were not shown because too many files have changed in this diff.