pgindent run over code.

Bruce Momjian 1999-05-25 16:15:34 +00:00
parent 4b04b01aaa
commit 07842084fe
413 changed files with 11723 additions and 10769 deletions

View File

@ -43,7 +43,7 @@ array_iterator(Oid elemtype, Oid proc, int and, ArrayType *array, Datum value)
int ndim,
*dim;
char *p;
FmgrInfo finf; /*Tobias Gabele Jan 18 1999*/
FmgrInfo finf; /* Tobias Gabele Jan 18 1999 */
/* Sanity checks */
@ -75,9 +75,9 @@ array_iterator(Oid elemtype, Oid proc, int and, ArrayType *array, Datum value)
/* Lookup the function entry point */
proc_fn = (func_ptr) NULL;
fmgr_info(proc,&finf); /*Tobias Gabele Jan 18 1999*/
proc_fn=finf.fn_addr; /*Tobias Gabele Jan 18 1999*/
pronargs=finf.fn_nargs; /*Tobias Gabele Jan 18 1999*/
fmgr_info(proc, &finf); /* Tobias Gabele Jan 18 1999 */
proc_fn = finf.fn_addr; /* Tobias Gabele Jan 18 1999 */
pronargs = finf.fn_nargs; /* Tobias Gabele Jan 18 1999 */
if ((proc_fn == NULL) || (pronargs != 2))
{
elog(ERROR, "array_iterator: fmgr_info lookup failed for oid %d", proc);
@ -110,38 +110,26 @@ array_iterator(Oid elemtype, Oid proc, int and, ArrayType *array, Datum value)
{
result = (int) (*proc_fn) (p, value);
if (typlen > 0)
{
p += typlen;
}
else
{
p += INTALIGN(*(int32 *) p);
}
}
if (result)
{
if (!and)
{
return (1);
}
}
else
{
if (and)
{
return (0);
}
}
}
if (and && result)
{
return (1);
}
else
{
return (0);
}
}
/*

View File

@ -1,8 +1,7 @@
#ifndef ARRAY_ITERATOR_H
#define ARRAY_ITERATOR_H
static int32
array_iterator(Oid elemtype, Oid proc, int and,
static int32 array_iterator(Oid elemtype, Oid proc, int and,
ArrayType *array, Datum value);
int32 array_texteq(ArrayType *array, char *value);
int32 array_all_texteq(ArrayType *array, char *value);
@ -26,4 +25,5 @@ int32 array_int4le(ArrayType *array, int4 value);
int32 array_all_int4le(ArrayType *array, int4 value);
int32 array_oideq(ArrayType *array, Oid value);
int32 array_all_oidne(ArrayType *array, Oid value);
#endif

View File

@ -20,7 +20,8 @@ const TWO_PI = 2.0 * M_PI;
******************************************************/
static double
degtorad (double degrees) {
degtorad(double degrees)
{
return (degrees / 360.0) * TWO_PI;
}
@ -39,27 +40,31 @@ degtorad (double degrees) {
******************************************************/
double *
geo_distance (Point *pt1, Point *pt2) {
geo_distance(Point *pt1, Point *pt2)
{
double long1, lat1, long2, lat2;
double long1,
lat1,
long2,
lat2;
double longdiff;
double * resultp = palloc (sizeof(double));
double *resultp = palloc(sizeof(double));
/* convert degrees to radians */
long1 = degtorad (pt1->x);
lat1 = degtorad (pt1->y);
long1 = degtorad(pt1->x);
lat1 = degtorad(pt1->y);
long2 = degtorad (pt2->x);
lat2 = degtorad (pt2->y);
long2 = degtorad(pt2->x);
lat2 = degtorad(pt2->y);
/* compute difference in longitudes - want < 180 degrees */
longdiff = fabs (long1 - long2);
longdiff = fabs(long1 - long2);
if (longdiff > M_PI)
longdiff = TWO_PI - longdiff;
* resultp = EARTH_RADIUS * acos
(sin (lat1) * sin (lat2) + cos (lat1) * cos (lat2) * cos (longdiff));
*resultp = EARTH_RADIUS * acos
(sin(lat1) * sin(lat2) + cos(lat1) * cos(lat2) * cos(longdiff));
return resultp;
}
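
The geo_distance() hunk above reformats a great-circle calculation: both points are (longitude, latitude) in degrees, converted to radians and fed through the spherical law of cosines. A minimal standalone sketch of the same arithmetic follows; the function name, the EARTH_RADIUS value, and the sample coordinates are illustrative assumptions, not taken from the contrib module.

#include <math.h>
#include <stdio.h>

#define EARTH_RADIUS 3958.747716    /* miles; placeholder value assumed here */
#define TWO_PI (2.0 * M_PI)

/* Great-circle distance by the spherical law of cosines,
 * mirroring the logic of geo_distance() above. */
static double
great_circle(double lon1, double lat1, double lon2, double lat2)
{
    double      longdiff;

    /* degrees -> radians */
    lon1 = lon1 / 360.0 * TWO_PI;
    lat1 = lat1 / 360.0 * TWO_PI;
    lon2 = lon2 / 360.0 * TWO_PI;
    lat2 = lat2 / 360.0 * TWO_PI;

    /* keep the longitude difference below 180 degrees */
    longdiff = fabs(lon1 - lon2);
    if (longdiff > M_PI)
        longdiff = TWO_PI - longdiff;

    return EARTH_RADIUS * acos(sin(lat1) * sin(lat2) +
                               cos(lat1) * cos(lat2) * cos(longdiff));
}

int
main(void)
{
    /* coordinates roughly for New York and Los Angeles */
    printf("%.0f\n", great_circle(-73.9, 40.7, -118.2, 34.1));
    return 0;
}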

View File

@ -9,7 +9,8 @@
#include <libpq-fe.h>
#include "pginterface.h"
PGresult *attres, *relres;
PGresult *attres,
*relres;
int
main(int argc, char **argv)
@ -65,17 +66,17 @@ main(int argc, char **argv)
{
unset_result(relres);
if (strcmp(typname, "oid") == 0)
sprintf(query,"\
sprintf(query, "\
DECLARE c_matches BINARY CURSOR FOR \
SELECT count(*)
FROM %s t1, %s t2 \
WHERE t1.%s = t2.oid", relname, relname2, attname);
SELECT count(*) \
FROM % s t1, %s t2 \
WHERE t1.% s = t2.oid ", relname, relname2, attname);
else
sprintf(query,"\
sprintf(query, "\
DECLARE c_matches BINARY CURSOR FOR \
SELECT count(*)
FROM %s t1, %s t2 \
WHERE RegprocToOid(t1.%s) = t2.oid", relname, relname2, attname);
SELECT count(*) \
FROM % s t1, %s t2 \
WHERE RegprocToOid(t1.% s) = t2.oid ", relname, relname2, attname);
doquery(query);
doquery("FETCH ALL IN c_matches");

View File

@ -62,8 +62,8 @@ select p.* from product p, title_fti f1, title_fti f2 where
*/
HeapTuple fti(void);
char *breakup(char*, char*);
bool is_stopword(char*);
char *breakup(char *, char *);
bool is_stopword(char *);
bool new_tuple = false;
@ -104,19 +104,19 @@ fti()
char *indexname; /* name of table for substrings */
HeapTuple rettuple = NULL;
TupleDesc tupdesc; /* tuple description */
bool isinsert=false;
bool isdelete=false;
bool isinsert = false;
bool isdelete = false;
int ret;
char query[8192];
Oid oid;
/*
FILE *debug;
* FILE *debug;
*/
/*
debug = fopen("/dev/xconsole", "w");
fprintf(debug, "FTI: entered function\n");
fflush(debug);
* debug = fopen("/dev/xconsole", "w"); fprintf(debug, "FTI: entered
* function\n"); fflush(debug);
*/
if (!CurrentTriggerData)
@ -127,23 +127,27 @@ fti()
elog(ERROR, "Full Text Indexing: must be fired AFTER event");
if (TRIGGER_FIRED_BY_INSERT(CurrentTriggerData->tg_event))
isinsert=true;
isinsert = true;
if (TRIGGER_FIRED_BY_UPDATE(CurrentTriggerData->tg_event))
{ isdelete=true;isinsert=true;}
{
isdelete = true;
isinsert = true;
}
if (TRIGGER_FIRED_BY_DELETE(CurrentTriggerData->tg_event))
isdelete=true;
isdelete = true;
trigger = CurrentTriggerData->tg_trigger;
rel = CurrentTriggerData->tg_relation;
relname = SPI_getrelname(rel);
rettuple=CurrentTriggerData->tg_trigtuple;
if (isdelete&&isinsert) /* is an UPDATE */
rettuple=CurrentTriggerData->tg_newtuple;
rettuple = CurrentTriggerData->tg_trigtuple;
if (isdelete && isinsert) /* is an UPDATE */
rettuple = CurrentTriggerData->tg_newtuple;
CurrentTriggerData = NULL; /* invalidate 'normal' calls to this function */
CurrentTriggerData = NULL; /* invalidate 'normal' calls to this
* function */
if ((ret = SPI_connect()) <0)
elog(ERROR,"Full Text Indexing: SPI_connect failed, returned %d\n",ret);
if ((ret = SPI_connect()) < 0)
elog(ERROR, "Full Text Indexing: SPI_connect failed, returned %d\n", ret);
nargs = trigger->tgnargs;
if (nargs != 2)
@ -156,9 +160,10 @@ fti()
/* get oid of current tuple, needed by all, so place here */
oid = rettuple->t_data->t_oid;
if (!OidIsValid(oid))
elog(ERROR,"Full Text Indexing: oid of current tuple is NULL");
elog(ERROR, "Full Text Indexing: oid of current tuple is NULL");
if (isdelete) {
if (isdelete)
{
void *pplan;
Oid *argtypes;
Datum values[1];
@ -166,8 +171,9 @@ fti()
sprintf(query, "D%s$%s", args[0], args[1]);
plan = find_plan(query, &DeletePlans, &nDeletePlans);
if (plan->nplans <= 0) {
argtypes = (Oid *)palloc(sizeof(Oid));
if (plan->nplans <= 0)
{
argtypes = (Oid *) palloc(sizeof(Oid));
argtypes[0] = OIDOID;
@ -181,7 +187,7 @@ fti()
elog(ERROR, "Full Text Indexing: SPI_saveplan returned NULL "
"in delete");
plan->splan = (void **)malloc(sizeof(void*));
plan->splan = (void **) malloc(sizeof(void *));
*(plan->splan) = pplan;
plan->nplans = 1;
}
@ -193,8 +199,10 @@ fti()
elog(ERROR, "Full Text Indexing: error executing plan in delete");
}
if (isinsert) {
char *substring, *column;
if (isinsert)
{
char *substring,
*column;
void *pplan;
Oid *argtypes;
Datum values[2];
@ -206,11 +214,12 @@ fti()
plan = find_plan(query, &InsertPlans, &nInsertPlans);
/* no plan yet, so allocate mem for argtypes */
if (plan->nplans <= 0) {
argtypes = (Oid *)palloc(2*sizeof(Oid));
if (plan->nplans <= 0)
{
argtypes = (Oid *) palloc(2 * sizeof(Oid));
argtypes[0] = VARCHAROID; /*create table t_name
(string varchar, */
argtypes[0] = VARCHAROID; /* create table t_name (string
* varchar, */
argtypes[1] = OIDOID; /* id oid); */
/* prepare plan to gain speed */
@ -226,14 +235,14 @@ fti()
elog(ERROR, "Full Text Indexing: SPI_saveplan returned NULL"
" in insert");
plan->splan = (void **)malloc(sizeof(void*));
plan->splan = (void **) malloc(sizeof(void *));
*(plan->splan) = pplan;
plan->nplans = 1;
}
/* prepare plan for query */
colnum=SPI_fnumber(tupdesc, args[1]);
colnum = SPI_fnumber(tupdesc, args[1]);
if (colnum == SPI_ERROR_NOATTRIBUTE)
elog(ERROR, "Full Text Indexing: column '%s' of '%s' not found",
args[1], args[0]);
@ -241,26 +250,30 @@ fti()
/* Get the char* representation of the column with name args[1] */
column = SPI_getvalue(rettuple, tupdesc, colnum);
if (column) { /* make sure we don't try to index NULL's */
if (column)
{ /* make sure we don't try to index NULL's */
char *buff;
char *string = column;
while(*string != '\0') { /* placed 'really' inline. */
*string = tolower(*string); /* some compilers will choke */
while (*string != '\0')
{ /* placed 'really' inline. */
*string = tolower(*string); /* some compilers will
* choke */
string++; /* on 'inline' keyword */
}
data = (struct varlena*)palloc(sizeof(int32)+strlen(column)+1);
data = (struct varlena *) palloc(sizeof(int32) + strlen(column) +1);
buff = palloc(strlen(column) + 1);
/* saves lots of calls in while-loop and in breakup()*/
/* saves lots of calls in while-loop and in breakup() */
new_tuple=true;
while ((substring = breakup(column, buff))) {
new_tuple = true;
while ((substring = breakup(column, buff)))
{
int l;
l = strlen(substring);
data->vl_len = l+sizeof(int32);
data->vl_len = l + sizeof(int32);
memcpy(VARDATA(data), substring, l);
values[0] = PointerGetDatum(data);
values[1] = oid;
@ -279,41 +292,48 @@ fti()
return (rettuple);
}
char *breakup(char *string, char *substring)
char *
breakup(char *string, char *substring)
{
static char *last_start;
static char *cur_pos;
if (new_tuple)
{
cur_pos=last_start=&string[strlen(string)-1];
new_tuple=false; /* don't initialize this next time */
cur_pos = last_start = &string[strlen(string) - 1];
new_tuple = false; /* don't initialize this next time */
}
while (cur_pos > string) /* don't read before start of 'string' */
{
/* skip pieces at the end of a string that are not
alfa-numeric (ie. 'string$%^&', last_start first points to
'&', and after this to 'g' */
if (!isalnum((int)*last_start)) {
while (!isalnum((int)*last_start) &&
/*
* skip pieces at the end of a string that are not alfa-numeric
* (ie. 'string$%^&', last_start first points to '&', and after
* this to 'g'
*/
if (!isalnum((int) *last_start))
{
while (!isalnum((int) *last_start) &&
last_start > string)
last_start--;
cur_pos=last_start;
cur_pos = last_start;
}
cur_pos--; /* substrings are at minimum 2 characters long */
cur_pos--; /* substrings are at minimum 2 characters
* long */
if (isalnum((int)*cur_pos))
if (isalnum((int) *cur_pos))
{
/* Houston, we have a substring! :) */
memcpy(substring, cur_pos, last_start - cur_pos + 1);
substring[last_start-cur_pos+1]='\0';
if (!is_stopword(substring)) return substring;
substring[last_start - cur_pos + 1] = '\0';
if (!is_stopword(substring))
return substring;
}
else
{
last_start=cur_pos-1;
last_start = cur_pos - 1;
cur_pos = last_start;
}
}
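
The breakup() function above walks backward through the lowercased column value, emitting every trailing substring of at least two alphanumeric characters that is not a stopword, so LIKE lookups on the index table can find infixes. A rough standalone sketch of that backward scan, without the stopword check or the static state tied to repeated trigger calls, might look like the following; the function name, buffer size, and driver main() are assumptions here.

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Print every trailing substring of 2+ alphanumeric characters of 'string',
 * scanning from the end toward the start, as breakup() above does. */
static void
emit_substrings(const char *string)
{
    const char *last_start = string + strlen(string) - 1;
    const char *cur_pos = last_start;
    char        substring[8192];

    while (cur_pos > string)
    {
        /* skip trailing non-alphanumerics, e.g. 'string$%^&' */
        if (!isalnum((unsigned char) *last_start))
        {
            while (!isalnum((unsigned char) *last_start) && last_start > string)
                last_start--;
            cur_pos = last_start;
        }
        cur_pos--;              /* substrings are at minimum 2 characters long */
        if (isalnum((unsigned char) *cur_pos))
        {
            memcpy(substring, cur_pos, last_start - cur_pos + 1);
            substring[last_start - cur_pos + 1] = '\0';
            printf("%s\n", substring);  /* fti() would insert this row */
        }
        else
        {
            last_start = cur_pos - 1;
            cur_pos = last_start;
        }
    }
}

int
main(void)
{
    emit_substrings("indexing");
    return 0;
}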

View File

@ -1,7 +1,7 @@
/*
* PostgreSQL type definitions for ISBNs.
*
* $Id: isbn.c,v 1.1 1998/08/17 03:35:04 scrappy Exp $
* $Id: isbn.c,v 1.2 1999/05/25 16:05:40 momjian Exp $
*/
#include <stdio.h>
@ -50,11 +50,13 @@ isbn_in(char *str)
char *cp;
int count;
if (strlen(str) != 13) {
if (strlen(str) != 13)
{
elog(ERROR, "isbn_in: invalid ISBN \"%s\"", str);
return (NULL);
}
if (isbn_sum(str) != 0) {
if (isbn_sum(str) != 0)
{
elog(ERROR, "isbn_in: purported ISBN \"%s\" failed checksum",
str);
return (NULL);
@ -84,23 +86,35 @@ isbn_in(char *str)
int4
isbn_sum(char *str)
{
int4 sum = 0, dashes = 0, val;
int4 sum = 0,
dashes = 0,
val;
int i;
for (i = 0; str[i] && i < 13; i++) {
switch(str[i]) {
for (i = 0; str[i] && i < 13; i++)
{
switch (str[i])
{
case '-':
if (++dashes > 3)
return 12;
continue;
case '0': case '1': case '2': case '3':
case '4': case '5': case '6': case '7':
case '8': case '9':
case '0':
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
case '8':
case '9':
val = str[i] - '0';
break;
case 'X': case 'x':
case 'X':
case 'x':
val = 10;
break;
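
The isbn_sum() hunks above walk the 13-character dashed form digit by digit; the underlying ISBN-10 rule is that the ten digits, weighted 10 down to 1 with 'X' counting as 10 in the last position, must sum to a multiple of 11. A compact sketch of that standard rule follows; the helper name is made up, dashes are assumed already stripped, and the sample is a known-valid ISBN.

#include <stdio.h>
#include <string.h>

/* Standard ISBN-10 check: sum of digit[i] * (10 - i) for i = 0..9
 * must be divisible by 11; 'X' in the last position counts as 10. */
static int
isbn10_ok(const char *isbn)     /* expects 10 characters, no dashes */
{
    int     sum = 0;
    int     i;

    if (strlen(isbn) != 10)
        return 0;
    for (i = 0; i < 10; i++)
    {
        int     val;

        if (isbn[i] >= '0' && isbn[i] <= '9')
            val = isbn[i] - '0';
        else if ((isbn[i] == 'X' || isbn[i] == 'x') && i == 9)
            val = 10;
        else
            return 0;
        sum += val * (10 - i);
    }
    return sum % 11 == 0;
}

int
main(void)
{
    printf("%d\n", isbn10_ok("0201896834"));    /* 1: a valid ISBN-10 */
    return 0;
}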

View File

@ -1,7 +1,7 @@
/*
* PostgreSQL type definitions for ISSNs.
*
* $Id: issn.c,v 1.1 1998/08/17 03:35:05 scrappy Exp $
* $Id: issn.c,v 1.2 1999/05/25 16:05:42 momjian Exp $
*/
#include <stdio.h>
@ -50,11 +50,13 @@ issn_in(char *str)
char *cp;
int count;
if (strlen(str) != 9) {
if (strlen(str) != 9)
{
elog(ERROR, "issn_in: invalid ISSN \"%s\"", str);
return (NULL);
}
if (issn_sum(str) != 0) {
if (issn_sum(str) != 0)
{
elog(ERROR, "issn_in: purported ISSN \"%s\" failed checksum",
str);
return (NULL);
@ -75,23 +77,35 @@ issn_in(char *str)
int4
issn_sum(char *str)
{
int4 sum = 0, dashes = 0, val;
int4 sum = 0,
dashes = 0,
val;
int i;
for (i = 0; str[i] && i < 9; i++) {
switch(str[i]) {
for (i = 0; str[i] && i < 9; i++)
{
switch (str[i])
{
case '-':
if (++dashes > 1)
return 12;
continue;
case '0': case '1': case '2': case '3':
case '4': case '5': case '6': case '7':
case '8': case '9':
case '0':
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
case '8':
case '9':
val = str[i] - '0';
break;
case 'X': case 'x':
case 'X':
case 'x':
val = 10;
break;

View File

@ -1,7 +1,7 @@
/*
* PostgreSQL type definitions for managed LargeObjects.
*
* $Id: lo.c,v 1.1 1998/06/16 07:07:11 momjian Exp $
* $Id: lo.c,v 1.2 1999/05/25 16:05:45 momjian Exp $
*
*/
@ -34,8 +34,8 @@ typedef Oid Blob;
*/
Blob *lo_in(char *str); /* Create from String */
char *lo_out(Blob * addr); /* Output oid as String */
Oid lo_oid(Blob * addr); /* Return oid as an oid */
char *lo_out(Blob * addr);/* Output oid as String */
Oid lo_oid(Blob * addr);/* Return oid as an oid */
Blob *lo(Oid oid); /* Return Blob based on oid */
HeapTuple lo_manage(void); /* Trigger handler */
@ -64,7 +64,7 @@ lo_in(char *str)
return (NULL);
}
if(oid < 0)
if (oid < 0)
{
elog(ERROR, "lo_in: illegal oid \"%s\"", str);
return (NULL);
@ -72,13 +72,14 @@ lo_in(char *str)
}
else
{
/*
* There is no Oid passed, so create a new one
*/
oid = lo_creat(INV_READ|INV_WRITE);
if(oid == InvalidOid)
oid = lo_creat(INV_READ | INV_WRITE);
if (oid == InvalidOid)
{
elog(ERROR,"lo_in: InvalidOid returned from lo_creat");
elog(ERROR, "lo_in: InvalidOid returned from lo_creat");
return (NULL);
}
}
@ -102,7 +103,7 @@ lo_out(Blob * addr)
return (NULL);
result = (char *) palloc(32);
sprintf(result,"%d",*addr);
sprintf(result, "%d", *addr);
return (result);
}
@ -115,9 +116,9 @@ lo_out(Blob * addr)
Oid
lo_oid(Blob * addr)
{
if(addr == NULL)
if (addr == NULL)
return InvalidOid;
return (Oid)(*addr);
return (Oid) (*addr);
}
/*
@ -130,6 +131,7 @@ Blob *
lo(Oid oid)
{
Blob *result = (Blob *) palloc(sizeof(Blob));
*result = oid;
return (result);
}
@ -145,7 +147,7 @@ lo_manage(void)
TupleDesc tupdesc; /* Tuple Descriptor */
HeapTuple rettuple; /* Tuple to be returned */
bool isdelete; /* are we deleting? */
HeapTuple newtuple=NULL; /* The new value for tuple */
HeapTuple newtuple = NULL;/* The new value for tuple */
HeapTuple trigtuple; /* The original value of tuple */
if (!CurrentTriggerData)
@ -172,24 +174,25 @@ lo_manage(void)
CurrentTriggerData = NULL;
/* Get the column were interested in */
attnum = SPI_fnumber(tupdesc,args[0]);
attnum = SPI_fnumber(tupdesc, args[0]);
/*
* Handle updates
*
* Here, if the value of the monitored attribute changes, then the
* large object associated with the original value is unlinked.
* Here, if the value of the monitored attribute changes, then the large
* object associated with the original value is unlinked.
*/
if(newtuple!=NULL) {
char *orig = SPI_getvalue(trigtuple,tupdesc,attnum);
char *newv = SPI_getvalue(newtuple,tupdesc,attnum);
if (newtuple != NULL)
{
char *orig = SPI_getvalue(trigtuple, tupdesc, attnum);
char *newv = SPI_getvalue(newtuple, tupdesc, attnum);
if((orig != newv && (orig==NULL || newv==NULL)) || (orig!=NULL && newv!=NULL && strcmp(orig,newv)))
if ((orig != newv && (orig == NULL || newv == NULL)) || (orig != NULL && newv != NULL && strcmp(orig, newv)))
lo_unlink(atoi(orig));
if(newv)
if (newv)
pfree(newv);
if(orig)
if (orig)
pfree(orig);
}
@ -199,10 +202,12 @@ lo_manage(void)
* Here, we unlink the large object associated with the managed attribute
*
*/
if(isdelete) {
char *orig = SPI_getvalue(trigtuple,tupdesc,attnum);
if (isdelete)
{
char *orig = SPI_getvalue(trigtuple, tupdesc, attnum);
if(orig != NULL) {
if (orig != NULL)
{
lo_unlink(atoi(orig));
pfree(orig);
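
The lo_manage() trigger above compares the managed column's old and new text values and calls lo_unlink() on the old large-object OID when the value changed on UPDATE, or unconditionally on DELETE. Its change test treats "both NULL" as unchanged; a condensed sketch of just that decision, with a hypothetical helper name and sample OIDs, is:

#include <stdio.h>
#include <string.h>

/* Decide whether the old large object should be unlinked, mirroring the
 * test in lo_manage(): unlink when the value changed, where a change means
 * exactly one side is NULL, or both are non-NULL and compare unequal. */
static int
should_unlink(const char *orig, const char *newv)
{
    return (orig != newv && (orig == NULL || newv == NULL)) ||
           (orig != NULL && newv != NULL && strcmp(orig, newv) != 0);
}

int
main(void)
{
    printf("%d\n", should_unlink("16390", "16391"));    /* 1: changed */
    printf("%d\n", should_unlink(NULL, NULL));          /* 0: both NULL */
    printf("%d\n", should_unlink("16390", "16390"));    /* 0: unchanged */
    return 0;
}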

View File

@ -17,33 +17,41 @@ char *msqlErrors[] = {
"Out of database handlers."
};
char msqlErrMsg[BUFSIZ], *tfrom = "dunno";
char msqlErrMsg[BUFSIZ],
*tfrom = "dunno";
PGresult *queryres = NULL;
int msqlConnect (char *host) {
int
msqlConnect(char *host)
{
int count;
for (count = 0; count < HNDMAX; count++)
if (PGh[count] == NULL) break;
if (PGh[count] == NULL)
break;
if (count == HNDMAX) {
if (count == HNDMAX)
{
strncpy(msqlErrMsg, msqlErrors[E_NOHANDLERS], BUFSIZ);
return -1;
}
PGh[count] = malloc(sizeof (PGconn));
PGh[count] = malloc(sizeof(PGconn));
PGh[count]->pghost = host ? strdup(host) : NULL;
return count;
}
int msqlSelectDB(int handle, char *dbname) {
int
msqlSelectDB(int handle, char *dbname)
{
char *options = calloc(1, BUFSIZ);
char *e = getenv("PG_OPTIONS");
if (e == NULL)
e = "";
if (PGh[handle]->pghost) {
if (PGh[handle]->pghost)
{
strcat(options, "host=");
strncat(options, PGh[handle]->pghost, BUFSIZ);
strncat(options, " ", BUFSIZ);
@ -61,7 +69,9 @@ int msqlSelectDB(int handle, char *dbname) {
return (PQstatus(PGh[handle]) == CONNECTION_BAD ? -1 : 0);
}
int msqlQuery(int handle, char *query) {
int
msqlQuery(int handle, char *query)
{
char *tq = strdup(query);
char *p = tq;
PGresult *res;
@ -72,84 +82,122 @@ int msqlQuery(int handle, char *query) {
rcode = PQresultStatus(res);
if (rcode == PGRES_TUPLES_OK) {
if (rcode == PGRES_TUPLES_OK)
{
queryres = res;
return PQntuples(res);
} else if (rcode == PGRES_FATAL_ERROR || rcode == PGRES_NONFATAL_ERROR) {
}
else if (rcode == PGRES_FATAL_ERROR || rcode == PGRES_NONFATAL_ERROR)
{
PQclear(res);
queryres = NULL;
return -1;
} else {
}
else
{
PQclear(res);
queryres = NULL;
return 0;
}
}
int msqlCreateDB (int a, char*b) {
int
msqlCreateDB(int a, char *b)
{
char tbuf[BUFSIZ];
sprintf(tbuf, "create database %s", b);
return msqlQuery(a, tbuf) >= 0 ? 0 : -1;
}
int msqlDropDB (int a, char* b) {
int
msqlDropDB(int a, char *b)
{
char tbuf[BUFSIZ];
sprintf(tbuf, "drop database %s", b);
return msqlQuery(a, tbuf) >= 0 ? 0 : -1;
}
int msqlShutdown(int a) {
int
msqlShutdown(int a)
{
}
int msqlGetProtoInfo(void) {
int
msqlGetProtoInfo(void)
{
}
int msqlReloadAcls(int a) {
int
msqlReloadAcls(int a)
{
}
char *msqlGetServerInfo(void) {
char *
msqlGetServerInfo(void)
{
}
char *msqlGetHostInfo(void) {
char *
msqlGetHostInfo(void)
{
}
char *msqlUnixTimeToDate(time_t date) {
char *
msqlUnixTimeToDate(time_t date)
{
}
char *msqlUnixTimeToTime(time_t time) {
char *
msqlUnixTimeToTime(time_t time)
{
}
void msqlClose(int a) {
void
msqlClose(int a)
{
PQfinish(PGh[a]);
PGh[a] = NULL;
if (queryres) {
if (queryres)
{
free(queryres);
queryres = NULL;
}
}
void msqlDataSeek(m_result *result, int count) {
void
msqlDataSeek(m_result * result, int count)
{
int c;
result->cursor = result->queryData;
for (c = 1; c < count; c++)
if (result->cursor->next)
result->cursor = result->cursor->next;
}
void msqlFieldSeek(m_result *result, int count) {
void
msqlFieldSeek(m_result * result, int count)
{
int c;
result->fieldCursor = result->fieldData;
for (c = 1; c < count; c++)
if (result->fieldCursor->next)
result->fieldCursor = result->fieldCursor->next;
}
void msqlFreeResult(m_result *result) {
if (result) {
void
msqlFreeResult(m_result * result)
{
if (result)
{
/* Clears fields */
free(result->fieldData);
result->cursor = result->queryData;
while (result->cursor) {
while (result->cursor)
{
int c;
m_row m = result->cursor->data;
@ -163,113 +211,155 @@ void msqlFreeResult(m_result *result) {
}
}
m_row msqlFetchRow(m_result *row) {
m_row
msqlFetchRow(m_result * row)
{
m_data *r = row->cursor;
if (r) {
if (r)
{
row->cursor = row->cursor->next;
return (m_row)r->data;
return (m_row) r->data;
}
return (m_row)NULL;
return (m_row) NULL;
}
m_seq *msqlGetSequenceInfo(int a, char *b) {
m_seq *
msqlGetSequenceInfo(int a, char *b)
{
}
m_field *msqlFetchField (m_result *mr) {
m_field *m = (m_field*)mr->fieldCursor;
if (m) {
m_field *
msqlFetchField(m_result * mr)
{
m_field *m = (m_field *) mr->fieldCursor;
if (m)
{
mr->fieldCursor = mr->fieldCursor->next;
return m;
}
return NULL;
}
m_result *msqlListDBs(int a) {
m_result *
msqlListDBs(int a)
{
m_result *m;
if (msqlQuery(a, "select datname from pg_database") > 0) {
if (msqlQuery(a, "select datname from pg_database") > 0)
{
m = msqlStoreResult();
return m;
} else return NULL;
}
else
return NULL;
}
m_result *msqlListTables(int a) {
m_result *
msqlListTables(int a)
{
m_result *m;
char tbuf[BUFSIZ];
sprintf(tbuf, "select relname from pg_class where relkind='r' and relowner=%d", getuid());
if (msqlQuery(a, tbuf) > 0) {
if (msqlQuery(a, tbuf) > 0)
{
m = msqlStoreResult();
return m;
} else return NULL;
}
else
return NULL;
}
m_result *msqlListFields(int a, char *b) {
m_result *
msqlListFields(int a, char *b)
{
}
m_result *msqlListIndex(int a, char *b, char *c) {
m_result *
msqlListIndex(int a, char *b, char *c)
{
m_result *m;
char tbuf[BUFSIZ];
sprintf(tbuf, "select relname from pg_class where relkind='i' and relowner=%d", getuid());
if (msqlQuery(a, tbuf) > 0) {
if (msqlQuery(a, tbuf) > 0)
{
m = msqlStoreResult();
return m;
} else return NULL;
}
else
return NULL;
}
m_result *msqlStoreResult(void) {
if (queryres) {
m_result *
msqlStoreResult(void)
{
if (queryres)
{
m_result *mr = malloc(sizeof(m_result));
m_fdata *mf;
m_data *md;
int count;
mr->queryData = mr->cursor = NULL;
mr->numRows = PQntuples(queryres);
mr->numFields = PQnfields(queryres);
mf = calloc(PQnfields(queryres), sizeof(m_fdata));
for (count = 0; count < PQnfields(queryres); count++) {
(m_fdata*)(mf+count)->field.name = strdup(PQfname(queryres, count));
(m_fdata*)(mf+count)->field.table = tfrom;
(m_fdata*)(mf+count)->field.type = CHAR_TYPE;
(m_fdata*)(mf+count)->field.length = PQfsize(queryres, count);
(m_fdata*)(mf+count)->next = (m_fdata*)(mf+count+1);
for (count = 0; count < PQnfields(queryres); count++)
{
(m_fdata *) (mf + count)->field.name = strdup(PQfname(queryres, count));
(m_fdata *) (mf + count)->field.table = tfrom;
(m_fdata *) (mf + count)->field.type = CHAR_TYPE;
(m_fdata *) (mf + count)->field.length = PQfsize(queryres, count);
(m_fdata *) (mf + count)->next = (m_fdata *) (mf + count + 1);
}
(m_fdata*)(mf+count-1)->next = NULL;
(m_fdata *) (mf + count - 1)->next = NULL;
md = calloc(PQntuples(queryres), sizeof(m_data));
for (count = 0; count < PQntuples(queryres); count++) {
m_row rows = calloc(PQnfields(queryres)*sizeof(m_row)+1, 1);
for (count = 0; count < PQntuples(queryres); count++)
{
m_row rows = calloc(PQnfields(queryres) * sizeof(m_row) + 1, 1);
int c;
for (c = 0; c < PQnfields(queryres); c++) {
for (c = 0; c < PQnfields(queryres); c++)
rows[c] = strdup(PQgetvalue(queryres, count, c));
}
(m_data*)(md+count)->data = rows;
(m_data *) (md + count)->data = rows;
(m_data*)(md+count)->width = PQnfields(queryres);
(m_data*)(md+count)->next = (m_data*)(md+count+1);
(m_data *) (md + count)->width = PQnfields(queryres);
(m_data *) (md + count)->next = (m_data *) (md + count + 1);
}
(m_data*)(md+count-1)->next = NULL;
(m_data *) (md + count - 1)->next = NULL;
mr->queryData = mr->cursor = md;
mr->fieldCursor = mr->fieldData = mf;
return mr;
} else return NULL;
}
else
return NULL;
}
time_t msqlDateToUnixTime(char *a) {
time_t
msqlDateToUnixTime(char *a)
{
}
time_t msqlTimeToUnixTime(char *b) {
time_t
msqlTimeToUnixTime(char *b)
{
}
char *msql_tmpnam(void) {
char *
msql_tmpnam(void)
{
return tmpnam("/tmp/msql.XXXXXX");
}
int msqlLoadConfigFile(char *a) {
int
msqlLoadConfigFile(char *a)
{
}
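
This file emulates the mSQL client API on top of libpq: msqlConnect() only records the host, msqlSelectDB() opens the real connection, and msqlQuery() forwards to PQexec(), translating the result status into mSQL-style return codes (row count, 0, or -1). A minimal sketch of just that status translation against libpq, leaving out the wrapper's handle table and connection bookkeeping, is below; the function name and the empty conninfo string are assumptions.

#include <stdio.h>
#include <libpq-fe.h>

/* Run a query and return an mSQL-flavoured code: number of tuples for a
 * SELECT, 0 for a command, -1 for an error -- the same mapping msqlQuery()
 * above applies to PQresultStatus(). */
static int
run_query(PGconn *conn, const char *query)
{
    PGresult   *res = PQexec(conn, query);
    ExecStatusType rcode = PQresultStatus(res);
    int         ret;

    if (rcode == PGRES_TUPLES_OK)
        ret = PQntuples(res);
    else if (rcode == PGRES_FATAL_ERROR || rcode == PGRES_NONFATAL_ERROR)
        ret = -1;
    else
        ret = 0;
    PQclear(res);
    return ret;
}

int
main(void)
{
    PGconn     *conn = PQconnectdb("");     /* connection parameters from the environment */

    if (PQstatus(conn) != CONNECTION_BAD)
        printf("%d\n", run_query(conn, "SELECT 1"));
    PQfinish(conn);
    return 0;
}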

View File

@ -23,6 +23,7 @@ extern int assertTest(int val);
#ifdef ASSERT_CHECKING_TEST
extern int assertEnable(int val);
#endif
int
@ -68,6 +69,7 @@ assert_test(int val)
{
return assertTest(val);
}
#endif
/* end of file */

View File

@ -7,8 +7,10 @@ int unlisten(char *relname);
int max(int x, int y);
int min(int x, int y);
int assert_enable(int val);
#ifdef ASSERT_CHECKING_TEST
int assert_test(int val);
#endif
#endif

View File

@ -99,7 +99,7 @@ noup()
if (!isnull)
{
elog(WARN, "%s: update not allowed", args[i] );
elog(WARN, "%s: update not allowed", args[i]);
SPI_finish();
return NULL;
}

View File

@ -9,7 +9,7 @@
*
* Copyright (c) 1994, Regents of the University of California
*
* $Id: c.h,v 1.1 1998/10/31 04:10:53 scrappy Exp $
* $Id: c.h,v 1.2 1999/05/25 16:06:01 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -65,6 +65,7 @@
#ifndef __cplusplus
#ifndef bool
typedef char bool;
#endif /* ndef bool */
#endif /* not C++ */
typedef bool *BoolPtr;

View File

@ -1,11 +1,11 @@
#ifndef TCPIPV4
# define TCPIPV4
#endif
#define TCPIPV4
#endif /* */
#ifndef MAXSOCKETS
#define MAXSOCKETS 2048
#endif
#endif /* */
/*
* DEF_PGPORT is the TCP port number on which the Postmaster listens by

View File

@ -206,7 +206,8 @@ on_error_continue()
** get_result
**
*/
PGresult *get_result()
PGresult *
get_result()
{
char *cmdstatus = PQcmdStatus(res);
@ -214,7 +215,7 @@ PGresult *get_result()
/* we have to store the fetch location somewhere */
cmdstatus[0] = NUL;
memcpy(&cmdstatus[1],&tuple, sizeof(tuple));
memcpy(&cmdstatus[1], &tuple, sizeof(tuple));
return res;
}
@ -224,7 +225,8 @@ PGresult *get_result()
** set_result
**
*/
void set_result(PGresult *newres)
void
set_result(PGresult *newres)
{
char *cmdstatus = PQcmdStatus(res);
@ -256,7 +258,8 @@ void set_result(PGresult *newres)
** unset_result
**
*/
void unset_result(PGresult *oldres)
void
unset_result(PGresult *oldres)
{
char *cmdstatus = PQcmdStatus(oldres);
@ -277,8 +280,8 @@ void unset_result(PGresult *oldres)
** reset_fetch
**
*/
void reset_fetch()
void
reset_fetch()
{
tuple = 0;
}

View File

@ -17,7 +17,8 @@ OH, me, I'm Terry Mackintosh <terry@terrym.com>
HeapTuple moddatetime(void);
HeapTuple moddatetime()
HeapTuple
moddatetime()
{
Trigger *trigger; /* to get trigger name */
int nargs; /* # of arguments */
@ -65,24 +66,27 @@ HeapTuple moddatetime()
/* Get the current datetime. */
newdt = datetime_in("now");
/* This gets the position in the turple of the field we want.
args[0] being the name of the field to update, as passed in
from the trigger.
/*
* This gets the position in the turple of the field we want. args[0]
* being the name of the field to update, as passed in from the
* trigger.
*/
attnum = SPI_fnumber(tupdesc, args[0]);
/* This is were we check to see if the feild we are suppost to update even
exits. The above function must return -1 if name not found?
/*
* This is were we check to see if the feild we are suppost to update
* even exits. The above function must return -1 if name not found?
*/
if (attnum < 0)
elog(ERROR, "moddatetime (%s): there is no attribute %s", relname,
args[0]);
/* OK, this is where we make sure the datetime field that we are
modifying is really a datetime field.
Hay, error checking, what a novel idea !-)
/*
* OK, this is where we make sure the datetime field that we are
* modifying is really a datetime field. Hay, error checking, what a
* novel idea !-)
*/
if (SPI_gettypeid(tupdesc, attnum) != DATETIMEOID )
if (SPI_gettypeid(tupdesc, attnum) != DATETIMEOID)
elog(ERROR, "moddatetime (%s): attribute %s must be of DATETIME type",
relname, args[0]);

View File

@ -1,24 +1,27 @@
#include <stdio.h>
char *strtoupper(char *string)
char *
strtoupper(char *string)
{
int i ;
for (i=0;i<strlen(string);i++)
{
string[i]=toupper(string[i]);
}
int i;
for (i = 0; i < strlen(string); i++)
string[i] = toupper(string[i]);
return string;
}
void main ( char argc , char **argv )
void
main(char argc, char **argv)
{
char str[250];
int sw = 0 ;
while ( fgets (str,240,stdin) )
int sw = 0;
while (fgets(str, 240, stdin))
{
if ( sw == 0 ) printf("%s",strtoupper(str));
if (sw == 0)
printf("%s", strtoupper(str));
}
}

View File

@ -61,7 +61,7 @@ check_primary_key()
* Some checks first...
*/
#ifdef DEBUG_QUERY
elog(NOTICE,"Check_primary_key Enter Function");
elog(NOTICE, "Check_primary_key Enter Function");
#endif
/* Called by trigger manager ? */
if (!CurrentTriggerData)
@ -228,7 +228,7 @@ check_foreign_key()
Trigger *trigger; /* to get trigger name */
int nargs; /* # of args specified in CREATE TRIGGER */
char **args; /* arguments: as described above */
char **args_temp ;
char **args_temp;
int nrefs; /* number of references (== # of plans) */
char action; /* 'R'estrict | 'S'etnull | 'C'ascade */
int nkeys; /* # of key columns */
@ -244,13 +244,15 @@ check_foreign_key()
bool isequal = true; /* are keys in both tuples equal (in
* UPDATE) */
char ident[2 * NAMEDATALEN]; /* to identify myself */
int is_update=0;
int is_update = 0;
int ret;
int i,
r;
#ifdef DEBUG_QUERY
elog(NOTICE,"Check_foreign_key Enter Function");
elog(NOTICE, "Check_foreign_key Enter Function");
#endif
/*
* Some checks first...
*/
@ -275,11 +277,11 @@ check_foreign_key()
* key in tg_newtuple is the same as in tg_trigtuple then nothing to
* do.
*/
is_update=0;
is_update = 0;
if (TRIGGER_FIRED_BY_UPDATE(CurrentTriggerData->tg_event))
{
newtuple = CurrentTriggerData->tg_newtuple;
is_update=1;
is_update = 1;
}
trigger = CurrentTriggerData->tg_trigger;
nargs = trigger->tgnargs;
@ -397,13 +399,14 @@ check_foreign_key()
{
void *pplan;
char sql[8192];
char **args2 = args ;
char **args2 = args;
plan->splan = (void **) malloc(nrefs * sizeof(void *));
for (r = 0; r < nrefs; r++)
{
relname = args2[0];
/*
* For 'R'estrict action we construct SELECT query - SELECT 1
* FROM _referencing_relation_ WHERE Fkey1 = $1 [AND Fkey2 =
@ -418,49 +421,58 @@ check_foreign_key()
* FROM _referencing_relation_ WHERE Fkey1 = $1 [AND Fkey2 =
* $2 [...]] - to delete all referencing tuples.
*/
/*Max : Cascade with UPDATE query i create update query that
updates new key values in referenced tables
/*
* Max : Cascade with UPDATE query i create update query that
* updates new key values in referenced tables
*/
else if (action == 'c'){
else if (action == 'c')
{
if (is_update == 1)
{
int fn;
char *nv;
int k ;
int k;
sprintf(sql, "update %s set ", relname);
for (k = 1; k <= nkeys; k++)
{
int is_char_type =0;
int is_char_type = 0;
char *type;
fn = SPI_fnumber(tupdesc, args_temp[k-1]);
fn = SPI_fnumber(tupdesc, args_temp[k - 1]);
nv = SPI_getvalue(newtuple, tupdesc, fn);
type=SPI_gettype(tupdesc,fn);
type = SPI_gettype(tupdesc, fn);
if ( (strcmp(type,"text") && strcmp (type,"varchar") &&
strcmp(type,"char") && strcmp (type,"bpchar") &&
strcmp(type,"date") && strcmp (type,"datetime")) == 0 )
is_char_type=1;
if ((strcmp(type, "text") && strcmp(type, "varchar") &&
strcmp(type, "char") && strcmp(type, "bpchar") &&
strcmp(type, "date") && strcmp(type, "datetime")) == 0)
is_char_type = 1;
#ifdef DEBUG_QUERY
elog(NOTICE,"Check_foreign_key Debug value %s type %s %d",
nv,type,is_char_type);
elog(NOTICE, "Check_foreign_key Debug value %s type %s %d",
nv, type, is_char_type);
#endif
/* is_char_type =1 i set ' ' for define a new value
/*
* is_char_type =1 i set ' ' for define a new
* value
*/
sprintf(sql + strlen(sql), " %s = %s%s%s %s ",
args2[k], (is_char_type>0) ? "'" :"" ,
nv, (is_char_type >0) ? "'" :"",(k < nkeys) ? ", " : "");
is_char_type=0;
args2[k], (is_char_type > 0) ? "'" : "",
nv, (is_char_type > 0) ? "'" : "", (k < nkeys) ? ", " : "");
is_char_type = 0;
}
strcat(sql, " where ");
}
else /* DELETE */
else
/* DELETE */
sprintf(sql, "delete from %s where ", relname);
}
/*
* For 'S'etnull action we construct UPDATE query - UPDATE
* _referencing_relation_ SET Fkey1 null [, Fkey2 null [...]]
@ -505,7 +517,7 @@ check_foreign_key()
}
plan->nplans = nrefs;
#ifdef DEBUG_QUERY
elog(NOTICE,"Check_foreign_key Debug Query is : %s ", sql);
elog(NOTICE, "Check_foreign_key Debug Query is : %s ", sql);
#endif
}

View File

@ -352,6 +352,7 @@ c_charin(char *str)
{
return (string_input(str, 1, 0, NULL));
}
#endif
/* end of file */

View File

@ -14,6 +14,7 @@ char *c_varcharout(char *s);
#if 0
struct varlena *c_textin(char *str);
char *c_char16in(char *str);
#endif
#endif

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/contrib/vacuumlo/vacuumlo.c,v 1.1 1999/04/10 16:48:05 peter Exp $
* $Header: /cvsroot/pgsql/contrib/vacuumlo/vacuumlo.c,v 1.2 1999/05/25 16:06:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -25,18 +25,20 @@
#define BUFSIZE 1024
int vacuumlo(char *,int);
int vacuumlo(char *, int);
/*
* This vacuums a database. It returns 1 on success, -1 on failure.
*/
int vacuumlo(char *database,int verbose)
int
vacuumlo(char *database, int verbose)
{
PGconn *conn;
PGresult *res, *res2;
PGresult *res,
*res2;
char buf[BUFSIZE];
int matched=0; /* Number matched per scan */
int matched = 0; /* Number matched per scan */
int i;
conn = PQsetdb(NULL, NULL, NULL, NULL, database);
@ -49,50 +51,54 @@ int vacuumlo(char *database,int verbose)
return -1;
}
if(verbose)
fprintf(stdout,"Connected to %s\n",database);
if (verbose)
fprintf(stdout, "Connected to %s\n", database);
/*
* First we create and populate the lo temp table
*/
buf[0]='\0';
strcat(buf,"SELECT oid AS lo ");
strcat(buf,"INTO TEMP TABLE vacuum_l ");
strcat(buf,"FROM pg_class ");
strcat(buf,"WHERE relkind='l'");
if(!(res = PQexec(conn,buf))) {
fprintf(stderr,"Failed to create temp table.\n");
buf[0] = '\0';
strcat(buf, "SELECT oid AS lo ");
strcat(buf, "INTO TEMP TABLE vacuum_l ");
strcat(buf, "FROM pg_class ");
strcat(buf, "WHERE relkind='l'");
if (!(res = PQexec(conn, buf)))
{
fprintf(stderr, "Failed to create temp table.\n");
PQfinish(conn);
return -1;
}
PQclear(res);
/*
* Now find any candidate tables who have columns of type oid (the column
* oid is ignored, as it has attnum < 1)
* Now find any candidate tables who have columns of type oid (the
* column oid is ignored, as it has attnum < 1)
*/
buf[0]='\0';
strcat(buf,"SELECT c.relname, a.attname ");
strcat(buf,"FROM pg_class c, pg_attribute a, pg_type t ");
strcat(buf,"WHERE a.attnum > 0 ");
strcat(buf," AND a.attrelid = c.oid ");
strcat(buf," AND a.atttypid = t.oid ");
strcat(buf," AND t.typname = 'oid' ");
strcat(buf," AND c.relname NOT LIKE 'pg_%'");
if(!(res = PQexec(conn,buf))) {
fprintf(stderr,"Failed to create temp table.\n");
buf[0] = '\0';
strcat(buf, "SELECT c.relname, a.attname ");
strcat(buf, "FROM pg_class c, pg_attribute a, pg_type t ");
strcat(buf, "WHERE a.attnum > 0 ");
strcat(buf, " AND a.attrelid = c.oid ");
strcat(buf, " AND a.atttypid = t.oid ");
strcat(buf, " AND t.typname = 'oid' ");
strcat(buf, " AND c.relname NOT LIKE 'pg_%'");
if (!(res = PQexec(conn, buf)))
{
fprintf(stderr, "Failed to create temp table.\n");
PQfinish(conn);
return -1;
}
for(i=0;i<PQntuples(res);i++)
for (i = 0; i < PQntuples(res); i++)
{
char *table,*field;
char *table,
*field;
table = PQgetvalue(res,i,0);
field = PQgetvalue(res,i,1);
table = PQgetvalue(res, i, 0);
field = PQgetvalue(res, i, 1);
if(verbose) {
fprintf(stdout,"Checking %s in %s: ",field,table);
if (verbose)
{
fprintf(stdout, "Checking %s in %s: ", field, table);
fflush(stdout);
}
@ -100,23 +106,25 @@ int vacuumlo(char *database,int verbose)
PQclear(res2);
buf[0] = '\0';
strcat(buf,"DELETE FROM vacuum_l ");
strcat(buf,"WHERE lo IN (");
strcat(buf,"SELECT ");
strcat(buf,field);
strcat(buf," FROM ");
strcat(buf,table);
strcat(buf,");");
if(!(res2 = PQexec(conn,buf))) {
fprintf(stderr,"Failed to check %s in table %s\n",field,table);
strcat(buf, "DELETE FROM vacuum_l ");
strcat(buf, "WHERE lo IN (");
strcat(buf, "SELECT ");
strcat(buf, field);
strcat(buf, " FROM ");
strcat(buf, table);
strcat(buf, ");");
if (!(res2 = PQexec(conn, buf)))
{
fprintf(stderr, "Failed to check %s in table %s\n", field, table);
PQclear(res);
PQfinish(conn);
return -1;
}
if(PQresultStatus(res2)!=PGRES_COMMAND_OK) {
if (PQresultStatus(res2) != PGRES_COMMAND_OK)
{
fprintf(stderr,
"Failed to check %s in table %s\n%s\n",
field,table,
field, table,
PQerrorMessage(conn)
);
PQclear(res2);
@ -139,27 +147,28 @@ int vacuumlo(char *database,int verbose)
/*
* Finally, those entries remaining in vacuum_l are orphans.
*/
buf[0]='\0';
strcat(buf,"SELECT lo ");
strcat(buf,"FROM vacuum_l");
if(!(res = PQexec(conn,buf))) {
fprintf(stderr,"Failed to read temp table.\n");
buf[0] = '\0';
strcat(buf, "SELECT lo ");
strcat(buf, "FROM vacuum_l");
if (!(res = PQexec(conn, buf)))
{
fprintf(stderr, "Failed to read temp table.\n");
PQfinish(conn);
return -1;
}
matched=PQntuples(res);
for(i=0;i<matched;i++)
matched = PQntuples(res);
for (i = 0; i < matched; i++)
{
Oid lo = (Oid) atoi(PQgetvalue(res,i,0));
Oid lo = (Oid) atoi(PQgetvalue(res, i, 0));
if(verbose) {
fprintf(stdout,"\rRemoving lo %6d \n",lo);
if (verbose)
{
fprintf(stdout, "\rRemoving lo %6d \n", lo);
fflush(stdout);
}
if(lo_unlink(conn,lo)<0) {
fprintf(stderr,"Failed to remove lo %d\n",lo);
}
if (lo_unlink(conn, lo) < 0)
fprintf(stderr, "Failed to remove lo %d\n", lo);
}
PQclear(res);
@ -170,8 +179,8 @@ int vacuumlo(char *database,int verbose)
PQclear(res);
PQfinish(conn);
if(verbose)
fprintf(stdout,"\rRemoved %d large objects from %s.\n",matched,database);
if (verbose)
fprintf(stdout, "\rRemoved %d large objects from %s.\n", matched, database);
return 0;
}
@ -181,7 +190,7 @@ main(int argc, char **argv)
{
int verbose = 0;
int arg;
int rc=0;
int rc = 0;
if (argc < 2)
{
@ -190,11 +199,12 @@ main(int argc, char **argv)
exit(1);
}
for(arg=1;arg<argc;arg++) {
if(strcmp("-v",argv[arg])==0)
verbose=!verbose;
for (arg = 1; arg < argc; arg++)
{
if (strcmp("-v", argv[arg]) == 0)
verbose = !verbose;
else
rc += vacuumlo(argv[arg],verbose);
rc += vacuumlo(argv[arg], verbose);
}
return rc;
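
vacuumlo above assembles its SQL piecewise with strcat(): it collects all large-object OIDs into a temp table vacuum_l, deletes from it every OID still referenced by an oid-typed user column, and lo_unlink()s whatever remains. Consolidated, the statements it runs amount to the following, shown as C string constants; "mytable" and "mycol" stand in for each (table, oid column) pair the second query returns.

#include <stdio.h>

/* The queries vacuumlo builds above, consolidated into whole statements. */
static const char *const setup_sql =
    "SELECT oid AS lo INTO TEMP TABLE vacuum_l "
    "FROM pg_class WHERE relkind='l'";
static const char *const find_oid_columns_sql =
    "SELECT c.relname, a.attname "
    "FROM pg_class c, pg_attribute a, pg_type t "
    "WHERE a.attnum > 0 "
    "  AND a.attrelid = c.oid "
    "  AND a.atttypid = t.oid "
    "  AND t.typname = 'oid' "
    "  AND c.relname NOT LIKE 'pg_%'";
static const char *const prune_sql =
    "DELETE FROM vacuum_l WHERE lo IN (SELECT mycol FROM mytable);";
static const char *const orphans_sql =
    "SELECT lo FROM vacuum_l";

int
main(void)
{
    printf("%s\n%s\n%s\n%s\n", setup_sql, find_oid_columns_sql,
           prune_sql, orphans_sql);
    return 0;
}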

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.50 1999/03/14 20:17:20 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.51 1999/05/25 16:06:35 momjian Exp $
*
* NOTES
* The old interface functions have been converted to macros
@ -124,7 +124,7 @@ DataFill(char *data,
*bitP |= bitmask;
}
data = (char *)att_align((long)data, att[i]->attlen, att[i]->attalign);
data = (char *) att_align((long) data, att[i]->attlen, att[i]->attalign);
switch (att[i]->attlen)
{
case -1:
@ -151,7 +151,7 @@ DataFill(char *data,
att[i]->attlen);
break;
}
data = (char *)att_addlength((long)data, att[i]->attlen, value[i]);
data = (char *) att_addlength((long) data, att[i]->attlen, value[i]);
}
}
@ -301,6 +301,7 @@ heap_getsysattr(HeapTuple tup, Buffer b, int attnum)
}
return (Datum) NULL;
}
#endif
/* ----------------
@ -376,6 +377,7 @@ nocachegetattr(HeapTuple tuple,
}
else
{
/*
* there's a null somewhere in the tuple
*/
@ -404,12 +406,13 @@ nocachegetattr(HeapTuple tuple,
int finalbit = attnum & 0x07;
/* check for nulls "before" final bit of last byte */
if ((~ bp[byte]) & ((1 << finalbit) - 1))
if ((~bp[byte]) & ((1 << finalbit) - 1))
slow = 1;
else
{
/* check for nulls in any "earlier" bytes */
int i;
for (i = 0; i < byte; i++)
{
if (bp[i] != 0xFF)
@ -439,6 +442,7 @@ nocachegetattr(HeapTuple tuple,
else if (!HeapTupleAllFixed(tuple))
{
int j;
/*
* In for(), we make this <= and not < because we want to test
* if we can go past it in initializing offsets.
@ -456,9 +460,9 @@ nocachegetattr(HeapTuple tuple,
/*
* If slow is zero, and we got here, we know that we have a tuple with
* no nulls or varlenas before the target attribute.
* If possible, we also want to initialize the remainder of the
* attribute cached offset values.
* no nulls or varlenas before the target attribute. If possible, we
* also want to initialize the remainder of the attribute cached
* offset values.
*/
if (!slow)
{

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.45 1999/05/10 00:44:50 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.46 1999/05/25 16:06:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -25,9 +25,9 @@
#include "libpq/pqformat.h"
#include "utils/syscache.h"
static void printtup_setup(DestReceiver* self, TupleDesc typeinfo);
static void printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver* self);
static void printtup_cleanup(DestReceiver* self);
static void printtup_setup(DestReceiver * self, TupleDesc typeinfo);
static void printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver * self);
static void printtup_cleanup(DestReceiver * self);
/* ----------------------------------------------------------------
* printtup / debugtup support
@ -43,7 +43,7 @@ static void printtup_cleanup(DestReceiver* self);
* ----------------
*/
int
getTypeOutAndElem(Oid type, Oid* typOutput, Oid* typElem)
getTypeOutAndElem(Oid type, Oid *typOutput, Oid *typElem)
{
HeapTuple typeTuple;
@ -54,6 +54,7 @@ getTypeOutAndElem(Oid type, Oid* typOutput, Oid* typElem)
if (HeapTupleIsValid(typeTuple))
{
Form_pg_type pt = (Form_pg_type) GETSTRUCT(typeTuple);
*typOutput = (Oid) pt->typoutput;
*typElem = (Oid) pt->typelem;
return OidIsValid(*typOutput);
@ -70,13 +71,15 @@ getTypeOutAndElem(Oid type, Oid* typOutput, Oid* typElem)
* Private state for a printtup destination object
* ----------------
*/
typedef struct { /* Per-attribute information */
typedef struct
{ /* Per-attribute information */
Oid typoutput; /* Oid for the attribute's type output fn */
Oid typelem; /* typelem value to pass to the output fn */
FmgrInfo finfo; /* Precomputed call info for typoutput */
} PrinttupAttrInfo;
typedef struct {
typedef struct
{
DestReceiver pub; /* publicly-known function pointers */
TupleDesc attrinfo; /* The attr info we are set up for */
int nattrs;
@ -87,10 +90,10 @@ typedef struct {
* Initialize: create a DestReceiver for printtup
* ----------------
*/
DestReceiver*
DestReceiver *
printtup_create_DR()
{
DR_printtup* self = (DR_printtup*) palloc(sizeof(DR_printtup));
DR_printtup *self = (DR_printtup *) palloc(sizeof(DR_printtup));
self->pub.receiveTuple = printtup;
self->pub.setup = printtup_setup;
@ -100,11 +103,11 @@ printtup_create_DR()
self->nattrs = 0;
self->myinfo = NULL;
return (DestReceiver*) self;
return (DestReceiver *) self;
}
static void
printtup_setup(DestReceiver* self, TupleDesc typeinfo)
printtup_setup(DestReceiver * self, TupleDesc typeinfo)
{
/* ----------------
* We could set up the derived attr info at this time, but we postpone it
@ -120,7 +123,7 @@ printtup_setup(DestReceiver* self, TupleDesc typeinfo)
}
static void
printtup_prepare_info(DR_printtup* myState, TupleDesc typeinfo, int numAttrs)
printtup_prepare_info(DR_printtup * myState, TupleDesc typeinfo, int numAttrs)
{
int i;
@ -131,11 +134,12 @@ printtup_prepare_info(DR_printtup* myState, TupleDesc typeinfo, int numAttrs)
myState->nattrs = numAttrs;
if (numAttrs <= 0)
return;
myState->myinfo = (PrinttupAttrInfo*)
myState->myinfo = (PrinttupAttrInfo *)
palloc(numAttrs * sizeof(PrinttupAttrInfo));
for (i = 0; i < numAttrs; i++)
{
PrinttupAttrInfo* thisState = myState->myinfo + i;
PrinttupAttrInfo *thisState = myState->myinfo + i;
if (getTypeOutAndElem((Oid) typeinfo->attrs[i]->atttypid,
&thisState->typoutput, &thisState->typelem))
fmgr_info(thisState->typoutput, &thisState->finfo);
@ -147,9 +151,9 @@ printtup_prepare_info(DR_printtup* myState, TupleDesc typeinfo, int numAttrs)
* ----------------
*/
static void
printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver* self)
printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver * self)
{
DR_printtup *myState = (DR_printtup*) self;
DR_printtup *myState = (DR_printtup *) self;
StringInfoData buf;
int i,
j,
@ -178,7 +182,7 @@ printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver* self)
k = 1 << 7;
for (i = 0; i < tuple->t_data->t_natts; ++i)
{
if (! heap_attisnull(tuple, i + 1))
if (!heap_attisnull(tuple, i + 1))
j |= k; /* set bit if not null */
k >>= 1;
if (k == 0) /* end of byte? */
@ -197,7 +201,8 @@ printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver* self)
*/
for (i = 0; i < tuple->t_data->t_natts; ++i)
{
PrinttupAttrInfo* thisState = myState->myinfo + i;
PrinttupAttrInfo *thisState = myState->myinfo + i;
attr = heap_getattr(tuple, i + 1, typeinfo, &isnull);
if (isnull)
continue;
@ -223,9 +228,10 @@ printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver* self)
* ----------------
*/
static void
printtup_cleanup(DestReceiver* self)
printtup_cleanup(DestReceiver * self)
{
DR_printtup* myState = (DR_printtup*) self;
DR_printtup *myState = (DR_printtup *) self;
if (myState->myinfo)
pfree(myState->myinfo);
pfree(myState);
@ -274,7 +280,7 @@ showatts(char *name, TupleDesc tupleDesc)
* ----------------
*/
void
debugtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver* self)
debugtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver * self)
{
int i;
Datum attr;
@ -310,7 +316,7 @@ debugtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver* self)
* ----------------
*/
void
printtup_internal(HeapTuple tuple, TupleDesc typeinfo, DestReceiver* self)
printtup_internal(HeapTuple tuple, TupleDesc typeinfo, DestReceiver * self)
{
StringInfoData buf;
int i,
@ -334,7 +340,7 @@ printtup_internal(HeapTuple tuple, TupleDesc typeinfo, DestReceiver* self)
k = 1 << 7;
for (i = 0; i < tuple->t_data->t_natts; ++i)
{
if (! heap_attisnull(tuple, i + 1))
if (!heap_attisnull(tuple, i + 1))
j |= k; /* set bit if not null */
k >>= 1;
if (k == 0) /* end of byte? */
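
The printtup changes above are part of routing tuple output through a DestReceiver: a small struct of public function pointers (setup, receiveTuple, cleanup) followed by private per-destination state, so the executor hands rows to any destination through the same calls. A stripped-down sketch of that callback-struct pattern, with invented names rather than the backend's, is:

#include <stdio.h>
#include <stdlib.h>

/* A miniature version of the DestReceiver idea: public function pointers
 * first, private state after, so callers only see the former. */
typedef struct Receiver Receiver;
struct Receiver
{
    void    (*setup) (Receiver *self);
    void    (*receive) (Receiver *self, const char *row);
    void    (*cleanup) (Receiver *self);
};

typedef struct
{
    Receiver    pub;            /* publicly-known function pointers */
    int         count;          /* private state: rows seen so far */
} CountingReceiver;

static void
counting_setup(Receiver *self)
{
    ((CountingReceiver *) self)->count = 0;
}

static void
counting_receive(Receiver *self, const char *row)
{
    CountingReceiver *my = (CountingReceiver *) self;

    my->count++;
    printf("row %d: %s\n", my->count, row);
}

static void
counting_cleanup(Receiver *self)
{
    free(self);
}

static Receiver *
counting_create(void)
{
    CountingReceiver *self = malloc(sizeof(CountingReceiver));

    self->pub.setup = counting_setup;
    self->pub.receive = counting_receive;
    self->pub.cleanup = counting_cleanup;
    return &self->pub;
}

int
main(void)
{
    Receiver   *r = counting_create();

    r->setup(r);
    r->receive(r, "hello");
    r->receive(r, "world");
    r->cleanup(r);
    return 0;
}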

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/scankey.c,v 1.13 1999/02/13 23:14:13 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/common/scankey.c,v 1.14 1999/05/25 16:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.48 1999/02/13 23:14:14 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.49 1999/05/25 16:06:42 momjian Exp $
*
* NOTES
* some of the executor utility code such as "ExecTypeFromTL" should be

View File

@ -344,7 +344,7 @@ gistinsert(Relation r, Datum *datum, char *nulls, ItemPointer ht_ctid, Relation
/*
* Notes in ExecUtils:ExecOpenIndices()
*
RelationSetLockForWrite(r);
* RelationSetLockForWrite(r);
*/
res = gistdoinsert(r, itup, &giststate);
@ -1106,10 +1106,10 @@ gistdelete(Relation r, ItemPointer tid)
Page page;
/*
* Notes in ExecUtils:ExecOpenIndices()
* Also note that only vacuum deletes index tuples now...
* Notes in ExecUtils:ExecOpenIndices() Also note that only vacuum
* deletes index tuples now...
*
RelationSetLockForWrite(r);
* RelationSetLockForWrite(r);
*/
blkno = ItemPointerGetBlockNumber(tid);

View File

@ -68,7 +68,7 @@ gistbeginscan(Relation r,
/*
* Let index_beginscan does its work...
*
RelationSetLockForRead(r);
* RelationSetLockForRead(r);
*/
s = RelationGetIndexScan(r, fromEnd, nkeys, key);

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.25 1999/02/13 23:14:17 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.26 1999/05/25 16:06:54 momjian Exp $
*
* NOTES
* This file contains only the public interface routines.
@ -485,7 +485,7 @@ hashrestrpos(IndexScanDesc scan)
/* bump lock on currentMarkData and copy to currentItemData */
if (ItemPointerIsValid(&(scan->currentMarkData)))
{
so->hashso_curbuf =_hash_getbuf(scan->relation,
so->hashso_curbuf = _hash_getbuf(scan->relation,
BufferGetBlockNumber(so->hashso_mrkbuf),
HASH_READ);

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.16 1999/03/14 16:27:59 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.17 1999/05/25 16:06:56 momjian Exp $
*
* NOTES
* These functions are stored in pg_amproc. For each operator class
@ -34,9 +34,9 @@ hashint4(uint32 key)
}
uint32
hashint8(int64 *key)
hashint8(int64 * key)
{
return ~((uint32)*key);
return ~((uint32) *key);
}
/* Hash function from Chris Torek. */

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashpage.c,v 1.19 1999/02/13 23:14:20 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashpage.c,v 1.20 1999/05/25 16:06:58 momjian Exp $
*
* NOTES
* Postgres hash pages look like ordinary relation pages. The opaque

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.42 1999/03/28 20:31:56 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.43 1999/05/25 16:07:04 momjian Exp $
*
*
* INTERFACE ROUTINES
@ -439,7 +439,8 @@ heapgettup(Relation relation,
}
else
{
++lpp; /* move forward in this page's ItemId array */
++lpp; /* move forward in this page's ItemId
* array */
++lineoff;
}
}
@ -816,6 +817,7 @@ heap_getnext(HeapScanDesc scandesc, int backw)
}
else
{ /* NONTUP */
/*
* Don't release scan->rs_cbuf at this point, because
* heapgettup doesn't increase PrivateRefCount if it is
@ -897,6 +899,7 @@ heap_getnext(HeapScanDesc scandesc, int backw)
}
else
{ /* NONTUP */
/*
* Don't release scan->rs_cbuf at this point, because
* heapgettup doesn't increase PrivateRefCount if it is
@ -1093,9 +1096,7 @@ heap_insert(Relation relation, HeapTuple tup)
RelationPutHeapTupleAtEnd(relation, tup);
if (IsSystemRelationName(RelationGetRelationName(relation)->data))
{
RelationInvalidateHeapTuple(relation, tup);
}
return tup->t_data->t_oid;
}
@ -1283,11 +1284,12 @@ l2:
RelationPutHeapTuple(relation, buffer, newtup);
else
{
/*
* New item won't fit on same page as old item, have to look
* for a new place to put it. Note that we have to unlock
* current buffer context - not good but RelationPutHeapTupleAtEnd
* uses extend lock.
* New item won't fit on same page as old item, have to look for a
* new place to put it. Note that we have to unlock current buffer
* context - not good but RelationPutHeapTupleAtEnd uses extend
* lock.
*/
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
RelationPutHeapTupleAtEnd(relation, newtup);
@ -1295,8 +1297,8 @@ l2:
}
/*
* New item in place, now record address of new tuple in
* t_ctid of old one.
* New item in place, now record address of new tuple in t_ctid of old
* one.
*/
oldtup.t_data->t_ctid = newtup->t_self;

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Id: hio.c,v 1.19 1999/05/07 01:22:53 vadim Exp $
* $Id: hio.c,v 1.20 1999/05/25 16:07:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -68,7 +68,7 @@ RelationPutHeapTuple(Relation relation,
/*
* Let the caller do this!
*
WriteBuffer(buffer);
* WriteBuffer(buffer);
*/
/* return an accurate tuple */
@ -111,8 +111,8 @@ RelationPutHeapTupleAtEnd(Relation relation, HeapTuple tuple)
Item item;
/*
* Lock relation for extention. We can use LockPage here as long as
* in all other places we use page-level locking for indices only.
* Lock relation for extention. We can use LockPage here as long as in
* all other places we use page-level locking for indices only.
* Alternatevely, we could define pseudo-table as we do for
* transactions with XactLockTable.
*/
@ -132,6 +132,7 @@ RelationPutHeapTupleAtEnd(Relation relation, HeapTuple tuple)
{
buffer = ReadBuffer(relation, lastblock);
pageHeader = (Page) BufferGetPage(buffer);
/*
* There was IF instead of ASSERT here ?!
*/

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/index/genam.c,v 1.16 1999/02/13 23:14:29 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/index/genam.c,v 1.17 1999/05/25 16:07:12 momjian Exp $
*
* NOTES
* many of the old access method routines have been turned into
@ -270,5 +270,5 @@ IndexScanRestorePosition(IndexScanDesc scan)
scan->flags = 0x0; /* XXX should have a symbolic name */
}
#endif
#endif

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/index/Attic/istrat.c,v 1.31 1999/02/13 23:14:30 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/index/Attic/istrat.c,v 1.32 1999/05/25 16:07:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtcompare.c,v 1.22 1999/03/14 05:08:56 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtcompare.c,v 1.23 1999/05/25 16:07:21 momjian Exp $
*
* NOTES
* These functions are stored in pg_amproc. For each operator class
@ -40,7 +40,7 @@ btint4cmp(int32 a, int32 b)
}
int32
btint8cmp(int64 *a, int64 *b)
btint8cmp(int64 * a, int64 * b)
{
if (*a > *b)
return 1;

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.39 1999/05/01 16:09:45 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.40 1999/05/25 16:07:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -122,9 +122,10 @@ l1:
*/
while (_bt_isequal(itupdesc, page, offset, natts, itup_scankey))
{ /* they're equal */
/*
* Have to check is inserted heap tuple deleted one
* (i.e. just moved to another place by vacuum)!
* Have to check is inserted heap tuple deleted one (i.e.
* just moved to another place by vacuum)!
*/
if (chtup)
{
@ -156,7 +157,7 @@ l1:
_bt_relbuf(rel, nbuf, BT_READ);
_bt_relbuf(rel, buf, BT_WRITE);
XactLockTableWait(xwait);
goto l1; /* continue from the begin */
goto l1;/* continue from the begin */
}
elog(ERROR, "Cannot insert a duplicate key into a unique index");
}
@ -571,7 +572,7 @@ _bt_insertonpg(Relation rel,
* reasoning).
*/
l_spl:;
l_spl: ;
if (stack == (BTStack) NULL)
{
if (!is_root) /* if this page was not root page */

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.20 1999/04/22 08:19:59 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.21 1999/05/25 16:07:26 momjian Exp $
*
* NOTES
* Postgres btree pages look like ordinary relation pages. The opaque
@ -497,14 +497,13 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access)
if (stack->bts_offset == InvalidOffsetNumber ||
maxoff >= stack->bts_offset)
{
/*
* _bt_insertonpg set bts_offset to InvalidOffsetNumber
* in the case of concurrent ROOT page split
* _bt_insertonpg set bts_offset to InvalidOffsetNumber in the
* case of concurrent ROOT page split
*/
if (stack->bts_offset == InvalidOffsetNumber)
{
i = P_RIGHTMOST(opaque) ? P_HIKEY : P_FIRSTKEY;
}
else
{
itemid = PageGetItemId(page, stack->bts_offset);
@ -524,7 +523,7 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access)
}
/* if the item has just moved right on this page, we're done */
for ( ;
for (;
i <= maxoff;
i = OffsetNumberNext(i))
{

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.37 1999/03/28 20:31:58 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.38 1999/05/25 16:07:27 momjian Exp $
*
* NOTES
* This file contains only the public interface routines.
@ -391,9 +391,10 @@ btgettuple(IndexScanDesc scan, ScanDirection dir)
if (ItemPointerIsValid(&(scan->currentItemData)))
{
/*
* Restore scan position using heap TID returned
* by previous call to btgettuple().
* Restore scan position using heap TID returned by previous call
* to btgettuple().
*/
_bt_restscan(scan);
res = _bt_next(scan, dir);
@ -623,10 +624,9 @@ _bt_restscan(IndexScanDesc scan)
BlockNumber blkno;
/*
* We use this as flag when first index tuple on page
* is deleted but we do not move left (this would
* slowdown vacuum) - so we set current->ip_posid
* before first index tuple on the current page
* We use this as flag when first index tuple on page is deleted but
* we do not move left (this would slowdown vacuum) - so we set
* current->ip_posid before first index tuple on the current page
* (_bt_step will move it right)...
*/
if (!ItemPointerIsValid(&target))

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/Attic/nbtscan.c,v 1.20 1999/03/28 20:31:58 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/Attic/nbtscan.c,v 1.21 1999/05/25 16:07:29 momjian Exp $
*
*
* NOTES

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.43 1999/04/13 17:18:28 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.44 1999/05/25 16:07:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -706,7 +706,7 @@ _bt_next(IndexScanDesc scan, ScanDirection dir)
so = (BTScanOpaque) scan->opaque;
current = &(scan->currentItemData);
Assert (BufferIsValid(so->btso_curbuf));
Assert(BufferIsValid(so->btso_curbuf));
/* we still have the buffer pinned and locked */
buf = so->btso_curbuf;
@ -795,14 +795,14 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
if (ScanDirectionIsBackward(dir))
{
for (i=0; i<so->numberOfKeys; i++)
for (i = 0; i < so->numberOfKeys; i++)
{
if (so->keyData[i].sk_attno != 1)
break;
strat = _bt_getstrat(rel, so->keyData[i].sk_attno,
so->keyData[i].sk_procedure);
if (strat == BTLessStrategyNumber ||
strat == BTLessEqualStrategyNumber||
strat == BTLessEqualStrategyNumber ||
strat == BTEqualStrategyNumber)
{
nKeyIndex = i;
@ -1104,9 +1104,10 @@ _bt_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
rel = scan->relation;
current = &(scan->currentItemData);
/*
* Don't use ItemPointerGetOffsetNumber or you risk to get
* assertion due to ability of ip_posid to be equal 0.
 * Don't use ItemPointerGetOffsetNumber or you risk an assertion
 * failure, because ip_posid can be equal to 0 here.
*/
offnum = current->ip_posid;
page = BufferGetPage(*bufP);

View File

@ -5,7 +5,7 @@
*
*
* IDENTIFICATION
* $Id: nbtsort.c,v 1.38 1999/05/09 00:53:19 tgl Exp $
* $Id: nbtsort.c,v 1.39 1999/05/25 16:07:34 momjian Exp $
*
* NOTES
*
@ -552,8 +552,8 @@ _bt_spoolinit(Relation index, int ntapes, bool isunique)
btspool->bts_tape = 0;
btspool->isunique = isunique;
btspool->bts_itape =(BTTapeBlock **) palloc(sizeof(BTTapeBlock *) * ntapes);
btspool->bts_otape =(BTTapeBlock **) palloc(sizeof(BTTapeBlock *) * ntapes);
btspool->bts_itape = (BTTapeBlock **) palloc(sizeof(BTTapeBlock *) * ntapes);
btspool->bts_otape = (BTTapeBlock **) palloc(sizeof(BTTapeBlock *) * ntapes);
if (btspool->bts_itape == (BTTapeBlock **) NULL ||
btspool->bts_otape == (BTTapeBlock **) NULL)
elog(ERROR, "_bt_spoolinit: out of memory");

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.31 1999/02/13 23:14:42 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.32 1999/05/25 16:07:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -307,7 +307,7 @@ rtinsert(Relation r, Datum *datum, char *nulls, ItemPointer ht_ctid, Relation he
/*
* Notes in ExecUtils:ExecOpenIndices()
*
RelationSetLockForWrite(r);
* RelationSetLockForWrite(r);
*/
res = rtdoinsert(r, itup, &rtState);
@ -947,10 +947,10 @@ rtdelete(Relation r, ItemPointer tid)
Page page;
/*
* Notes in ExecUtils:ExecOpenIndices()
* Also note that only vacuum deletes index tuples now...
* Notes in ExecUtils:ExecOpenIndices() Also note that only vacuum
* deletes index tuples now...
*
RelationSetLockForWrite(r);
* RelationSetLockForWrite(r);
*/
blkno = ItemPointerGetBlockNumber(tid);

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.22 1999/02/13 23:14:43 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.23 1999/05/25 16:07:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -69,7 +69,7 @@ rtbeginscan(Relation r,
/*
* Let index_beginscan does its work...
*
RelationSetLockForRead(r);
* RelationSetLockForRead(r);
*/
s = RelationGetIndexScan(r, fromEnd, nkeys, key);

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/transam/transam.c,v 1.25 1999/03/30 01:37:21 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/transam.c,v 1.26 1999/05/25 16:07:45 momjian Exp $
*
* NOTES
* This file contains the high level access-method interface to the
@ -221,7 +221,7 @@ TransactionLogUpdate(TransactionId transactionId, /* trans id to update */
/*
* update (invalidate) our single item TransactionLogTest cache.
*
if (status != XID_COMMIT)
* if (status != XID_COMMIT)
*
* What's the hell ?! Why != XID_COMMIT ?!
*/

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/transam/varsup.c,v 1.19 1999/02/13 23:14:48 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/varsup.c,v 1.20 1999/05/25 16:07:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.35 1999/05/13 00:34:57 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.36 1999/05/25 16:07:50 momjian Exp $
*
* NOTES
* Transaction aborts can now occur two ways:
@ -299,6 +299,7 @@ IsTransactionState(void)
*/
return false;
}
#endif
/* --------------------------------
@ -1509,6 +1510,7 @@ AbortOutOfAnyTransaction()
*/
if (s->state != TRANS_DEFAULT)
AbortTransaction();
/*
* Now reset the high-level state
*/

View File

@ -5,7 +5,7 @@
*
* Copyright (c) 1994, Regents of the University of California
*
* $Id: xid.c,v 1.21 1999/02/13 23:14:49 momjian Exp $
* $Id: xid.c,v 1.22 1999/05/25 16:07:52 momjian Exp $
*
* OLD COMMENTS
* XXX WARNING

View File

@ -7,7 +7,7 @@
* Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/bootstrap/bootstrap.c,v 1.59 1999/05/10 00:44:52 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/bootstrap/bootstrap.c,v 1.60 1999/05/25 16:07:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -587,7 +587,9 @@ DefineAttr(char *name, char *type, int attnum)
printf("<%s %s> ", attrtypes[attnum]->attname.data, type);
attrtypes[attnum]->attnum = 1 + attnum; /* fillatt */
attlen = attrtypes[attnum]->attlen = Procid[typeoid].len;
/* Cheat like mad to fill in these items from the length only.
/*
* Cheat like mad to fill in these items from the length only.
* This only has to work for types used in the system catalogs...
*/
switch (attlen)

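The rewrapped comment above ("cheat like mad to fill in these items from the length only") describes deriving pass-by-value and alignment settings from nothing but the attribute length, which is only safe for the fixed-layout types used in the system catalogs. A standalone sketch of that kind of length-only mapping (a hypothetical helper, not the actual DefineAttr code):

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical illustration: derive storage properties from length only. */
    static void
    fill_from_length(int attlen, bool *byval, char *align)
    {
        switch (attlen)
        {
            case 1:
                *byval = true;  *align = 'c';   /* char-aligned */
                break;
            case 2:
                *byval = true;  *align = 's';   /* short-aligned */
                break;
            case 4:
                *byval = true;  *align = 'i';   /* int-aligned */
                break;
            default:
                *byval = false; *align = 'i';   /* pass by reference */
                break;
        }
    }

    int
    main(void)
    {
        bool    byval;
        char    align;

        fill_from_length(4, &byval, &align);
        printf("len 4: byval=%d align=%c\n", byval, align);
        return 0;
    }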
View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/catalog.c,v 1.20 1999/02/13 23:14:55 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/catalog.c,v 1.21 1999/05/25 16:08:01 momjian Exp $
*
*-------------------------------------------------------------------------
*/

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/heap.c,v 1.84 1999/05/22 04:12:24 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/heap.c,v 1.85 1999/05/25 16:08:03 momjian Exp $
*
*
* INTERFACE ROUTINES
@ -240,9 +240,7 @@ heap_create(char *relname,
nailme = true;
}
else
{
relid = newoid();
}
if (isnoname)
{
@ -719,6 +717,7 @@ AddNewRelationTuple(Relation pg_class_desc,
if (!isBootstrap)
{
/*
* First, open the catalog indices and insert index tuples for the
* new relation.
@ -814,6 +813,7 @@ heap_create_with_catalog(char *relname,
if (relid != InvalidOid)
{
/*
* This is heavy-handed, but appears necessary bjm 1999/02/01
* SystemCacheRelationFlushed(relid) is not enough either.
@ -1516,7 +1516,9 @@ StoreAttrDefault(Relation rel, AttrDefault *attrdef)
extern GlobalMemory CacheCxt;
start:
/* Surround table name with double quotes to allow mixed-case and
/*
* Surround table name with double quotes to allow mixed-case and
* whitespaces in names. - BGA 1998-11-14
*/
snprintf(str, MAX_PARSE_BUFFER,
@ -1541,14 +1543,14 @@ start:
if (IS_BINARY_COMPATIBLE(type, atp->atttypid))
; /* use without change */
else if (can_coerce_type(1, &(type), &(atp->atttypid)))
expr = coerce_type(NULL, (Node *)expr, type, atp->atttypid,
expr = coerce_type(NULL, (Node *) expr, type, atp->atttypid,
atp->atttypmod);
else if (IsA(expr, Const))
{
if (*cast != 0)
elog(ERROR, "DEFAULT clause const type '%s' mismatched with column type '%s'",
typeidTypeName(type), typeidTypeName(atp->atttypid));
snprintf(cast, 2*NAMEDATALEN, ":: %s", typeidTypeName(atp->atttypid));
snprintf(cast, 2 * NAMEDATALEN, ":: %s", typeidTypeName(atp->atttypid));
goto start;
}
else
@ -1598,7 +1600,8 @@ StoreRelCheck(Relation rel, ConstrCheck *check)
char nulls[4] = {' ', ' ', ' ', ' '};
extern GlobalMemory CacheCxt;
/* Check for table's existance. Surround table name with double-quotes
/*
* Check for table's existance. Surround table name with double-quotes
* to allow mixed-case and whitespace names. - thomas 1998-11-12
*/
snprintf(str, MAX_PARSE_BUFFER,

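Both reworded comments above concern the same trick: the table name is wrapped in double quotes before being spliced into an internally generated query, so mixed-case and whitespace names survive the trip back through the parser. A minimal illustration of that quoting step (assumed buffer size; embedded quote characters are not escaped, just as the comments only mention case and whitespace):

    #include <stdio.h>

    #define QUERY_BUF 256

    int
    main(void)
    {
        const char *relname = "My Table";   /* mixed case and a space */
        char        query[QUERY_BUF];

        /* Double-quote the identifier so the parser keeps its exact spelling. */
        snprintf(query, sizeof(query),
                 "select * from \"%s\" where 1=0", relname);
        puts(query);
        return 0;
    }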
View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.74 1999/05/17 00:27:45 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.75 1999/05/25 16:08:06 momjian Exp $
*
*
* INTERFACE ROUTINES
@ -686,7 +686,7 @@ UpdateIndexRelation(Oid indexoid,
predLen = VARSIZE(predText);
itupLen = predLen + sizeof(FormData_pg_index);
indexForm = (Form_pg_index) palloc(itupLen);
memset (indexForm, 0, sizeof(FormData_pg_index));
memset(indexForm, 0, sizeof(FormData_pg_index));
memmove((char *) &indexForm->indpred, (char *) predText, predLen);
@ -991,6 +991,7 @@ index_create(char *heapRelationName,
if (relid != InvalidOid)
{
/*
* This is heavy-handed, but appears necessary bjm 1999/02/01
* SystemCacheRelationFlushed(relid) is not enough either.
@ -1005,7 +1006,8 @@ index_create(char *heapRelationName,
{
temp_relname = pstrdup(indexRelationName); /* save original value */
indexRelationName = palloc(NAMEDATALEN);
strcpy(indexRelationName, temp_relname); /* heap_create will change this */
strcpy(indexRelationName, temp_relname); /* heap_create will
* change this */
}
/* ----------------

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/indexing.c,v 1.37 1999/05/10 00:44:55 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/indexing.c,v 1.38 1999/05/25 16:08:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -47,14 +47,14 @@
char *Name_pg_attr_indices[Num_pg_attr_indices] = {AttributeNameIndex,
AttributeNumIndex,
AttributeRelidIndex};
AttributeRelidIndex};
char *Name_pg_proc_indices[Num_pg_proc_indices] = {ProcedureNameIndex,
ProcedureOidIndex,
ProcedureSrcIndex};
ProcedureSrcIndex};
char *Name_pg_type_indices[Num_pg_type_indices] = {TypeNameIndex,
TypeOidIndex};
TypeOidIndex};
char *Name_pg_class_indices[Num_pg_class_indices] = {ClassNameIndex,
ClassOidIndex};
ClassOidIndex};
char *Name_pg_attrdef_indices[Num_pg_attrdef_indices] = {AttrDefaultIndex};
char *Name_pg_relcheck_indices[Num_pg_relcheck_indices] = {RelCheckIndex};

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_operator.c,v 1.36 1999/05/10 00:44:56 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_operator.c,v 1.37 1999/05/25 16:08:09 momjian Exp $
*
* NOTES
* these routines moved here from commands/define.c and somewhat cleaned up.
@ -135,6 +135,7 @@ OperatorGetWithOpenRelation(Relation pg_operator_desc,
if (HeapTupleIsValid(tup))
{
regproc oprcode = ((Form_pg_operator) GETSTRUCT(tup))->oprcode;
operatorObjectId = tup->t_data->t_oid;
*defined = RegProcedureIsValid(oprcode);
}
@ -506,8 +507,9 @@ OperatorDef(char *operatorName,
elog(ERROR, "OperatorDef: operator \"%s\" already defined",
operatorName);
/* At this point, if operatorObjectId is not InvalidOid then
* we are filling in a previously-created shell.
/*
* At this point, if operatorObjectId is not InvalidOid then we are
* filling in a previously-created shell.
*/
/* ----------------
@ -648,7 +650,8 @@ OperatorDef(char *operatorName,
values[i++] = ObjectIdGetDatum(leftTypeId);
values[i++] = ObjectIdGetDatum(rightTypeId);
++i; /* Skip "oprresult", it was filled in above */
++i; /* Skip "oprresult", it was filled in
* above */
/*
* Set up the other operators. If they do not currently exist, create
@ -704,7 +707,8 @@ OperatorDef(char *operatorName,
otherRightTypeName,
&otherDefined);
break;
case 3: /* right sort op takes right-side data type */
case 3: /* right sort op takes right-side data
* type */
otherLeftTypeName = rightTypeName;
otherRightTypeName = rightTypeName;
otherLeftTypeId = rightTypeId;
@ -737,8 +741,10 @@ OperatorDef(char *operatorName,
}
else
{
/* self-linkage to this operator; will fix below.
* Note that only self-linkage for commutation makes sense.
/*
* self-linkage to this operator; will fix below. Note
* that only self-linkage for commutation makes sense.
*/
if (j != 0)
elog(ERROR,
@ -804,15 +810,14 @@ OperatorDef(char *operatorName,
/*
* If a commutator and/or negator link is provided, update the other
* operator(s) to point at this one, if they don't already have a link.
* This supports an alternate style of operator definition wherein the
* user first defines one operator without giving negator or
* commutator, then defines the other operator of the pair with the
* operator(s) to point at this one, if they don't already have a
* link. This supports an alternate style of operator definition
* wherein the user first defines one operator without giving negator
* or commutator, then defines the other operator of the pair with the
* proper commutator or negator attribute. That style doesn't require
* creation of a shell, and it's the only style that worked right before
* Postgres version 6.5.
* This code also takes care of the situation where the new operator
* is its own commutator.
* creation of a shell, and it's the only style that worked right
* before Postgres version 6.5. This code also takes care of the
* situation where the new operator is its own commutator.
*/
if (selfCommutator)
commutatorId = operatorObjectId;
@ -869,7 +874,8 @@ OperatorUpd(Oid baseId, Oid commId, Oid negId)
tup = heap_getnext(pg_operator_scan, 0);
/* if the commutator and negator are the same operator, do one update.
/*
* if the commutator and negator are the same operator, do one update.
* XXX this is probably useless code --- I doubt it ever makes sense
* for commutator and negator to be the same thing...
*/
@ -1008,7 +1014,7 @@ OperatorCreate(char *operatorName,
if (!leftTypeName && !rightTypeName)
elog(ERROR, "OperatorCreate: at least one of leftarg or rightarg must be defined");
if (! (leftTypeName && rightTypeName))
if (!(leftTypeName && rightTypeName))
{
/* If it's not a binary op, these things mustn't be set: */
if (commutatorName)

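The reflowed comment block above describes back-linking commutator/negator pairs: when the second operator of a pair is defined, the first one's still-empty link is pointed at it, and a self-commutator simply ends up pointing at itself. A toy version of that linking rule over plain structs (illustrative only; it does not touch the real pg_operator catalog):

    #include <stdio.h>

    typedef struct ToyOperator
    {
        const char *name;
        struct ToyOperator *commutator;     /* NULL until the partner exists */
    } ToyOperator;

    /*
     * Link two operators as each other's commutator, but never overwrite
     * a link that is already set.  Passing the same operator twice covers
     * the self-commutator case.
     */
    static void
    link_commutators(ToyOperator *a, ToyOperator *b)
    {
        if (a->commutator == NULL)
            a->commutator = b;
        if (b->commutator == NULL)
            b->commutator = a;
    }

    int
    main(void)
    {
        ToyOperator lt = {"<", NULL};
        ToyOperator gt = {">", NULL};

        link_commutators(&lt, &gt);
        printf("%s commutes with %s\n", lt.name, lt.commutator->name);
        printf("%s commutes with %s\n", gt.name, gt.commutator->name);
        return 0;
    }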
View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_proc.c,v 1.28 1999/05/13 07:28:27 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_proc.c,v 1.29 1999/05/25 16:08:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -226,11 +226,11 @@ ProcedureCreate(char *procedureName,
* function name (the 'prosrc' value) is a known builtin function.
*
* NOTE: in Postgres versions before 6.5, the SQL name of the created
* function could not be different from the internal name, and 'prosrc'
* wasn't used. So there is code out there that does CREATE FUNCTION
* xyz AS '' LANGUAGE 'internal'. To preserve some modicum of
* backwards compatibility, accept an empty 'prosrc' value as meaning
* the supplied SQL function name.
* function could not be different from the internal name, and
* 'prosrc' wasn't used. So there is code out there that does CREATE
* FUNCTION xyz AS '' LANGUAGE 'internal'. To preserve some modicum
* of backwards compatibility, accept an empty 'prosrc' value as
* meaning the supplied SQL function name.
*/
if (strcmp(languageName, "internal") == 0)

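The rewrapped comment above spells out the backward-compatibility rule for 'internal' language functions: an empty 'prosrc' value is taken to mean the SQL-level function name itself. Reduced to its essence, the fallback looks like this (illustrative sketch, not the ProcedureCreate code):

    #include <stdio.h>

    /* If no internal name was supplied, fall back to the SQL name. */
    static const char *
    effective_internal_name(const char *procedure_name, const char *prosrc)
    {
        if (prosrc == NULL || prosrc[0] == '\0')
            return procedure_name;
        return prosrc;
    }

    int
    main(void)
    {
        printf("%s\n", effective_internal_name("xyz", ""));         /* -> xyz */
        printf("%s\n", effective_internal_name("xyz", "int4pl"));   /* -> int4pl */
        return 0;
    }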
View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_type.c,v 1.36 1999/04/20 03:51:14 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_type.c,v 1.37 1999/05/25 16:08:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -400,8 +400,8 @@ TypeCreate(char *typeName,
procname = procs[j];
/*
* First look for a 1-argument func with all argtypes 0.
* This is valid for all four kinds of procedure.
* First look for a 1-argument func with all argtypes 0. This is
* valid for all four kinds of procedure.
*/
MemSet(argList, 0, 8 * sizeof(Oid));
@ -413,20 +413,23 @@ TypeCreate(char *typeName,
if (!HeapTupleIsValid(tup))
{
/*
* For array types, the input procedures may take 3 args
* (data value, element OID, atttypmod); the pg_proc
* argtype signature is 0,0,INT4OID. The output procedures
* may take 2 args (data value, element OID).
* For array types, the input procedures may take 3 args (data
* value, element OID, atttypmod); the pg_proc argtype
* signature is 0,0,INT4OID. The output procedures may take 2
* args (data value, element OID).
*/
if (OidIsValid(elementObjectId))
{
int nargs;
if (j % 2)
{
/* output proc */
nargs = 2;
} else
}
else
{
/* input proc */
nargs = 3;

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/_deadcode/Attic/recipe.c,v 1.2 1999/03/16 04:25:46 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/_deadcode/Attic/recipe.c,v 1.3 1999/05/25 16:08:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/

View File

@ -9,7 +9,7 @@
* doesn't work! - jolly 8/19/95
*
*
* $Id: version.c,v 1.18 1999/02/13 23:15:12 momjian Exp $
* $Id: version.c,v 1.19 1999/05/25 16:08:32 momjian Exp $
*
* NOTES
* At the point the version is defined, 2 physical relations are created

View File

@ -6,7 +6,7 @@
* Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.46 1999/04/25 19:27:43 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.47 1999/05/25 16:08:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -164,22 +164,23 @@ Async_Notify(char *relname)
/*
* We allocate list memory from the global malloc pool to ensure that
* it will live until we want to use it. This is probably not necessary
* any longer, since we will use it before the end of the transaction.
* DLList only knows how to use malloc() anyway, but we could probably
* palloc() the strings...
* it will live until we want to use it. This is probably not
* necessary any longer, since we will use it before the end of the
* transaction. DLList only knows how to use malloc() anyway, but we
* could probably palloc() the strings...
*/
if (!pendingNotifies)
pendingNotifies = DLNewList();
notifyName = strdup(relname);
DLAddHead(pendingNotifies, DLNewElem(notifyName));
/*
* NOTE: we could check to see if pendingNotifies already has an entry
* for relname, and thus avoid making duplicate entries. However, most
* apps probably don't notify the same name multiple times per transaction,
* so we'd likely just be wasting cycles to make such a check.
* AsyncExistsPendingNotify() doesn't really care whether the list
* contains duplicates...
* for relname, and thus avoid making duplicate entries. However,
* most apps probably don't notify the same name multiple times per
* transaction, so we'd likely just be wasting cycles to make such a
* check. AsyncExistsPendingNotify() doesn't really care whether the
* list contains duplicates...
*/
}
@ -274,7 +275,7 @@ Async_Listen(char *relname, int pid)
/*
* now that we are listening, make sure we will unlisten before dying.
*/
if (! unlistenExitRegistered)
if (!unlistenExitRegistered)
{
if (on_shmem_exit(Async_UnlistenOnExit, (caddr_t) NULL) < 0)
elog(NOTICE, "Async_Listen: out of shmem_exit slots");
@ -326,7 +327,9 @@ Async_Unlisten(char *relname, int pid)
UnlockRelation(lRel, AccessExclusiveLock);
heap_close(lRel);
}
/* We do not complain about unlistening something not being listened;
/*
* We do not complain about unlistening something not being listened;
* should we?
*/
}
@ -398,11 +401,12 @@ Async_UnlistenAll()
static void
Async_UnlistenOnExit()
{
/*
* We need to start/commit a transaction for the unlisten,
* but if there is already an active transaction we had better
* abort that one first. Otherwise we'd end up committing changes
* that probably ought to be discarded.
* We need to start/commit a transaction for the unlisten, but if
* there is already an active transaction we had better abort that one
* first. Otherwise we'd end up committing changes that probably
* ought to be discarded.
*/
AbortOutOfAnyTransaction();
/* Now we can do the unlisten */
@ -450,12 +454,14 @@ AtCommit_Notify()
int32 listenerPID;
if (!pendingNotifies)
return; /* no NOTIFY statements in this transaction */
return; /* no NOTIFY statements in this
* transaction */
/* NOTIFY is disabled if not normal processing mode.
* This test used to be in xact.c, but it seems cleaner to do it here.
/*
* NOTIFY is disabled if not normal processing mode. This test used to
* be in xact.c, but it seems cleaner to do it here.
*/
if (! IsNormalProcessingMode())
if (!IsNormalProcessingMode())
{
ClearPendingNotifies();
return;
@ -487,10 +493,13 @@ AtCommit_Notify()
if (listenerPID == MyProcPid)
{
/* Self-notify: no need to bother with table update.
/*
* Self-notify: no need to bother with table update.
* Indeed, we *must not* clear the notification field in
* this path, or we could lose an outside notify, which'd be
* bad for applications that ignore self-notify messages.
* this path, or we could lose an outside notify, which'd
* be bad for applications that ignore self-notify
* messages.
*/
TPRINTF(TRACE_NOTIFY, "AtCommit_Notify: notifying self");
NotifyMyFrontEnd(relname, listenerPID);
@ -499,23 +508,27 @@ AtCommit_Notify()
{
TPRINTF(TRACE_NOTIFY, "AtCommit_Notify: notifying pid %d",
listenerPID);
/*
* If someone has already notified this listener,
* we don't bother modifying the table, but we do still send
* a SIGUSR2 signal, just in case that backend missed the
* earlier signal for some reason. It's OK to send the signal
* first, because the other guy can't read pg_listener until
* we unlock it.
* If someone has already notified this listener, we don't
* bother modifying the table, but we do still send a
* SIGUSR2 signal, just in case that backend missed the
* earlier signal for some reason. It's OK to send the
* signal first, because the other guy can't read
* pg_listener until we unlock it.
*/
#ifdef HAVE_KILL
if (kill(listenerPID, SIGUSR2) < 0)
{
/* Get rid of pg_listener entry if it refers to a PID
/*
* Get rid of pg_listener entry if it refers to a PID
* that no longer exists. Presumably, that backend
* crashed without deleting its pg_listener entries.
* This code used to only delete the entry if errno==ESRCH,
* but as far as I can see we should just do it for any
* failure (certainly at least for EPERM too...)
* This code used to only delete the entry if
* errno==ESRCH, but as far as I can see we should
* just do it for any failure (certainly at least for
* EPERM too...)
*/
heap_delete(lRel, &lTuple->t_self, NULL);
}
@ -536,6 +549,7 @@ AtCommit_Notify()
}
heap_endscan(sRel);
/*
* We do not do RelationUnsetLockForWrite(lRel) here, because the
* transaction is about to be committed anyway.
@ -588,25 +602,30 @@ AtAbort_Notify()
void
Async_NotifyHandler(SIGNAL_ARGS)
{
/*
* Note: this is a SIGNAL HANDLER. You must be very wary what you do here.
* Some helpful soul had this routine sprinkled with TPRINTFs, which would
* likely lead to corruption of stdio buffers if they were ever turned on.
* Note: this is a SIGNAL HANDLER. You must be very wary what you do
* here. Some helpful soul had this routine sprinkled with TPRINTFs,
* which would likely lead to corruption of stdio buffers if they were
* ever turned on.
*/
if (notifyInterruptEnabled)
{
/* I'm not sure whether some flavors of Unix might allow another
* SIGUSR2 occurrence to recursively interrupt this routine.
* To cope with the possibility, we do the same sort of dance that
* EnableNotifyInterrupt must do --- see that routine for comments.
/*
* I'm not sure whether some flavors of Unix might allow another
* SIGUSR2 occurrence to recursively interrupt this routine. To
* cope with the possibility, we do the same sort of dance that
* EnableNotifyInterrupt must do --- see that routine for
* comments.
*/
notifyInterruptEnabled = 0; /* disable any recursive signal */
notifyInterruptOccurred = 1; /* do at least one iteration */
for (;;)
{
notifyInterruptEnabled = 1;
if (! notifyInterruptOccurred)
if (!notifyInterruptOccurred)
break;
notifyInterruptEnabled = 0;
if (notifyInterruptOccurred)
@ -621,7 +640,11 @@ Async_NotifyHandler(SIGNAL_ARGS)
}
else
{
/* In this path it is NOT SAFE to do much of anything, except this: */
/*
* In this path it is NOT SAFE to do much of anything, except
* this:
*/
notifyInterruptOccurred = 1;
}
}
@ -653,25 +676,26 @@ EnableNotifyInterrupt(void)
* could fail to respond promptly to a signal that happens in between
* those two steps. (A very small time window, perhaps, but Murphy's
* Law says you can hit it...) Instead, we first set the enable flag,
* then test the occurred flag. If we see an unserviced interrupt
* has occurred, we re-clear the enable flag before going off to do
* the service work. (That prevents re-entrant invocation of
* ProcessIncomingNotify() if another interrupt occurs.)
* If an interrupt comes in between the setting and clearing of
* notifyInterruptEnabled, then it will have done the service
* work and left notifyInterruptOccurred zero, so we have to check
* again after clearing enable. The whole thing has to be in a loop
* in case another interrupt occurs while we're servicing the first.
* Once we get out of the loop, enable is set and we know there is no
* then test the occurred flag. If we see an unserviced interrupt has
* occurred, we re-clear the enable flag before going off to do the
* service work. (That prevents re-entrant invocation of
* ProcessIncomingNotify() if another interrupt occurs.) If an
* interrupt comes in between the setting and clearing of
* notifyInterruptEnabled, then it will have done the service work and
* left notifyInterruptOccurred zero, so we have to check again after
* clearing enable. The whole thing has to be in a loop in case
* another interrupt occurs while we're servicing the first. Once we
* get out of the loop, enable is set and we know there is no
* unserviced interrupt.
*
* NB: an overenthusiastic optimizing compiler could easily break this
* code. Hopefully, they all understand what "volatile" means these days.
* code. Hopefully, they all understand what "volatile" means these
* days.
*/
for (;;)
{
notifyInterruptEnabled = 1;
if (! notifyInterruptOccurred)
if (!notifyInterruptOccurred)
break;
notifyInterruptEnabled = 0;
if (notifyInterruptOccurred)
@ -777,6 +801,7 @@ ProcessIncomingNotify(void)
}
}
heap_endscan(sRel);
/*
* We do not do RelationUnsetLockForWrite(lRel) here, because the
* transaction is about to be committed anyway.
@ -785,7 +810,10 @@ ProcessIncomingNotify(void)
CommitTransactionCommand();
/* Must flush the notify messages to ensure frontend gets them promptly. */
/*
* Must flush the notify messages to ensure frontend gets them
* promptly.
*/
pq_flush();
PS_SET_STATUS("idle");
@ -800,20 +828,22 @@ NotifyMyFrontEnd(char *relname, int32 listenerPID)
if (whereToSendOutput == Remote)
{
StringInfoData buf;
pq_beginmessage(&buf);
pq_sendbyte(&buf, 'A');
pq_sendint(&buf, listenerPID, sizeof(int32));
pq_sendstring(&buf, relname);
pq_endmessage(&buf);
/* NOTE: we do not do pq_flush() here. For a self-notify, it will
/*
* NOTE: we do not do pq_flush() here. For a self-notify, it will
* happen at the end of the transaction, and for incoming notifies
* ProcessIncomingNotify will do it after finding all the notifies.
* ProcessIncomingNotify will do it after finding all the
* notifies.
*/
}
else
{
elog(NOTICE, "NOTIFY for %s", relname);
}
}
/* Does pendingNotifies include the given relname?
@ -847,10 +877,12 @@ ClearPendingNotifies()
if (pendingNotifies)
{
/* Since the referenced strings are malloc'd, we have to scan the
/*
* Since the referenced strings are malloc'd, we have to scan the
* list and delete them individually. If we used palloc for the
* strings then we could just do DLFreeList to get rid of both
* the list nodes and the list base...
* strings then we could just do DLFreeList to get rid of both the
* list nodes and the list base...
*/
while ((p = DLRemHead(pendingNotifies)) != NULL)
{

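Several of the reflowed comments above describe the same interrupt handshake: set the enable flag first, then re-test the occurred flag in a loop, so a SIGUSR2 that lands between the two steps can never go unserviced, and the handler itself only does real work when the main line has said it is safe. A stripped-down sketch of that flag dance, with generic names standing in for notifyInterruptEnabled/notifyInterruptOccurred and a stub in place of ProcessIncomingNotify:

    #include <signal.h>
    #include <stdio.h>

    static volatile sig_atomic_t interrupt_enabled = 0;
    static volatile sig_atomic_t interrupt_occurred = 0;
    static volatile sig_atomic_t serviced = 0;

    static void
    service_interrupt(void)
    {
        serviced++;                 /* stand-in for the real service work */
    }

    static void
    handler(int signo)
    {
        (void) signo;
        if (interrupt_enabled)
        {
            /* mirror the main-line dance so a recursive signal is not lost */
            interrupt_enabled = 0;
            interrupt_occurred = 1;
            for (;;)
            {
                interrupt_enabled = 1;
                if (!interrupt_occurred)
                    break;
                interrupt_enabled = 0;
                if (interrupt_occurred)
                {
                    interrupt_occurred = 0;
                    service_interrupt();
                }
            }
        }
        else
            interrupt_occurred = 1; /* only safe thing to do here */
    }

    /* Enable first, then re-check the occurred flag, in a loop. */
    static void
    enable_interrupts(void)
    {
        for (;;)
        {
            interrupt_enabled = 1;
            if (!interrupt_occurred)
                break;
            interrupt_enabled = 0;
            if (interrupt_occurred)
            {
                interrupt_occurred = 0;
                service_interrupt();
            }
        }
    }

    int
    main(void)
    {
        signal(SIGUSR2, handler);
        raise(SIGUSR2);             /* arrives while still disabled */
        enable_interrupts();        /* picks up the pending flag */
        printf("serviced %d interrupt(s)\n", (int) serviced);
        return 0;
    }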
View File

@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.38 1999/02/13 23:15:02 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.39 1999/05/25 16:08:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/command.c,v 1.44 1999/05/10 00:44:56 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/command.c,v 1.45 1999/05/25 16:08:17 momjian Exp $
*
* NOTES
* The PortalExecutorHeapMemory crap needs to be eliminated
@ -124,7 +124,7 @@ PerformPortalFetch(char *name,
limcount.type = T_Const;
limcount.consttype = INT4OID;
limcount.constlen = sizeof(int4);
limcount.constvalue = (Datum)count;
limcount.constvalue = (Datum) count;
limcount.constisnull = FALSE;
limcount.constbyval = TRUE;
limcount.constisset = FALSE;
@ -194,7 +194,7 @@ PerformPortalFetch(char *name,
PortalExecutorHeapMemory = (MemoryContext) PortalGetHeapMemory(portal);
ExecutorRun(queryDesc, PortalGetState(portal), feature,
(Node *)NULL, (Node *)&limcount);
(Node *) NULL, (Node *) &limcount);
if (dest == None) /* MOVE */
pfree(queryDesc);
@ -519,7 +519,7 @@ PerformAddAttribute(char *relationName,
}
void
LockTableCommand(LockStmt *lockstmt)
LockTableCommand(LockStmt * lockstmt)
{
Relation rel;
int aclresult;

View File

@ -6,7 +6,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.76 1999/05/10 00:44:58 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.77 1999/05/25 16:08:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -92,19 +92,25 @@ inline void CopyDonePeek(FILE *fp, int c, int pickup);
*
* NB: no data conversion is applied by these functions
*/
inline void CopySendData(void *databuf, int datasize, FILE *fp) {
inline void
CopySendData(void *databuf, int datasize, FILE *fp)
{
if (!fp)
pq_putbytes((char*) databuf, datasize);
pq_putbytes((char *) databuf, datasize);
else
fwrite(databuf, datasize, 1, fp);
}
inline void CopySendString(char *str, FILE *fp) {
CopySendData(str,strlen(str),fp);
inline void
CopySendString(char *str, FILE *fp)
{
CopySendData(str, strlen(str), fp);
}
inline void CopySendChar(char c, FILE *fp) {
CopySendData(&c,1,fp);
inline void
CopySendChar(char c, FILE *fp)
{
CopySendData(&c, 1, fp);
}
/*
@ -117,18 +123,23 @@ inline void CopySendChar(char c, FILE *fp) {
*
* NB: no data conversion is applied by these functions
*/
inline void CopyGetData(void *databuf, int datasize, FILE *fp) {
inline void
CopyGetData(void *databuf, int datasize, FILE *fp)
{
if (!fp)
pq_getbytes((char*) databuf, datasize);
pq_getbytes((char *) databuf, datasize);
else
fread(databuf, datasize, 1, fp);
}
inline int CopyGetChar(FILE *fp) {
inline int
CopyGetChar(FILE *fp)
{
if (!fp)
{
unsigned char ch;
if (pq_getbytes((char*) &ch, 1))
if (pq_getbytes((char *) &ch, 1))
return EOF;
return ch;
}
@ -136,9 +147,12 @@ inline int CopyGetChar(FILE *fp) {
return getc(fp);
}
inline int CopyGetEof(FILE *fp) {
inline int
CopyGetEof(FILE *fp)
{
if (!fp)
return 0; /* Never return EOF when talking to frontend ? */
return 0; /* Never return EOF when talking to
* frontend ? */
else
return feof(fp);
}
@ -150,26 +164,39 @@ inline int CopyGetEof(FILE *fp) {
* CopyDonePeek will either take the peeked char off the steam
* (if pickup is != 0) or leave it on the stream (if pickup == 0)
*/
inline int CopyPeekChar(FILE *fp) {
inline int
CopyPeekChar(FILE *fp)
{
if (!fp)
return pq_peekbyte();
else
return getc(fp);
}
inline void CopyDonePeek(FILE *fp, int c, int pickup) {
if (!fp) {
if (pickup) {
/* We want to pick it up - just receive again into dummy buffer */
inline void
CopyDonePeek(FILE *fp, int c, int pickup)
{
if (!fp)
{
if (pickup)
{
/*
* We want to pick it up - just receive again into dummy
* buffer
*/
char c;
pq_getbytes(&c, 1);
}
/* If we didn't want to pick it up, just leave it where it sits */
}
else {
if (!pickup) {
else
{
if (!pickup)
{
/* We don't want to pick it up - so put it back in there */
ungetc(c,fp);
ungetc(c, fp);
}
/* If we wanted to pick it up, it's already there */
}
@ -317,7 +344,7 @@ DoCopy(char *relname, bool binary, bool oids, bool from, bool pipe,
else if (!from)
{
if (!binary)
CopySendData("\\.\n",3,fp);
CopySendData("\\.\n", 3, fp);
if (IsUnderPostmaster)
pq_endcopyout(false);
}
@ -395,8 +422,8 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
if (oids && !binary)
{
CopySendString(oidout(tuple->t_data->t_oid),fp);
CopySendChar(delim[0],fp);
CopySendString(oidout(tuple->t_data->t_oid), fp);
CopySendChar(delim[0], fp);
}
for (i = 0; i < attr_count; i++)
@ -777,7 +804,7 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
}
else if (nulls[i] != 'n')
{
ptr = (char *)att_align(ptr, attr[i]->attlen, attr[i]->attalign);
ptr = (char *) att_align(ptr, attr[i]->attlen, attr[i]->attalign);
values[i] = (Datum) ptr;
ptr = att_addlength(ptr, attr[i]->attlen, ptr);
}
@ -1176,26 +1203,29 @@ CopyReadAttribute(FILE *fp, bool *isnull, char *delim)
if (ISOCTAL(c))
{
val = (val << 3) + VALUE(c);
CopyDonePeek(fp, c, 1); /* Pick up the character! */
CopyDonePeek(fp, c, 1); /* Pick up the
* character! */
c = CopyPeekChar(fp);
if (ISOCTAL(c)) {
CopyDonePeek(fp,c,1); /* pick up! */
if (ISOCTAL(c))
{
CopyDonePeek(fp, c, 1); /* pick up! */
val = (val << 3) + VALUE(c);
}
else
{
if (CopyGetEof(fp)) {
CopyDonePeek(fp,c,1); /* pick up */
if (CopyGetEof(fp))
{
CopyDonePeek(fp, c, 1); /* pick up */
return NULL;
}
CopyDonePeek(fp,c,0); /* Return to stream! */
CopyDonePeek(fp, c, 0); /* Return to stream! */
}
}
else
{
if (CopyGetEof(fp))
return NULL;
CopyDonePeek(fp,c,0); /* Return to stream! */
CopyDonePeek(fp, c, 0); /* Return to stream! */
}
c = val & 0377;
}
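The reshaped CopyPeekChar/CopyDonePeek pair above implements "peek a character, then decide whether to keep it". For the plain-FILE path that is just getc plus an optional ungetc; here is a self-contained sketch of the same interface (file path only, without the frontend pq_* variant):

    #include <stdio.h>

    /* Look at the next character without committing to it. */
    static int
    peek_char(FILE *fp)
    {
        return getc(fp);
    }

    /*
     * Either keep the peeked character consumed (pickup != 0) or push it
     * back onto the stream (pickup == 0).
     */
    static void
    done_peek(FILE *fp, int c, int pickup)
    {
        if (!pickup && c != EOF)
            ungetc(c, fp);
    }

    int
    main(void)
    {
        FILE   *fp = tmpfile();
        int     c;

        if (fp == NULL)
            return 1;
        fputs("abc", fp);
        rewind(fp);

        c = peek_char(fp);
        done_peek(fp, c, 0);        /* put it back */
        printf("peeked '%c', next read '%c'\n", c, getc(fp));  /* both 'a' */

        fclose(fp);
        return 0;
    }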

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/creatinh.c,v 1.40 1999/02/13 23:15:05 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/creatinh.c,v 1.41 1999/05/25 16:08:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/dbcommands.c,v 1.34 1999/05/10 00:44:59 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/dbcommands.c,v 1.35 1999/05/25 16:08:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -124,7 +124,7 @@ destroydb(char *dbname, CommandDest dest)
*/
snprintf(buf, 512,
"delete from pg_database where pg_database.oid = \'%u\'::oid", db_id);
pg_exec_query_dest(buf ,dest, false);
pg_exec_query_dest(buf, dest, false);
/* drop pages for this database that are in the shared buffer cache */
DropBuffers(db_id);

View File

@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.28 1999/04/09 22:35:41 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.29 1999/05/25 16:08:22 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the

View File

@ -4,7 +4,7 @@
*
* Copyright (c) 1994-5, Regents of the University of California
*
* $Id: explain.c,v 1.36 1999/05/09 23:31:45 tgl Exp $
* $Id: explain.c,v 1.37 1999/05/25 16:08:23 momjian Exp $
*
*/
#include <stdio.h>
@ -34,7 +34,7 @@ typedef struct ExplainState
} ExplainState;
static char *Explain_PlanToString(Plan *plan, ExplainState *es);
static void printLongNotice(const char * header, const char * message);
static void printLongNotice(const char *header, const char *message);
static void ExplainOneQuery(Query *query, bool verbose, CommandDest dest);
@ -208,13 +208,11 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
case T_IndexScan:
appendStringInfo(str, " using ");
i = 0;
foreach (l, ((IndexScan *) plan)->indxid)
foreach(l, ((IndexScan *) plan)->indxid)
{
relation = RelationIdCacheGetRelation((int) lfirst(l));
if (++i > 1)
{
appendStringInfo(str, ", ");
}
appendStringInfo(str,
stringStringInfo((RelationGetRelationName(relation))->data));
}
@ -249,17 +247,13 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
List *lst;
for (i = 0; i < indent; i++)
{
appendStringInfo(str, " ");
}
appendStringInfo(str, " InitPlan\n");
foreach(lst, plan->initPlan)
{
es->rtable = ((SubPlan *) lfirst(lst))->rtable;
for (i = 0; i < indent; i++)
{
appendStringInfo(str, " ");
}
appendStringInfo(str, " -> ");
explain_outNode(str, ((SubPlan *) lfirst(lst))->plan, indent + 2, es);
}
@ -270,9 +264,7 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
if (outerPlan(plan))
{
for (i = 0; i < indent; i++)
{
appendStringInfo(str, " ");
}
appendStringInfo(str, " -> ");
explain_outNode(str, outerPlan(plan), indent + 3, es);
}
@ -281,9 +273,7 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
if (innerPlan(plan))
{
for (i = 0; i < indent; i++)
{
appendStringInfo(str, " ");
}
appendStringInfo(str, " -> ");
explain_outNode(str, innerPlan(plan), indent + 3, es);
}
@ -295,17 +285,13 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
List *lst;
for (i = 0; i < indent; i++)
{
appendStringInfo(str, " ");
}
appendStringInfo(str, " SubPlan\n");
foreach(lst, plan->subPlan)
{
es->rtable = ((SubPlan *) lfirst(lst))->rtable;
for (i = 0; i < indent; i++)
{
appendStringInfo(str, " ");
}
appendStringInfo(str, " -> ");
explain_outNode(str, ((SubPlan *) lfirst(lst))->plan, indent + 4, es);
}
@ -336,9 +322,7 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
es->rtable = nth(whichplan, appendplan->unionrtables);
for (i = 0; i < indent; i++)
{
appendStringInfo(str, " ");
}
appendStringInfo(str, " -> ");
explain_outNode(str, subnode, indent + 4, es);
@ -367,7 +351,7 @@ Explain_PlanToString(Plan *plan, ExplainState *es)
* This is a crock ... there shouldn't be an upper limit to what you can elog().
*/
static void
printLongNotice(const char * header, const char * message)
printLongNotice(const char *header, const char *message)
{
int len = strlen(message);

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.3 1999/05/10 00:44:59 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.4 1999/05/25 16:08:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/remove.c,v 1.32 1999/02/13 23:15:08 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/remove.c,v 1.33 1999/05/25 16:08:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/rename.c,v 1.24 1999/05/17 18:24:48 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/rename.c,v 1.25 1999/05/25 16:08:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -237,7 +237,7 @@ renamerel(char *oldrelname, char *newrelname)
{
sprintf(toldpath, "%s.%d", oldpath, i);
sprintf(tnewpath, "%s.%d", newpath, i);
if(rename(toldpath, tnewpath) < 0)
if (rename(toldpath, tnewpath) < 0)
break;
}

View File

@ -218,8 +218,8 @@ nextval(struct varlena * seqin)
return elm->last;
}
seq = read_info("nextval", elm, &buf); /* lock page' buffer and read
* tuple */
seq = read_info("nextval", elm, &buf); /* lock page' buffer and
* read tuple */
next = result = seq->last_value;
incby = seq->increment_by;
@ -327,8 +327,8 @@ setval(struct varlena * seqin, int4 next)
/* open and AccessShareLock sequence */
elm = init_sequence("setval", seqname);
seq = read_info("setval", elm, &buf); /* lock page' buffer and read
* tuple */
seq = read_info("setval", elm, &buf); /* lock page' buffer and
* read tuple */
if (seq->cache_value != 1)
{

View File

@ -742,8 +742,8 @@ ExecBRUpdateTriggers(EState *estate, ItemPointer tupleid, HeapTuple newtuple)
return NULL;
/*
* In READ COMMITTED isolevel it's possible that newtuple
* was changed due to concurrent update.
* In READ COMMITTED isolevel it's possible that newtuple was changed
* due to concurrent update.
*/
if (newSlot != NULL)
intuple = newtuple = ExecRemoveJunk(estate->es_junkFilter, newSlot);
@ -824,7 +824,7 @@ ltrmark:;
{
case HeapTupleSelfUpdated:
ReleaseBuffer(buffer);
return(NULL);
return (NULL);
case HeapTupleMayBeUpdated:
break;
@ -846,17 +846,17 @@ ltrmark:;
goto ltrmark;
}
}
/*
* if tuple was deleted or PlanQual failed
* for updated tuple - we have not process
* this tuple!
 * if the tuple was deleted or PlanQual failed for the updated
 * tuple - we do not process this tuple!
*/
return(NULL);
return (NULL);
default:
ReleaseBuffer(buffer);
elog(ERROR, "Unknown status %u from heap_mark4update", test);
return(NULL);
return (NULL);
}
}
else

View File

@ -5,7 +5,7 @@
*
* Copyright (c) 1994, Regents of the University of California
*
* $Id: user.c,v 1.27 1999/04/02 06:16:36 tgl Exp $
* $Id: user.c,v 1.28 1999/05/25 16:08:27 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -169,8 +169,8 @@ DefineUser(CreateUserStmt *stmt, CommandDest dest)
/*
* Build the insert statement to be executed.
*
* XXX Ugly as this code is, it still fails to cope with ' or \
* in any of the provided strings.
* XXX Ugly as this code is, it still fails to cope with ' or \ in any of
* the provided strings.
*/
snprintf(sql, SQL_LENGTH,
"insert into %s (usename,usesysid,usecreatedb,usetrace,"
@ -272,9 +272,7 @@ AlterUser(AlterUserStmt *stmt, CommandDest dest)
snprintf(sql, SQL_LENGTH, "update %s set", ShadowRelationName);
if (stmt->password)
{
snprintf(sql, SQL_LENGTH, "%s passwd = '%s'", pstrdup(sql), stmt->password);
}
if (stmt->createdb)
{

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.103 1999/05/23 09:10:24 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.104 1999/05/25 16:08:27 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -222,14 +222,15 @@ vc_shutdown()
{
/* on entry, we are not in a transaction */
/* Flush the init file that relcache.c uses to save startup time.
* The next backend startup will rebuild the init file with up-to-date
* information from pg_class. This lets the optimizer see the stats that
* we've collected for certain critical system indexes. See relcache.c
* for more details.
/*
* Flush the init file that relcache.c uses to save startup time. The
* next backend startup will rebuild the init file with up-to-date
* information from pg_class. This lets the optimizer see the stats
* that we've collected for certain critical system indexes. See
* relcache.c for more details.
*
* Ignore any failure to unlink the file, since it might not be there
* if no backend has been started since the last vacuum...
* Ignore any failure to unlink the file, since it might not be there if
* no backend has been started since the last vacuum...
*/
unlink(RELCACHE_INIT_FILENAME);
@ -799,6 +800,7 @@ vc_scanheap(VRelStats *vacrelstats, Relation onerel,
}
else if (!TransactionIdIsInProgress(tuple.t_data->t_xmax))
{
/*
* Not Aborted, Not Committed, Not in Progress - so it
* from crashed process. - vadim 06/02/97
@ -812,9 +814,10 @@ vc_scanheap(VRelStats *vacrelstats, Relation onerel,
relname, blkno, offnum, tuple.t_data->t_xmax);
do_shrinking = false;
}
/*
* If tuple is recently deleted then
* we must not remove it from relation.
* If tuple is recently deleted then we must not remove it
* from relation.
*/
if (tupgone && tuple.t_data->t_xmax >= XmaxRecent &&
tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED)
@ -826,6 +829,7 @@ vc_scanheap(VRelStats *vacrelstats, Relation onerel,
tuple.t_data->t_infomask |= HEAP_XMAX_COMMITTED;
pgchanged = true;
}
/*
* If we do shrinking and this tuple is updated one
* then remember it to construct updated tuple
@ -962,7 +966,7 @@ vc_scanheap(VRelStats *vacrelstats, Relation onerel,
if (usable_free_size > 0 && num_vtlinks > 0)
{
qsort((char *) vtlinks, num_vtlinks, sizeof (VTupleLinkData),
qsort((char *) vtlinks, num_vtlinks, sizeof(VTupleLinkData),
vc_cmp_vtlinks);
vacrelstats->vtlinks = vtlinks;
vacrelstats->num_vtlinks = num_vtlinks;
@ -1128,7 +1132,8 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
else
Assert(!isempty);
chain_tuple_moved = false; /* no one chain-tuple was moved off this page, yet */
chain_tuple_moved = false; /* no one chain-tuple was moved
* off this page, yet */
vpc->vpd_blkno = blkno;
maxoff = PageGetMaxOffsetNumber(page);
for (offnum = FirstOffsetNumber;
@ -1146,20 +1151,22 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
if (!(tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED))
{
if ((TransactionId)tuple.t_data->t_cmin != myXID)
if ((TransactionId) tuple.t_data->t_cmin != myXID)
elog(ERROR, "Invalid XID in t_cmin");
if (tuple.t_data->t_infomask & HEAP_MOVED_IN)
elog(ERROR, "HEAP_MOVED_IN was not expected");
/*
* If this (chain) tuple is moved by me already then
* I have to check is it in vpc or not - i.e. is it
* moved while cleaning this page or some previous one.
* If this (chain) tuple is moved by me already then I
* have to check is it in vpc or not - i.e. is it moved
* while cleaning this page or some previous one.
*/
if (tuple.t_data->t_infomask & HEAP_MOVED_OFF)
{
if (keep_tuples == 0)
continue;
if (chain_tuple_moved) /* some chains was moved while */
if (chain_tuple_moved) /* some chains was moved
* while */
{ /* cleaning this page */
Assert(vpc->vpd_offsets_free > 0);
for (i = 0; i < vpc->vpd_offsets_free; i++)
@ -1184,9 +1191,9 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
}
/*
* If this tuple is in the chain of tuples created in
* updates by "recent" transactions then we have to
* move all chain of tuples to another places.
* If this tuple is in the chain of tuples created in updates
* by "recent" transactions then we have to move all chain of
* tuples to another places.
*/
if ((tuple.t_data->t_infomask & HEAP_UPDATED &&
tuple.t_data->t_xmin >= XmaxRecent) ||
@ -1215,9 +1222,10 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
WriteBuffer(cur_buffer);
cur_buffer = InvalidBuffer;
}
/*
* If this tuple is in the begin/middle of the chain
* then we have to move to the end of chain.
* If this tuple is in the begin/middle of the chain then
* we have to move to the end of chain.
*/
while (!(tp.t_data->t_infomask & HEAP_XMAX_INVALID) &&
!(ItemPointerEquals(&(tp.t_self), &(tp.t_data->t_ctid))))
@ -1238,7 +1246,7 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
tlen = tp.t_len = ItemIdGetLength(Citemid);
}
/* first, can chain be moved ? */
for ( ; ; )
for (;;)
{
if (!vc_enough_space(to_vpd, tlen))
{
@ -1257,7 +1265,8 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
if (vc_enough_space(fraged_pages->vpl_pagedesc[i], tlen))
break;
}
if (i == num_fraged_pages) /* can't move item anywhere */
if (i == num_fraged_pages) /* can't move item
* anywhere */
{
for (i = 0; i < num_vtmove; i++)
{
@ -1289,16 +1298,18 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
vtmove[num_vtmove].cleanVpd = false;
free_vtmove--;
num_vtmove++;
/*
* All done ?
*/
if (!(tp.t_data->t_infomask & HEAP_UPDATED) ||
tp.t_data->t_xmin < XmaxRecent)
break;
/*
* Well, try to find tuple with old row version
*/
for ( ; ; )
for (;;)
{
Buffer Pbuf;
Page Ppage;
@ -1326,19 +1337,20 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
elog(ERROR, "Parent itemid marked as unused");
Ptp.t_data = (HeapTupleHeader) PageGetItem(Ppage, Pitemid);
Assert(Ptp.t_data->t_xmax == tp.t_data->t_xmin);
/*
* If this tuple is updated version of row and
* it was created by the same transaction then
* no one is interested in this tuple -
* mark it as removed.
* If this tuple is updated version of row and it
* was created by the same transaction then no one
* is interested in this tuple - mark it as
* removed.
*/
if (Ptp.t_data->t_infomask & HEAP_UPDATED &&
Ptp.t_data->t_xmin == Ptp.t_data->t_xmax)
{
TransactionIdStore(myXID,
(TransactionId*) &(Ptp.t_data->t_cmin));
(TransactionId *) &(Ptp.t_data->t_cmin));
Ptp.t_data->t_infomask &=
~(HEAP_XMIN_COMMITTED|HEAP_XMIN_INVALID|HEAP_MOVED_IN);
~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_IN);
Ptp.t_data->t_infomask |= HEAP_MOVED_OFF;
WriteBuffer(Pbuf);
continue;
@ -1373,6 +1385,7 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
tuple_len = tuple.t_len = ItemIdGetLength(Citemid);
/* Get page to move in */
cur_buffer = ReadBuffer(onerel, vtmove[ti].vpd->vpd_blkno);
/*
* We should LockBuffer(cur_buffer) but don't, at the
* moment. If you'll do LockBuffer then UNLOCK it
@ -1385,9 +1398,9 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
vc_vacpage(ToPage, vtmove[ti].vpd);
heap_copytuple_with_tuple(&tuple, &newtup);
RelationInvalidateHeapTuple(onerel, &tuple);
TransactionIdStore(myXID, (TransactionId*) &(newtup.t_data->t_cmin));
TransactionIdStore(myXID, (TransactionId *) &(newtup.t_data->t_cmin));
newtup.t_data->t_infomask &=
~(HEAP_XMIN_COMMITTED|HEAP_XMIN_INVALID|HEAP_MOVED_OFF);
~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_OFF);
newtup.t_data->t_infomask |= HEAP_MOVED_IN;
newoff = PageAddItem(ToPage, (Item) newtup.t_data, tuple_len,
InvalidOffsetNumber, LP_USED);
@ -1401,6 +1414,7 @@ moving chain: failed to add item with len = %u to page %u",
pfree(newtup.t_data);
newtup.t_data = (HeapTupleHeader) PageGetItem(ToPage, newitemid);
ItemPointerSet(&(newtup.t_self), vtmove[ti].vpd->vpd_blkno, newoff);
/*
* Set t_ctid pointing to itself for last tuple in
* chain and to next tuple in chain otherwise.
@ -1411,12 +1425,13 @@ moving chain: failed to add item with len = %u to page %u",
newtup.t_data->t_ctid = Ctid;
Ctid = newtup.t_self;
TransactionIdStore(myXID, (TransactionId*) &(tuple.t_data->t_cmin));
TransactionIdStore(myXID, (TransactionId *) &(tuple.t_data->t_cmin));
tuple.t_data->t_infomask &=
~(HEAP_XMIN_COMMITTED|HEAP_XMIN_INVALID|HEAP_MOVED_IN);
~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_IN);
tuple.t_data->t_infomask |= HEAP_MOVED_OFF;
num_moved++;
/*
* Remember that we moved tuple from the current page
* (corresponding index tuple will be cleaned).
@ -1508,12 +1523,12 @@ moving chain: failed to add item with len = %u to page %u",
RelationInvalidateHeapTuple(onerel, &tuple);
/*
* Mark new tuple as moved_in by vacuum and
* store vacuum XID in t_cmin !!!
* Mark new tuple as moved_in by vacuum and store vacuum XID
* in t_cmin !!!
*/
TransactionIdStore(myXID, (TransactionId*) &(newtup.t_data->t_cmin));
TransactionIdStore(myXID, (TransactionId *) &(newtup.t_data->t_cmin));
newtup.t_data->t_infomask &=
~(HEAP_XMIN_COMMITTED|HEAP_XMIN_INVALID|HEAP_MOVED_OFF);
~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_OFF);
newtup.t_data->t_infomask |= HEAP_MOVED_IN;
/* add tuple to the page */
@ -1533,12 +1548,12 @@ failed to add item with len = %u to page %u (free space %u, nusd %u, noff %u)",
newtup.t_self = newtup.t_data->t_ctid;
/*
* Mark old tuple as moved_off by vacuum and
* store vacuum XID in t_cmin !!!
* Mark old tuple as moved_off by vacuum and store vacuum XID
* in t_cmin !!!
*/
TransactionIdStore(myXID, (TransactionId*) &(tuple.t_data->t_cmin));
TransactionIdStore(myXID, (TransactionId *) &(tuple.t_data->t_cmin));
tuple.t_data->t_infomask &=
~(HEAP_XMIN_COMMITTED|HEAP_XMIN_INVALID|HEAP_MOVED_IN);
~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_IN);
tuple.t_data->t_infomask |= HEAP_MOVED_OFF;
cur_page->vpd_offsets_used++;
@ -1584,13 +1599,14 @@ failed to add item with len = %u to page %u (free space %u, nusd %u, noff %u)",
tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
if (tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED)
continue;
if ((TransactionId)tuple.t_data->t_cmin != myXID)
if ((TransactionId) tuple.t_data->t_cmin != myXID)
elog(ERROR, "Invalid XID in t_cmin (4)");
if (tuple.t_data->t_infomask & HEAP_MOVED_IN)
elog(ERROR, "HEAP_MOVED_IN was not expected (2)");
if (tuple.t_data->t_infomask & HEAP_MOVED_OFF)
{
if (chain_tuple_moved) /* some chains was moved while */
if (chain_tuple_moved) /* some chains was moved
* while */
{ /* cleaning this page */
Assert(vpc->vpd_offsets_free > 0);
for (i = 0; i < vpc->vpd_offsets_free; i++)
@ -1645,6 +1661,7 @@ failed to add item with len = %u to page %u (free space %u, nusd %u, noff %u)",
if (num_moved > 0)
{
/*
* We have to commit our tuple' movings before we'll truncate
* relation, but we shouldn't lose our locks. And so - quick hack:
@ -1657,8 +1674,8 @@ failed to add item with len = %u to page %u (free space %u, nusd %u, noff %u)",
}
/*
* Clean uncleaned reapped pages from vacuum_pages list list and set xmin
* committed for inserted tuples
 * Clean uncleaned reaped pages from the vacuum_pages list and set
 * xmin committed for inserted tuples
*/
checked_moved = 0;
for (i = 0, vpp = vacuum_pages->vpl_pagedesc; i < vacuumed_pages; i++, vpp++)
@ -1671,7 +1688,8 @@ failed to add item with len = %u to page %u (free space %u, nusd %u, noff %u)",
if (!PageIsEmpty(page))
vc_vacpage(page, *vpp);
}
else /* this page was used */
else
/* this page was used */
{
num_tuples = 0;
max_offset = PageGetMaxOffsetNumber(page);
@ -1685,7 +1703,7 @@ failed to add item with len = %u to page %u (free space %u, nusd %u, noff %u)",
tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
if (!(tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED))
{
if ((TransactionId)tuple.t_data->t_cmin != myXID)
if ((TransactionId) tuple.t_data->t_cmin != myXID)
elog(ERROR, "Invalid XID in t_cmin (2)");
if (tuple.t_data->t_infomask & HEAP_MOVED_IN)
{
@ -1757,7 +1775,7 @@ Elapsed %u/%u sec.",
if (!(tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED))
{
if ((TransactionId)tuple.t_data->t_cmin != myXID)
if ((TransactionId) tuple.t_data->t_cmin != myXID)
elog(ERROR, "Invalid XID in t_cmin (3)");
if (tuple.t_data->t_infomask & HEAP_MOVED_OFF)
{
@ -2317,8 +2335,7 @@ vc_updstats(Oid relid, int num_pages, int num_tuples, bool hasindex, VRelStats *
attp->attdisbursion = selratio;
/*
* Invalidate the cache for the tuple
* and write the buffer
* Invalidate the cache for the tuple and write the buffer
*/
RelationInvalidateHeapTuple(ad, atup);
WriteNoReleaseBuffer(abuffer);
@ -2375,8 +2392,7 @@ vc_updstats(Oid relid, int num_pages, int num_tuples, bool hasindex, VRelStats *
}
/*
* Invalidate the cached pg_class tuple and
* write the buffer
* Invalidate the cached pg_class tuple and write the buffer
*/
RelationInvalidateHeapTuple(rd, &rtup);
@ -2527,16 +2543,16 @@ vc_find_eq(void *bot, int nelem, int size, void *elm,
}
if (last_move == true)
{
res = compar(elm, (void *)((char *)bot + last * size));
res = compar(elm, (void *) ((char *) bot + last * size));
if (res > 0)
return NULL;
if (res == 0)
return (void *)((char *)bot + last * size);
return (void *) ((char *) bot + last * size);
last_move = false;
}
res = compar(elm, (void *)((char *)bot + celm * size));
res = compar(elm, (void *) ((char *) bot + celm * size));
if (res == 0)
return (void *)((char *)bot + celm * size);
return (void *) ((char *) bot + celm * size);
if (res < 0)
{
if (celm == 0)
@ -2551,7 +2567,7 @@ vc_find_eq(void *bot, int nelem, int size, void *elm,
return NULL;
last = last - celm - 1;
bot = (void *)((char *)bot + (celm + 1) * size);
bot = (void *) ((char *) bot + (celm + 1) * size);
celm = (last + 1) / 2;
first_move = true;
}
@ -2591,25 +2607,25 @@ static int
vc_cmp_vtlinks(const void *left, const void *right)
{
if (((VTupleLink)left)->new_tid.ip_blkid.bi_hi <
((VTupleLink)right)->new_tid.ip_blkid.bi_hi)
if (((VTupleLink) left)->new_tid.ip_blkid.bi_hi <
((VTupleLink) right)->new_tid.ip_blkid.bi_hi)
return -1;
if (((VTupleLink)left)->new_tid.ip_blkid.bi_hi >
((VTupleLink)right)->new_tid.ip_blkid.bi_hi)
if (((VTupleLink) left)->new_tid.ip_blkid.bi_hi >
((VTupleLink) right)->new_tid.ip_blkid.bi_hi)
return 1;
/* bi_hi-es are equal */
if (((VTupleLink)left)->new_tid.ip_blkid.bi_lo <
((VTupleLink)right)->new_tid.ip_blkid.bi_lo)
if (((VTupleLink) left)->new_tid.ip_blkid.bi_lo <
((VTupleLink) right)->new_tid.ip_blkid.bi_lo)
return -1;
if (((VTupleLink)left)->new_tid.ip_blkid.bi_lo >
((VTupleLink)right)->new_tid.ip_blkid.bi_lo)
if (((VTupleLink) left)->new_tid.ip_blkid.bi_lo >
((VTupleLink) right)->new_tid.ip_blkid.bi_lo)
return 1;
/* bi_lo-es are equal */
if (((VTupleLink)left)->new_tid.ip_posid <
((VTupleLink)right)->new_tid.ip_posid)
if (((VTupleLink) left)->new_tid.ip_posid <
((VTupleLink) right)->new_tid.ip_posid)
return -1;
if (((VTupleLink)left)->new_tid.ip_posid >
((VTupleLink)right)->new_tid.ip_posid)
if (((VTupleLink) left)->new_tid.ip_posid >
((VTupleLink) right)->new_tid.ip_posid)
return 1;
return 0;
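vc_cmp_vtlinks, reformatted above, is an ordinary qsort comparator over a three-part key (bi_hi, bi_lo, ip_posid): compare the most significant field first and fall through to the next only on a tie. The same shape on a generic struct (illustrative layout, not VTupleLinkData):

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct TidKey
    {
        unsigned int hi;
        unsigned int lo;
        unsigned int pos;
    } TidKey;

    /* Compare the most significant field first; only fall through on ties. */
    static int
    cmp_tidkey(const void *left, const void *right)
    {
        const TidKey *a = (const TidKey *) left;
        const TidKey *b = (const TidKey *) right;

        if (a->hi != b->hi)
            return (a->hi < b->hi) ? -1 : 1;
        if (a->lo != b->lo)
            return (a->lo < b->lo) ? -1 : 1;
        if (a->pos != b->pos)
            return (a->pos < b->pos) ? -1 : 1;
        return 0;
    }

    int
    main(void)
    {
        TidKey  keys[] = {{0, 2, 5}, {0, 1, 9}, {0, 1, 3}};
        int     i;

        qsort(keys, 3, sizeof(TidKey), cmp_tidkey);
        for (i = 0; i < 3; i++)
            printf("(%u,%u,%u)\n", keys[i].hi, keys[i].lo, keys[i].pos);
        return 0;
    }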

View File

@ -2,7 +2,7 @@
* Routines for handling of 'SET var TO',
* 'SHOW var' and 'RESET var' statements.
*
* $Id: variable.c,v 1.19 1999/02/18 06:00:44 momjian Exp $
* $Id: variable.c,v 1.20 1999/05/25 16:08:28 momjian Exp $
*
*/
@ -45,10 +45,12 @@ static bool parse_ksqo(const char *);
static bool show_XactIsoLevel(void);
static bool reset_XactIsoLevel(void);
static bool parse_XactIsoLevel(const char *);
#ifdef QUERY_LIMIT
static bool show_query_limit(void);
static bool reset_query_limit(void);
static bool parse_query_limit(const char *);
#endif
extern Cost _cpu_page_wight_;
@ -547,17 +549,17 @@ parse_query_limit(const char *value)
{
int32 limit;
if (value == NULL) {
if (value == NULL)
{
reset_query_limit();
return(TRUE);
return (TRUE);
}
/* why is pg_atoi's arg not declared "const char *" ? */
limit = pg_atoi((char *) value, sizeof(int32), '\0');
if (limit <= -1) {
if (limit <= -1)
elog(ERROR, "Bad value for # of query limit (%s)", value);
}
ExecutorLimit(limit);
return(TRUE);
return (TRUE);
}
static bool
@ -566,20 +568,20 @@ show_query_limit(void)
int limit;
limit = ExecutorGetLimit();
if (limit == ALL_TUPLES) {
if (limit == ALL_TUPLES)
elog(NOTICE, "No query limit is set");
} else {
elog(NOTICE, "query limit is %d",limit);
}
return(TRUE);
else
elog(NOTICE, "query limit is %d", limit);
return (TRUE);
}
static bool
reset_query_limit(void)
{
ExecutorLimit(ALL_TUPLES);
return(TRUE);
return (TRUE);
}
#endif
/*-----------------------------------------------------------------------*/

View File

@ -5,7 +5,7 @@
*
* Copyright (c) 1994, Regents of the University of California
*
* $Id: view.c,v 1.32 1999/02/13 23:15:12 momjian Exp $
* $Id: view.c,v 1.33 1999/05/25 16:08:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/

View File

@ -14,7 +14,7 @@
* ExecInitTee
* ExecEndTee
*
* $Id: nodeTee.c,v 1.1 1999/03/23 16:50:49 momjian Exp $
* $Id: nodeTee.c,v 1.2 1999/05/25 16:08:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -339,6 +339,7 @@ ExecTee(Tee *node, Plan *parent)
slot = ExecProcNode(childNode, (Plan *) node);
if (!TupIsNull(slot))
{
/*
* heap_insert changes something...
*/

View File

@ -5,7 +5,7 @@
*
* Copyright (c) 1994, Regents of the University of California
*
* $Id: execAmi.c,v 1.34 1999/05/10 00:45:05 momjian Exp $
* $Id: execAmi.c,v 1.35 1999/05/25 16:08:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/

View File

@ -26,7 +26,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.83 1999/05/10 00:45:06 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.84 1999/05/25 16:08:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -71,9 +71,9 @@ static TupleTableSlot *ExecutePlan(EState *estate, Plan *plan,
int offsetTuples,
int numberTuples,
ScanDirection direction,
DestReceiver *destfunc);
DestReceiver * destfunc);
static void ExecRetrieve(TupleTableSlot *slot,
DestReceiver *destfunc,
DestReceiver * destfunc,
EState *estate);
static void ExecAppend(TupleTableSlot *slot, ItemPointer tupleid,
EState *estate);
@ -141,7 +141,7 @@ ExecutorStart(QueryDesc *queryDesc, EState *estate)
estate->es_snapshot = NULL;
else
{
estate->es_snapshot = (Snapshot)palloc(sizeof(SnapshotData));
estate->es_snapshot = (Snapshot) palloc(sizeof(SnapshotData));
memcpy(estate->es_snapshot, QuerySnapshot, sizeof(SnapshotData));
if (estate->es_snapshot->xcnt > 0)
{
@ -210,8 +210,8 @@ ExecutorRun(QueryDesc *queryDesc, EState *estate, int feature,
Assert(queryDesc != NULL);
/*
* extract information from the query descriptor
* and the query feature.
* extract information from the query descriptor and the query
* feature.
*/
operation = queryDesc->operation;
plan = queryDesc->plantree;
@ -223,8 +223,8 @@ ExecutorRun(QueryDesc *queryDesc, EState *estate, int feature,
/*
* FIXME: the dest setup function ought to be handed the tuple desc
* for the tuples to be output, but I'm not quite sure how to get that
* info at this point. For now, passing NULL is OK because no existing
* dest setup function actually uses the pointer.
* info at this point. For now, passing NULL is OK because no
* existing dest setup function actually uses the pointer.
*/
(*destfunc->setup) (destfunc, (TupleDesc) NULL);
@ -241,12 +241,12 @@ ExecutorRun(QueryDesc *queryDesc, EState *estate, int feature,
switch (nodeTag(limoffset))
{
case T_Const:
coffset = (Const *)limoffset;
offset = (int)(coffset->constvalue);
coffset = (Const *) limoffset;
offset = (int) (coffset->constvalue);
break;
case T_Param:
poffset = (Param *)limoffset;
poffset = (Param *) limoffset;
paramLI = estate->es_param_list_info;
if (paramLI == NULL)
@ -260,7 +260,7 @@ ExecutorRun(QueryDesc *queryDesc, EState *estate, int feature,
elog(ERROR, "parameter for limit offset not in executor state");
if (paramLI[i].isnull)
elog(ERROR, "limit offset cannot be NULL value");
offset = (int)(paramLI[i].value);
offset = (int) (paramLI[i].value);
break;
@ -285,12 +285,12 @@ ExecutorRun(QueryDesc *queryDesc, EState *estate, int feature,
switch (nodeTag(limcount))
{
case T_Const:
ccount = (Const *)limcount;
count = (int)(ccount->constvalue);
ccount = (Const *) limcount;
count = (int) (ccount->constvalue);
break;
case T_Param:
pcount = (Param *)limcount;
pcount = (Param *) limcount;
paramLI = estate->es_param_list_info;
if (paramLI == NULL)
@ -304,7 +304,7 @@ ExecutorRun(QueryDesc *queryDesc, EState *estate, int feature,
elog(ERROR, "parameter for limit count not in executor state");
if (paramLI[i].isnull)
elog(ERROR, "limit count cannot be NULL value");
count = (int)(paramLI[i].value);
count = (int) (paramLI[i].value);
break;
@ -352,8 +352,8 @@ ExecutorRun(QueryDesc *queryDesc, EState *estate, int feature,
break;
/*
* return one tuple but don't "retrieve" it.
* (this is used by the rule manager..) -cim 9/14/89
* return one tuple but don't "retrieve" it. (this is used by
* the rule manager..) -cim 9/14/89
*/
case EXEC_RETONE:
result = ExecutePlan(estate,
@ -395,18 +395,23 @@ ExecutorEnd(QueryDesc *queryDesc, EState *estate)
EndPlan(queryDesc->plantree, estate);
/* XXX - clean up some more from ExecutorStart() - er1p */
if (NULL == estate->es_snapshot) {
if (NULL == estate->es_snapshot)
{
/* nothing to free */
} else {
if (estate->es_snapshot->xcnt > 0) {
pfree(estate->es_snapshot->xip);
}
else
{
if (estate->es_snapshot->xcnt > 0)
pfree(estate->es_snapshot->xip);
pfree(estate->es_snapshot);
}
if (NULL == estate->es_param_exec_vals) {
if (NULL == estate->es_param_exec_vals)
{
/* nothing to free */
} else {
}
else
{
pfree(estate->es_param_exec_vals);
estate->es_param_exec_vals = NULL;
}
@ -514,7 +519,7 @@ ExecCheckPerms(CmdType operation,
if (!(rm->info & ROW_ACL_FOR_UPDATE))
continue;
relid = ((RangeTblEntry *)nth(rm->rti - 1, rangeTable))->relid;
relid = ((RangeTblEntry *) nth(rm->rti - 1, rangeTable))->relid;
htup = SearchSysCacheTuple(RELOID,
ObjectIdGetDatum(relid),
0, 0, 0);
@ -586,10 +591,9 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
estate->es_range_table = rangeTable;
/*
* initialize the BaseId counter so node base_id's
* are assigned correctly. Someday baseid's will have to
* be stored someplace other than estate because they
* should be unique per query planned.
* initialize the BaseId counter so node base_id's are assigned
* correctly. Someday baseid's will have to be stored someplace other
* than estate because they should be unique per query planned.
*/
estate->es_BaseId = 1;
@ -599,9 +603,10 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
if (resultRelation != 0 && operation != CMD_SELECT)
{
/*
* if we have a result relation, open it and
* initialize the result relation info stuff.
* if we have a result relation, open it and initialize the result
* relation info stuff.
*/
RelationInfo *resultRelationInfo;
Index resultRelationIndex;
@ -628,8 +633,8 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
resultRelationInfo->ri_IndexRelationInfo = NULL;
/*
* open indices on result relation and save descriptors
* in the result relation information..
* open indices on result relation and save descriptors in the
* result relation information..
*/
if (operation != CMD_DELETE)
ExecOpenIndices(resultRelationOid, resultRelationInfo);
@ -638,6 +643,7 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
}
else
{
/*
* if no result relation, then set state appropriately
*/
@ -659,12 +665,12 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
foreach(l, parseTree->rowMark)
{
rm = lfirst(l);
relid = ((RangeTblEntry *)nth(rm->rti - 1, rangeTable))->relid;
relid = ((RangeTblEntry *) nth(rm->rti - 1, rangeTable))->relid;
relation = heap_open(relid);
LockRelation(relation, RowShareLock);
if (!(rm->info & ROW_MARK_FOR_UPDATE))
continue;
erm = (execRowMark*) palloc(sizeof(execRowMark));
erm = (execRowMark *) palloc(sizeof(execRowMark));
erm->relation = relation;
erm->rti = rm->rti;
sprintf(erm->resname, "ctid%u", rm->rti);
@ -683,31 +689,29 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
}
/*
* initialize the private state information for
* all the nodes in the query tree. This opens
* files, allocates storage and leaves us ready
* to start processing tuples..
* initialize the private state information for all the nodes in the
* query tree. This opens files, allocates storage and leaves us
* ready to start processing tuples..
*/
ExecInitNode(plan, estate, NULL);
/*
* get the tuple descriptor describing the type
* of tuples to return.. (this is especially important
* if we are creating a relation with "retrieve into")
* get the tuple descriptor describing the type of tuples to return..
* (this is especially important if we are creating a relation with
* "retrieve into")
*/
tupType = ExecGetTupType(plan); /* tuple descriptor */
targetList = plan->targetlist;
len = ExecTargetListLength(targetList); /* number of attributes */
/*
* now that we have the target list, initialize the junk filter
* if this is a REPLACE or a DELETE query.
* We also init the junk filter if this is an append query
* (there might be some rule lock info there...)
* NOTE: in the future we might want to initialize the junk
* filter for all queries.
* SELECT added by daveh@insightdist.com 5/20/98 to allow
* ORDER/GROUP BY have an identifier missing from the target.
* now that we have the target list, initialize the junk filter if
* this is a REPLACE or a DELETE query. We also init the junk filter
* if this is an append query (there might be some rule lock info
* there...) NOTE: in the future we might want to initialize the junk
* filter for all queries. SELECT added by daveh@insightdist.com
* 5/20/98 to allow ORDER/GROUP BY have an identifier missing from the
* target.
*/
{
bool junk_filter_needed = false;
@ -761,6 +765,7 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
*/
if (parseTree->into != NULL)
{
/*
* create the "into" relation
*/
@ -772,14 +777,14 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
tupdesc = CreateTupleDescCopy(tupType);
intoRelationId = heap_create_with_catalog(intoName,
tupdesc, RELKIND_RELATION,parseTree->isTemp);
tupdesc, RELKIND_RELATION, parseTree->isTemp);
FreeTupleDesc(tupdesc);
/*
* XXX rather than having to call setheapoverride(true)
* and then back to false, we should change the
* arguments to heap_open() instead..
* and then back to false, we should change the arguments
* to heap_open() instead..
*/
setheapoverride(true);
@ -879,7 +884,7 @@ ExecutePlan(EState *estate,
int offsetTuples,
int numberTuples,
ScanDirection direction,
DestReceiver* destfunc)
DestReceiver * destfunc)
{
JunkFilter *junkfilter;
TupleTableSlot *slot;
@ -901,17 +906,18 @@ ExecutePlan(EState *estate,
estate->es_direction = direction;
/*
* Loop until we've processed the proper number
* of tuples from the plan..
* Loop until we've processed the proper number of tuples from the
* plan..
*/
for (;;)
{
/*
* Execute the plan and obtain a tuple
*/
/* at the top level, the parent of a plan (2nd arg) is itself */
lnext:;
lnext: ;
if (estate->es_useEvalPlan)
{
slot = EvalPlanQualNext(estate);
@ -922,9 +928,8 @@ lnext:;
slot = ExecProcNode(plan, plan);
/*
* if the tuple is null, then we assume
* there is nothing more to process so
* we just return null...
* if the tuple is null, then we assume there is nothing more to
* process so we just return null...
*/
if (TupIsNull(slot))
{
@ -933,11 +938,9 @@ lnext:;
}
/*
* For now we completely execute the plan and skip
* result tuples if requested by LIMIT offset.
* Finally we should try to do it in deeper levels
* if possible (during index scan)
* - Jan
* For now we completely execute the plan and skip result tuples
* if requested by LIMIT offset. Finally we should try to do it in
* deeper levels if possible (during index scan) - Jan
*/
if (offsetTuples > 0)
{
@ -946,11 +949,10 @@ lnext:;
}
/*
* if we have a junk filter, then project a new
* tuple with the junk removed.
* if we have a junk filter, then project a new tuple with the
* junk removed.
*
* Store this new "clean" tuple in the place of the
* original tuple.
* Store this new "clean" tuple in the place of the original tuple.
*
* Also, extract all the junk information we need.
*/
@ -989,8 +991,8 @@ lnext:;
TupleTableSlot *newSlot;
int test;
lmark:;
foreach (l, estate->es_rowMark)
lmark: ;
foreach(l, estate->es_rowMark)
{
erm = lfirst(l);
if (!ExecGetJunkAttribute(junkfilter,
@ -1016,10 +1018,10 @@ lmark:;
if (XactIsoLevel == XACT_SERIALIZABLE)
{
elog(ERROR, "Can't serialize access due to concurrent update");
return(NULL);
return (NULL);
}
else if (!(ItemPointerEquals(&(tuple.t_self),
(ItemPointer)DatumGetPointer(datum))))
(ItemPointer) DatumGetPointer(datum))))
{
newSlot = EvalPlanQual(estate, erm->rti, &(tuple.t_self));
if (!(TupIsNull(newSlot)))
@ -1029,16 +1031,17 @@ lmark:;
goto lmark;
}
}
/*
* if tuple was deleted or PlanQual failed
* for updated tuple - we must not return
* this tuple!
* if tuple was deleted or PlanQual failed for
* updated tuple - we must not return this
* tuple!
*/
goto lnext;
default:
elog(ERROR, "Unknown status %u from heap_mark4update", test);
return(NULL);
return (NULL);
}
}
}
@ -1057,17 +1060,17 @@ lmark:;
} /* if (junkfilter... */
/*
* now that we have a tuple, do the appropriate thing
* with it.. either return it to the user, add
* it to a relation someplace, delete it from a
* relation, or modify some of its attributes.
* now that we have a tuple, do the appropriate thing with it..
* either return it to the user, add it to a relation someplace,
* delete it from a relation, or modify some of its attributes.
*/
switch (operation)
{
case CMD_SELECT:
ExecRetrieve(slot, /* slot containing tuple */
destfunc, /* destination's tuple-receiver obj */
destfunc, /* destination's tuple-receiver
* obj */
estate); /* */
result = slot;
break;
@ -1092,10 +1095,10 @@ lmark:;
result = NULL;
break;
}
/*
* check our tuple count.. if we've returned the
* proper number then return, else loop again and
* process more tuples..
* check our tuple count.. if we've returned the proper number
* then return, else loop again and process more tuples..
*/
current_tuple_count += 1;
if (numberTuples == current_tuple_count)
@ -1103,8 +1106,8 @@ lmark:;
}
/*
* here, result is either a slot containing a tuple in the case
* of a RETRIEVE or NULL otherwise.
* here, result is either a slot containing a tuple in the case of a
* RETRIEVE or NULL otherwise.
*/
return result;
}
@ -1121,7 +1124,7 @@ lmark:;
*/
static void
ExecRetrieve(TupleTableSlot *slot,
DestReceiver *destfunc,
DestReceiver * destfunc,
EState *estate)
{
HeapTuple tuple;
@ -1182,8 +1185,7 @@ ExecAppend(TupleTableSlot *slot,
resultRelationDesc = resultRelationInfo->ri_RelationDesc;
/*
* have to add code to perform unique checking here.
* cim -12/1/89
* have to add code to perform unique checking here. cim -12/1/89
*/
/* BEFORE ROW INSERT Triggers */
@ -1210,9 +1212,7 @@ ExecAppend(TupleTableSlot *slot,
*/
if (resultRelationDesc->rd_att->constr)
{
ExecConstraints("ExecAppend", resultRelationDesc, tuple, estate);
}
/*
* insert the tuple
@ -1224,9 +1224,9 @@ ExecAppend(TupleTableSlot *slot,
/*
* process indices
*
* Note: heap_insert adds a new tuple to a relation. As a side
* effect, the tupleid of the new tuple is placed in the new
* tuple's t_ctid field.
* Note: heap_insert adds a new tuple to a relation. As a side effect,
* the tupleid of the new tuple is placed in the new tuple's t_ctid
* field.
*/
numIndices = resultRelationInfo->ri_NumIndices;
if (numIndices > 0)
@ -1313,13 +1313,11 @@ ldelete:;
(estate->es_processed)++;
/*
* Note: Normally one would think that we have to
* delete index tuples associated with the
* heap tuple now..
* Note: Normally one would think that we have to delete index tuples
* associated with the heap tuple now..
*
* ... but in POSTGRES, we have no need to do this
* because the vacuum daemon automatically
* opens an index scan and deletes index tuples
* ... but in POSTGRES, we have no need to do this because the vacuum
* daemon automatically opens an index scan and deletes index tuples
* when it finds deleted heap tuples. -cim 9/27/89
*/
@ -1374,10 +1372,9 @@ ExecReplace(TupleTableSlot *slot,
resultRelationDesc = resultRelationInfo->ri_RelationDesc;
/*
* have to add code to perform unique checking here.
* in the event of unique tuples, this becomes a deletion
* of the original tuple affected by the replace.
* cim -12/1/89
* have to add code to perform unique checking here. in the event of
* unique tuples, this becomes a deletion of the original tuple
* affected by the replace. cim -12/1/89
*/
/* BEFORE ROW UPDATE Triggers */
@ -1404,9 +1401,7 @@ ExecReplace(TupleTableSlot *slot,
*/
if (resultRelationDesc->rd_att->constr)
{
ExecConstraints("ExecReplace", resultRelationDesc, tuple, estate);
}
/*
* replace the heap tuple
@ -1448,23 +1443,21 @@ lreplace:;
(estate->es_processed)++;
/*
* Note: instead of having to update the old index tuples
* associated with the heap tuple, all we do is form
* and insert new index tuples.. This is because
* replaces are actually deletes and inserts and
* index tuple deletion is done automagically by
* the vacuum daemon.. All we do is insert new
* index tuples. -cim 9/27/89
* Note: instead of having to update the old index tuples associated
* with the heap tuple, all we do is form and insert new index
* tuples.. This is because replaces are actually deletes and inserts
* and index tuple deletion is done automagically by the vacuum
* daemon.. All we do is insert new index tuples. -cim 9/27/89
*/
/*
* process indices
*
* heap_replace updates a tuple in the base relation by invalidating
* it and then appending a new tuple to the relation. As a side
* effect, the tupleid of the new tuple is placed in the new
* tuple's t_ctid field. So we now insert index tuples using
* the new tupleid stored there.
* heap_replace updates a tuple in the base relation by invalidating it
* and then appending a new tuple to the relation. As a side effect,
* the tupleid of the new tuple is placed in the new tuple's t_ctid
* field. So we now insert index tuples using the new tupleid stored
* there.
*/
numIndices = resultRelationInfo->ri_NumIndices;
@ -1584,7 +1577,7 @@ ExecRelCheck(Relation rel, HeapTuple tuple, EState *estate)
if (estate->es_result_relation_constraints == NULL)
{
estate->es_result_relation_constraints =
(List **)palloc(ncheck * sizeof(List *));
(List **) palloc(ncheck * sizeof(List *));
for (i = 0; i < ncheck; i++)
{
@ -1642,10 +1635,10 @@ ExecConstraints(char *caller, Relation rel, HeapTuple tuple, EState *estate)
return;
}
TupleTableSlot*
TupleTableSlot *
EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
{
evalPlanQual *epq = (evalPlanQual*) estate->es_evalPlanQual;
evalPlanQual *epq = (evalPlanQual *) estate->es_evalPlanQual;
evalPlanQual *oldepq;
EState *epqstate = NULL;
Relation relation;
@ -1665,9 +1658,9 @@ EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
/*
* If this is request for another RTE - Ra, - then we have to check
* wasn't PlanQual requested for Ra already and if so then Ra' row
* was updated again and we have to re-start old execution for Ra
* and forget all what we done after Ra was suspended. Cool? -:))
* wasn't PlanQual requested for Ra already and if so then Ra' row was
* updated again and we have to re-start old execution for Ra and
* forget all what we done after Ra was suspended. Cool? -:))
*/
if (epq != NULL && epq->rti != rti &&
epq->estate.es_evTuple[rti - 1] != NULL)
@ -1676,7 +1669,7 @@ EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
{
/* pop previous PlanQual from the stack */
epqstate = &(epq->estate);
oldepq = (evalPlanQual*) epqstate->es_evalPlanQual;
oldepq = (evalPlanQual *) epqstate->es_evalPlanQual;
Assert(oldepq->rti != 0);
/* stop execution */
ExecEndNode(epq->plan, epq->plan);
@ -1700,7 +1693,7 @@ EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
if (newepq == NULL) /* first call or freePQ stack is empty */
{
newepq = (evalPlanQual*) palloc(sizeof(evalPlanQual));
newepq = (evalPlanQual *) palloc(sizeof(evalPlanQual));
/* Init EState */
epqstate = &(newepq->estate);
memset(epqstate, 0, sizeof(EState));
@ -1719,24 +1712,20 @@ EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
/* ... rest */
newepq->plan = copyObject(estate->es_origPlan);
newepq->free = NULL;
epqstate->es_evTupleNull = (bool*)
epqstate->es_evTupleNull = (bool *)
palloc(length(estate->es_range_table) * sizeof(bool));
if (epq == NULL) /* first call */
{
epqstate->es_evTuple = (HeapTuple*)
epqstate->es_evTuple = (HeapTuple *)
palloc(length(estate->es_range_table) * sizeof(HeapTuple));
memset(epqstate->es_evTuple, 0,
length(estate->es_range_table) * sizeof(HeapTuple));
}
else
{
epqstate->es_evTuple = epq->estate.es_evTuple;
}
}
else
{
epqstate = &(newepq->estate);
}
/* push current PQ to the stack */
epqstate->es_evalPlanQual = (Pointer) epq;
epq = newepq;
@ -1748,9 +1737,8 @@ EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
epqstate = &(epq->estate);
/*
* Ok - we're requested for the same RTE (-:)).
* I'm not sure about ability to use ExecReScan instead of
* ExecInitNode, so...
* Ok - we're requested for the same RTE (-:)). I'm not sure about
* ability to use ExecReScan instead of ExecInitNode, so...
*/
if (endNode)
ExecEndNode(epq->plan, epq->plan);
@ -1770,15 +1758,15 @@ EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
{
List *l;
foreach (l, estate->es_rowMark)
foreach(l, estate->es_rowMark)
{
if (((execRowMark*) lfirst(l))->rti == rti)
if (((execRowMark *) lfirst(l))->rti == rti)
break;
}
relation = ((execRowMark*) lfirst(l))->relation;
relation = ((execRowMark *) lfirst(l))->relation;
}
tuple.t_self = *tid;
for ( ; ; )
for (;;)
{
heap_fetch(relation, SnapshotDirty, &tuple, &buffer);
if (tuple.t_data != NULL)
@ -1787,9 +1775,10 @@ EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
if (TransactionIdIsValid(SnapshotDirty->xmin))
elog(ERROR, "EvalPlanQual: t_xmin is uncommitted ?!");
/*
* If tuple is being updated by other transaction then
* we have to wait for its commit/abort.
* If tuple is being updated by other transaction then we have
* to wait for its commit/abort.
*/
if (TransactionIdIsValid(xwait))
{
@ -1797,6 +1786,7 @@ EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
XactLockTableWait(xwait);
continue;
}
/*
* Nice! We got tuple - now copy it.
*/
@ -1806,10 +1796,11 @@ EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
ReleaseBuffer(buffer);
break;
}
/*
* Ops! Invalid tuple. Have to check is it updated or deleted.
* Note that it's possible to get invalid SnapshotDirty->tid
* if tuple updated by this transaction. Do we have to check this?
* Note that it's possible to get invalid SnapshotDirty->tid if
* tuple updated by this transaction. Do we have to check this?
*/
if (ItemPointerIsValid(&(SnapshotDirty->tid)) &&
!(ItemPointerEquals(&(tuple.t_self), &(SnapshotDirty->tid))))
@ -1817,11 +1808,12 @@ EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
tuple.t_self = SnapshotDirty->tid; /* updated ... */
continue;
}
/*
* Deleted or updated by this transaction. Do not
* (re-)start execution of this PQ. Continue previous PQ.
* Deleted or updated by this transaction. Do not (re-)start
* execution of this PQ. Continue previous PQ.
*/
oldepq = (evalPlanQual*) epqstate->es_evalPlanQual;
oldepq = (evalPlanQual *) epqstate->es_evalPlanQual;
if (oldepq != NULL)
{
Assert(oldepq->rti != 0);
@ -1832,10 +1824,11 @@ EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
estate->es_evalPlanQual = (Pointer) epq;
}
else
{ /* this is the first (oldest) PQ
epq->rti = 0; * - mark as free and
estate->es_useEvalPlan = false; * continue Query execution
return (NULL); */
{ /* this is the first (oldest) PQ epq->rti
* = 0; * - mark as
* free and estate->es_useEvalPlan =
* false; * continue Query execution
* return (NULL); */
}
}
@ -1847,18 +1840,18 @@ EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
ExecInitNode(epq->plan, epqstate, NULL);
/*
* For UPDATE/DELETE we have to return tid of actual row
* we're executing PQ for.
* For UPDATE/DELETE we have to return tid of actual row we're
* executing PQ for.
*/
*tid = tuple.t_self;
return (EvalPlanQualNext(estate));
}
static TupleTableSlot*
static TupleTableSlot *
EvalPlanQualNext(EState *estate)
{
evalPlanQual *epq = (evalPlanQual*) estate->es_evalPlanQual;
evalPlanQual *epq = (evalPlanQual *) estate->es_evalPlanQual;
EState *epqstate = &(epq->estate);
evalPlanQual *oldepq;
TupleTableSlot *slot;
@ -1877,11 +1870,12 @@ lpqnext:;
pfree(epqstate->es_evTuple[epq->rti - 1]);
epqstate->es_evTuple[epq->rti - 1] = NULL;
/* pop old PQ from the stack */
oldepq = (evalPlanQual*) epqstate->es_evalPlanQual;
if (oldepq == (evalPlanQual*) NULL)
oldepq = (evalPlanQual *) epqstate->es_evalPlanQual;
if (oldepq == (evalPlanQual *) NULL)
{ /* this is the first (oldest) */
epq->rti = 0; /* PQ - mark as free and */
estate->es_useEvalPlan = false; /* continue Query execution */
estate->es_useEvalPlan = false; /* continue Query
* execution */
return (NULL);
}
Assert(oldepq->rti != 0);
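The ExecutorStart() and ExecutorEnd() hunks earlier in this file pair a palloc'd copy of the query snapshot with a matching cleanup: free the xip array only when xcnt > 0, then the snapshot itself. A minimal sketch of that pairing, using a hypothetical simplified struct and plain malloc/free instead of the backend's SnapshotData and palloc/pfree; the deep copy of xip is inferred from the cleanup path:

#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for the snapshot data copied at executor startup. */
typedef struct
{
	int			xcnt;			/* number of in-progress transaction ids */
	int		   *xip;			/* array of xcnt ids, or NULL */
} SketchSnapshot;

static SketchSnapshot *
sketch_copy_snapshot(const SketchSnapshot *src)
{
	SketchSnapshot *copy = malloc(sizeof(SketchSnapshot));

	memcpy(copy, src, sizeof(SketchSnapshot));
	if (copy->xcnt > 0)
	{
		/* deep-copy the xid array so the copy owns its own storage */
		copy->xip = malloc(copy->xcnt * sizeof(int));
		memcpy(copy->xip, src->xip, copy->xcnt * sizeof(int));
	}
	return copy;
}

/* Mirrors the cleanup order shown in the ExecutorEnd() hunk. */
static void
sketch_free_snapshot(SketchSnapshot *snap)
{
	if (snap == NULL)
		return;					/* nothing to free */
	if (snap->xcnt > 0)
		free(snap->xip);
	free(snap);
}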

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execQual.c,v 1.50 1999/03/20 02:07:31 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execQual.c,v 1.51 1999/05/25 16:08:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -66,7 +66,7 @@ bool execConstByVal;
int execConstLen;
/* static functions decls */
static Datum ExecEvalAggref(Aggref *aggref, ExprContext *econtext, bool *isNull);
static Datum ExecEvalAggref(Aggref * aggref, ExprContext *econtext, bool *isNull);
static Datum ExecEvalArrayRef(ArrayRef *arrayRef, ExprContext *econtext,
bool *isNull, bool *isDone);
static Datum ExecEvalAnd(Expr *andExpr, ExprContext *econtext, bool *isNull);
@ -190,7 +190,7 @@ ExecEvalArrayRef(ArrayRef *arrayRef,
* ----------------------------------------------------------------
*/
static Datum
ExecEvalAggref(Aggref *aggref, ExprContext *econtext, bool *isNull)
ExecEvalAggref(Aggref * aggref, ExprContext *econtext, bool *isNull)
{
*isNull = econtext->ecxt_nulls[aggref->aggno];
return econtext->ecxt_values[aggref->aggno];
@ -305,16 +305,15 @@ ExecEvalVar(Var *variable, ExprContext *econtext, bool *isNull)
return (Datum) NULL;
/*
* get length and type information..
* ??? what should we do about variable length attributes
* - variable length attributes have their length stored
* in the first 4 bytes of the memory pointed to by the
* returned value.. If we can determine that the type
* is a variable length type, we can do the right thing.
* -cim 9/15/89
* get length and type information.. ??? what should we do about
* variable length attributes - variable length attributes have their
* length stored in the first 4 bytes of the memory pointed to by the
* returned value.. If we can determine that the type is a variable
* length type, we can do the right thing. -cim 9/15/89
*/
if (attnum < 0)
{
/*
* If this is a pseudo-att, we get the type and fake the length.
* There ought to be a routine to return the real lengths, so
@ -609,11 +608,11 @@ ExecEvalFuncArgs(FunctionCachePtr fcache,
i = 0;
foreach(arg, argList)
{
/*
* evaluate the expression, in general functions cannot take
* sets as arguments but we make an exception in the case of
* nested dot expressions. We have to watch out for this case
* here.
* evaluate the expression, in general functions cannot take sets
* as arguments but we make an exception in the case of nested dot
* expressions. We have to watch out for this case here.
*/
argV[i] = (Datum)
ExecEvalExpr((Node *) lfirst(arg),
@ -671,10 +670,10 @@ ExecMakeFunctionResult(Node *node,
}
/*
* arguments is a list of expressions to evaluate
* before passing to the function manager.
* We collect the results of evaluating the expressions
* into a datum array (argV) and pass this array to arrayFmgr()
* arguments is a list of expressions to evaluate before passing to
* the function manager. We collect the results of evaluating the
* expressions into a datum array (argV) and pass this array to
* arrayFmgr()
*/
if (fcache->nargs != 0)
{
@ -845,10 +844,10 @@ ExecEvalOper(Expr *opClause, ExprContext *econtext, bool *isNull)
/*
* an opclause is a list (op args). (I think)
*
* we extract the oid of the function associated with
* the op and then pass the work onto ExecMakeFunctionResult
* which evaluates the arguments and returns the result of
* calling the function on the evaluated arguments.
* we extract the oid of the function associated with the op and then
* pass the work onto ExecMakeFunctionResult which evaluates the
* arguments and returns the result of calling the function on the
* evaluated arguments.
*/
op = (Oper *) opClause->oper;
argList = opClause->args;
@ -889,10 +888,10 @@ ExecEvalFunc(Expr *funcClause,
/*
* an funcclause is a list (func args). (I think)
*
* we extract the oid of the function associated with
* the func node and then pass the work onto ExecMakeFunctionResult
* which evaluates the arguments and returns the result of
* calling the function on the evaluated arguments.
* we extract the oid of the function associated with the func node and
* then pass the work onto ExecMakeFunctionResult which evaluates the
* arguments and returns the result of calling the function on the
* evaluated arguments.
*
* this is nearly identical to the ExecEvalOper code.
*/
@ -939,21 +938,21 @@ ExecEvalNot(Expr *notclause, ExprContext *econtext, bool *isNull)
clause = lfirst(notclause->args);
/*
* We don't iterate over sets in the quals, so pass in an isDone
* flag, but ignore it.
* We don't iterate over sets in the quals, so pass in an isDone flag,
* but ignore it.
*/
expr_value = ExecEvalExpr(clause, econtext, isNull, &isDone);
/*
* if the expression evaluates to null, then we just
* cascade the null back to whoever called us.
* if the expression evaluates to null, then we just cascade the null
* back to whoever called us.
*/
if (*isNull)
return expr_value;
/*
* evaluation of 'not' is simple.. expr is false, then
* return 'true' and vice versa.
* evaluation of 'not' is simple.. expr is false, then return 'true'
* and vice versa.
*/
if (DatumGetInt32(expr_value) == 0)
return (Datum) true;
@ -978,15 +977,12 @@ ExecEvalOr(Expr *orExpr, ExprContext *econtext, bool *isNull)
clauses = orExpr->args;
/*
* we use three valued logic functions here...
* we evaluate each of the clauses in turn,
* as soon as one is true we return that
* value. If none is true and none of the
* clauses evaluate to NULL we return
* the value of the last clause evaluated (which
* should be false) with *isNull set to false else
* if none is true and at least one clause evaluated
* to NULL we set *isNull flag to true -
* we use three valued logic functions here... we evaluate each of the
* clauses in turn, as soon as one is true we return that value. If
* none is true and none of the clauses evaluate to NULL we return
* the value of the last clause evaluated (which should be false) with
* *isNull set to false else if none is true and at least one clause
* evaluated to NULL we set *isNull flag to true -
*/
foreach(clause, clauses)
{
@ -1001,28 +997,26 @@ ExecEvalOr(Expr *orExpr, ExprContext *econtext, bool *isNull)
&isDone);
/*
* if the expression evaluates to null, then we
* remember it in the local IsNull flag, if none of the
* clauses are true then we need to set *isNull
* to true again.
* if the expression evaluates to null, then we remember it in the
* local IsNull flag, if none of the clauses are true then we need
* to set *isNull to true again.
*/
if (*isNull)
{
IsNull = *isNull;
/*
* Many functions don't (or can't!) check if an argument is NULL
* or NOT_NULL and may return TRUE (1) with *isNull TRUE
* (an_int4_column <> 1: int4ne returns TRUE for NULLs).
* Not having time to fix the function manager I want to fix OR:
* if we had 'x <> 1 OR x isnull' then when x is NULL
* TRUE was returned by the 'x <> 1' clause ...
* but ExecQualClause says that the qualification should *fail*
* if isnull is TRUE for any value returned by ExecEvalExpr.
* So, force this rule here:
* if isnull is TRUE then the clause failed.
* Note: nullvalue() & nonnullvalue() always sets isnull to FALSE for NULLs.
* - vadim 09/22/97
* Many functions don't (or can't!) check if an argument is
* NULL or NOT_NULL and may return TRUE (1) with *isNull TRUE
* (an_int4_column <> 1: int4ne returns TRUE for NULLs). Not
* having time to fix the function manager I want to fix OR:
* if we had 'x <> 1 OR x isnull' then when x is NULL TRUE was
* returned by the 'x <> 1' clause ... but ExecQualClause says
* that the qualification should *fail* if isnull is TRUE for
* any value returned by ExecEvalExpr. So, force this rule
* here: if isnull is TRUE then the clause failed. Note:
* nullvalue() & nonnullvalue() always sets isnull to FALSE
* for NULLs. - vadim 09/22/97
*/
const_value = 0;
}
@ -1057,11 +1051,9 @@ ExecEvalAnd(Expr *andExpr, ExprContext *econtext, bool *isNull)
clauses = andExpr->args;
/*
* we evaluate each of the clauses in turn,
* as soon as one is false we return that
* value. If none are false or NULL then we return
* the value of the last clause evaluated, which
* should be true.
* we evaluate each of the clauses in turn, as soon as one is false we
* return that value. If none are false or NULL then we return the
* value of the last clause evaluated, which should be true.
*/
foreach(clause, clauses)
{
@ -1076,10 +1068,9 @@ ExecEvalAnd(Expr *andExpr, ExprContext *econtext, bool *isNull)
&isDone);
/*
* if the expression evaluates to null, then we
* remember it in IsNull, if none of the clauses after
* this evaluates to false we will have to set *isNull
* to true again.
* if the expression evaluates to null, then we remember it in
* IsNull, if none of the clauses after this evaluates to false we
* will have to set *isNull to true again.
*/
if (*isNull)
IsNull = *isNull;
@ -1106,7 +1097,7 @@ ExecEvalAnd(Expr *andExpr, ExprContext *econtext, bool *isNull)
* ----------------------------------------------------------------
*/
static Datum
ExecEvalCase(CaseExpr *caseExpr, ExprContext *econtext, bool *isNull)
ExecEvalCase(CaseExpr * caseExpr, ExprContext *econtext, bool *isNull)
{
List *clauses;
List *clause;
@ -1117,10 +1108,9 @@ ExecEvalCase(CaseExpr *caseExpr, ExprContext *econtext, bool *isNull)
clauses = caseExpr->args;
/*
* we evaluate each of the WHEN clauses in turn,
* as soon as one is true we return the corresponding
* result. If none are true then we return the value
* of the default clause, or NULL.
* we evaluate each of the WHEN clauses in turn, as soon as one is
* true we return the corresponding result. If none are true then we
* return the value of the default clause, or NULL.
*/
foreach(clause, clauses)
{
@ -1137,8 +1127,8 @@ ExecEvalCase(CaseExpr *caseExpr, ExprContext *econtext, bool *isNull)
&isDone);
/*
* if we have a true test, then we return the result,
* since the case statement is satisfied.
* if we have a true test, then we return the result, since the
* case statement is satisfied.
*/
if (DatumGetInt32(const_value) != 0)
{
@ -1159,9 +1149,7 @@ ExecEvalCase(CaseExpr *caseExpr, ExprContext *econtext, bool *isNull)
&isDone);
}
else
{
*isNull = true;
}
return const_value;
}
@ -1204,8 +1192,8 @@ ExecEvalExpr(Node *expression,
*isDone = true;
/*
* here we dispatch the work to the appropriate type
* of function given the type of our expression.
* here we dispatch the work to the appropriate type of function given
* the type of our expression.
*/
if (expression == NULL)
{
@ -1325,10 +1313,9 @@ ExecQualClause(Node *clause, ExprContext *econtext)
ExecEvalExpr(clause, econtext, &isNull, &isDone);
/*
* this is interesting behaviour here. When a clause evaluates
* to null, then we consider this as passing the qualification.
* it seems kind of like, if the qual is NULL, then there's no
* qual..
* this is interesting behaviour here. When a clause evaluates to
* null, then we consider this as passing the qualification. it seems
* kind of like, if the qual is NULL, then there's no qual..
*/
if (isNull)
return true;
@ -1371,12 +1358,12 @@ ExecQual(List *qual, ExprContext *econtext)
return true;
/*
* a "qual" is a list of clauses. To evaluate the
* qual, we evaluate each of the clauses in the list.
* a "qual" is a list of clauses. To evaluate the qual, we evaluate
* each of the clauses in the list.
*
* ExecQualClause returns true when we know the qualification
* *failed* so we just pass each clause in qual to it until
* we know the qual failed or there are no more clauses.
* ExecQualClause returns true when we know the qualification *failed* so
* we just pass each clause in qual to it until we know the qual
* failed or there are no more clauses.
*/
result = false;
@ -1388,9 +1375,9 @@ ExecQual(List *qual, ExprContext *econtext)
}
/*
* if result is true, then it means a clause failed so we
* return false. if result is false then it means no clause
* failed so we return true.
* if result is true, then it means a clause failed so we return
* false. if result is false then it means no clause failed so we
* return true.
*/
if (result == true)
return false;
@ -1454,41 +1441,39 @@ ExecTargetList(List *targetlist,
EV_printf("\n");
/*
* Return a dummy tuple if the targetlist is empty.
* the dummy tuple is necessary to differentiate
* between passing and failing the qualification.
* Return a dummy tuple if the targetlist is empty. the dummy tuple is
* necessary to differentiate between passing and failing the
* qualification.
*/
if (targetlist == NIL)
{
/*
* I now think that the only time this makes
* any sense is when we run a delete query. Then
* we need to return something other than nil
* so we know to delete the tuple associated
* with the saved tupleid.. see what ExecutePlan
* does with the returned tuple.. -cim 9/21/89
* I now think that the only time this makes any sense is when we
* run a delete query. Then we need to return something other
* than nil so we know to delete the tuple associated with the
* saved tupleid.. see what ExecutePlan does with the returned
* tuple.. -cim 9/21/89
*
* It could also happen in queries like:
* retrieve (foo.all) where bar.a = 3
* It could also happen in queries like: retrieve (foo.all) where
* bar.a = 3
*
* is this a new phenomenon? it might cause bogus behavior
* if we try to free this tuple later!! I put a hook in
* ExecProject to watch out for this case -mer 24 Aug 1992
* is this a new phenomenon? it might cause bogus behavior if we try
* to free this tuple later!! I put a hook in ExecProject to watch
* out for this case -mer 24 Aug 1992
*
* We must return dummy tuple!!! Try
* select t1.x from t1, t2 where t1.y = 1 and t2.y = 1
* - t2 scan target list will be empty and so no one tuple
* will be returned! But Mer was right - dummy tuple
* must be palloced... - vadim 03/01/1999
* We must return dummy tuple!!! Try select t1.x from t1, t2 where
* t1.y = 1 and t2.y = 1 - t2 scan target list will be empty and
* so no one tuple will be returned! But Mer was right - dummy
* tuple must be palloced... - vadim 03/01/1999
*/
*isDone = true;
return (HeapTuple) palloc(1);
}
/*
* allocate an array of char's to hold the "null" information
* only if we have a really large targetlist. otherwise we use
* the stack.
* allocate an array of char's to hold the "null" information only if
* we have a really large targetlist. otherwise we use the stack.
*/
if (nodomains > 64)
{
@ -1509,13 +1494,14 @@ ExecTargetList(List *targetlist,
*isDone = true;
foreach(tl, targetlist)
{
/*
* remember, a target list is a list of lists:
*
* ((<resdom | fjoin> expr) (<resdom | fjoin> expr) ...)
*
* tl is a pointer to successive cdr's of the targetlist
* tle is a pointer to the target list entry in tl
* tl is a pointer to successive cdr's of the targetlist tle is a
* pointer to the target list entry in tl
*/
tle = lfirst(tl);
@ -1660,9 +1646,8 @@ ExecProject(ProjectionInfo *projInfo, bool *isDone)
/*
* store the tuple in the projection slot and return the slot.
*
* If there's no projection target list we don't want to pfree
* the bogus tuple that ExecTargetList passes back to us.
* -mer 24 Aug 1992
* If there's no projection target list we don't want to pfree the bogus
* tuple that ExecTargetList passes back to us. -mer 24 Aug 1992
*/
return (TupleTableSlot *)
ExecStoreTuple(newTuple,/* tuple to store */
@ -1670,4 +1655,3 @@ ExecProject(ProjectionInfo *projInfo, bool *isDone)
InvalidBuffer, /* tuple has no buffer */
true);
}
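The ExecEvalOr comments in this file spell out three-valued OR: any TRUE input decides the result immediately; otherwise a NULL input makes the whole expression NULL; otherwise the result is FALSE. A sketch of just that rule, with hypothetical helper types rather than the executor's ExprContext machinery:

/* Sketch of three-valued OR as described in the ExecEvalOr comments. */
typedef enum { TV_FALSE, TV_TRUE, TV_NULL } TriValue;

static TriValue
sketch_or(const TriValue *vals, int n)
{
	int			sawnull = 0;
	int			i;

	for (i = 0; i < n; i++)
	{
		if (vals[i] == TV_TRUE)
			return TV_TRUE;		/* any TRUE decides the OR */
		if (vals[i] == TV_NULL)
			sawnull = 1;		/* remember it, keep scanning */
	}
	return sawnull ? TV_NULL : TV_FALSE;
}

ExecEvalAnd mirrors this with the roles swapped: any FALSE decides the AND, and a NULL only matters when no clause came out FALSE.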

View File

@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execTuples.c,v 1.24 1999/03/23 16:50:48 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execTuples.c,v 1.25 1999/05/25 16:08:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -467,6 +467,7 @@ ExecSetSlotPolicy(TupleTableSlot *slot, /* slot to change */
return old_shouldFree;
}
#endif
/* --------------------------------
@ -650,6 +651,7 @@ ExecInitMarkedTupleSlot(EState *estate, MergeJoinState *mergestate)
INIT_SLOT_ALLOC;
mergestate->mj_MarkedTupleSlot = (TupleTableSlot *) slot;
}
#endif
/* ----------------

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execUtils.c,v 1.44 1999/03/20 01:13:22 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execUtils.c,v 1.45 1999/05/25 16:08:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -711,7 +711,7 @@ ExecGetIndexKeyInfo(Form_pg_index indexTuple,
*/
CXT1_printf("ExecGetIndexKeyInfo: context is %d\n", CurrentMemoryContext);
attKeys = (AttrNumber *)palloc(numKeys * sizeof(AttrNumber));
attKeys = (AttrNumber *) palloc(numKeys * sizeof(AttrNumber));
for (i = 0; i < numKeys; i++)
attKeys[i] = indexTuple->indkey[i];
@ -917,16 +917,17 @@ ExecOpenIndices(Oid resultRelationOid,
if (indexDesc != NULL)
{
relationDescs[i++] = indexDesc;
/*
* Hack for not btree and hash indices: they use relation level
* exclusive locking on updation (i.e. - they are not ready
* for MVCC) and so we have to exclusively lock indices here
* to prevent deadlocks if we will scan them - index_beginscan
* places AccessShareLock, indices update methods don't use
* locks at all. We release this lock in ExecCloseIndices.
* Note, that hashes use page level locking - i.e. are not
* deadlock-free, - let's them be on their way -:))
* vadim 03-12-1998
* Hack for not btree and hash indices: they use relation
* level exclusive locking on updation (i.e. - they are
* not ready for MVCC) and so we have to exclusively lock
* indices here to prevent deadlocks if we will scan them
* - index_beginscan places AccessShareLock, indices
* update methods don't use locks at all. We release this
* lock in ExecCloseIndices. Note, that hashes use page
* level locking - i.e. are not deadlock-free, - let's
* them be on their way -:)) vadim 03-12-1998
*/
if (indexDesc->rd_rel->relam != BTREE_AM_OID &&
indexDesc->rd_rel->relam != HASH_AM_OID)
@ -1014,6 +1015,7 @@ ExecCloseIndices(RelationInfo *resultRelationInfo)
{
if (relationDescs[i] == NULL)
continue;
/*
* Notes in ExecOpenIndices.
*/
@ -1023,6 +1025,7 @@ ExecCloseIndices(RelationInfo *resultRelationInfo)
index_close(relationDescs[i]);
}
/*
* XXX should free indexInfo array here too.
*/

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/functions.c,v 1.25 1999/05/13 07:28:29 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/functions.c,v 1.26 1999/05/25 16:08:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -109,7 +109,7 @@ init_execution_state(FunctionCachePtr fcache,
planTree_list = pg_parse_and_plan(fcache->src, fcache->argOidVect,
nargs, &queryTree_list, None, FALSE);
foreach (qtl_item, queryTree_list)
foreach(qtl_item, queryTree_list)
{
Query *queryTree = lfirst(qtl_item);
Plan *planTree = lfirst(planTree_list);
@ -199,7 +199,7 @@ postquel_getnext(execution_state *es)
feature = (LAST_POSTQUEL_COMMAND(es)) ? EXEC_RETONE : EXEC_RUN;
return ExecutorRun(es->qd, es->estate, feature, (Node *)NULL, (Node *)NULL);
return ExecutorRun(es->qd, es->estate, feature, (Node *) NULL, (Node *) NULL);
}
static void

View File

@ -45,7 +45,7 @@ typedef struct AggFuncInfo
FmgrInfo finalfn;
} AggFuncInfo;
static Datum aggGetAttr(TupleTableSlot *tuple, Aggref *aggref, bool *isNull);
static Datum aggGetAttr(TupleTableSlot *tuple, Aggref * aggref, bool *isNull);
/* ---------------------------------------
@ -121,7 +121,8 @@ ExecAgg(Agg *node)
*/
/*
* We loop retrieving groups until we find one matching node->plan.qual
* We loop retrieving groups until we find one matching
* node->plan.qual
*/
do
{
@ -245,6 +246,7 @@ ExecAgg(Agg *node)
outerslot = ExecProcNode(outerPlan, (Plan *) node);
if (TupIsNull(outerslot))
{
/*
* when the outerplan doesn't return a single tuple,
* create a dummy heaptuple anyway because we still need
@ -299,17 +301,19 @@ ExecAgg(Agg *node)
{
if (noInitValue[aggno])
{
/*
* value1 has not been initialized.
* This is the first non-NULL input value.
* We use it as the initial value for value1.
* value1 has not been initialized. This is the
* first non-NULL input value. We use it as the
* initial value for value1.
*
* But we can't just use it straight, we have to
* make a copy of it since the tuple from which it
* came will be freed on the next iteration of the
* But we can't just use it straight, we have to make
* a copy of it since the tuple from which it came
* will be freed on the next iteration of the
* scan. This requires finding out how to copy
* the Datum. We assume the datum is of the agg's
* basetype, or at least binary compatible with it.
* basetype, or at least binary compatible with
* it.
*/
Type aggBaseType = typeidType(aggref->basetype);
int attlen = typeLen(aggBaseType);
@ -330,6 +334,7 @@ ExecAgg(Agg *node)
}
else
{
/*
* apply the transition functions.
*/
@ -441,9 +446,10 @@ ExecAgg(Agg *node)
* As long as the retrieved group does not match the
* qualifications it is ignored and the next group is fetched
*/
if(node->plan.qual != NULL)
if (node->plan.qual != NULL)
qual_result = ExecQual(fix_opids(node->plan.qual), econtext);
else qual_result = false;
else
qual_result = false;
if (oneTuple)
pfree(oneTuple);
@ -576,7 +582,7 @@ ExecEndAgg(Agg *node)
*/
static Datum
aggGetAttr(TupleTableSlot *slot,
Aggref *aggref,
Aggref * aggref,
bool *isNull)
{
Datum result;
@ -623,8 +629,9 @@ aggGetAttr(TupleTableSlot *slot,
}
result = heap_getattr(heapTuple, /* tuple containing attribute */
attnum, /* attribute number of desired attribute */
tuple_type,/* tuple descriptor of tuple */
attnum, /* attribute number of desired
* attribute */
tuple_type, /* tuple descriptor of tuple */
isNull); /* return: is attribute null? */
/* ----------------

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeAppend.c,v 1.18 1999/02/21 03:48:40 scrappy Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeAppend.c,v 1.19 1999/05/25 16:08:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/

View File

@ -13,7 +13,7 @@
* columns. (ie. tuples from the same group are consecutive)
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeGroup.c,v 1.25 1999/02/13 23:15:21 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeGroup.c,v 1.26 1999/05/25 16:08:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/

View File

@ -6,7 +6,7 @@
* Copyright (c) 1994, Regents of the University of California
*
*
* $Id: nodeHash.c,v 1.35 1999/05/18 21:33:06 tgl Exp $
* $Id: nodeHash.c,v 1.36 1999/05/25 16:08:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -81,6 +81,7 @@ ExecHash(Hash *node)
for (i = 0; i < nbatch; i++)
{
File tfile = OpenTemporaryFile();
Assert(tfile >= 0);
hashtable->innerBatchFile[i] = BufFileCreate(tfile);
}
@ -261,16 +262,19 @@ ExecHashTableCreate(Hash *node)
ntuples = outerNode->plan_size;
if (ntuples <= 0) /* force a plausible size if no info */
ntuples = 1000;
/* estimate tupsize based on footprint of tuple in hashtable...
* but what about palloc overhead?
/*
* estimate tupsize based on footprint of tuple in hashtable... but
* what about palloc overhead?
*/
tupsize = MAXALIGN(outerNode->plan_width) +
MAXALIGN(sizeof(HashJoinTupleData));
inner_rel_bytes = (double) ntuples * tupsize * FUDGE_FAC;
inner_rel_bytes = (double) ntuples *tupsize * FUDGE_FAC;
/*
* Target hashtable size is SortMem kilobytes, but not less than
* sqrt(estimated inner rel size), so as to avoid horrible performance.
* sqrt(estimated inner rel size), so as to avoid horrible
* performance.
*/
hash_table_bytes = sqrt(inner_rel_bytes);
if (hash_table_bytes < (SortMem * 1024L))
@ -278,17 +282,19 @@ ExecHashTableCreate(Hash *node)
/*
* Count the number of hash buckets we want for the whole relation,
* for an average bucket load of NTUP_PER_BUCKET (per virtual bucket!).
* for an average bucket load of NTUP_PER_BUCKET (per virtual
* bucket!).
*/
totalbuckets = (int) ceil((double) ntuples * FUDGE_FAC / NTUP_PER_BUCKET);
/*
* Count the number of buckets we think will actually fit in the
* target memory size, at a loading of NTUP_PER_BUCKET (physical buckets).
* NOTE: FUDGE_FAC here determines the fraction of the hashtable space
* reserved to allow for nonuniform distribution of hash values.
* Perhaps this should be a different number from the other uses of
* FUDGE_FAC, but since we have no real good way to pick either one...
* target memory size, at a loading of NTUP_PER_BUCKET (physical
* buckets). NOTE: FUDGE_FAC here determines the fraction of the
* hashtable space reserved to allow for nonuniform distribution of
* hash values. Perhaps this should be a different number from the
* other uses of FUDGE_FAC, but since we have no real good way to pick
* either one...
*/
bucketsize = NTUP_PER_BUCKET * tupsize;
nbuckets = (int) (hash_table_bytes / (bucketsize * FUDGE_FAC));
@ -297,21 +303,25 @@ ExecHashTableCreate(Hash *node)
if (totalbuckets <= nbuckets)
{
/* We have enough space, so no batching. In theory we could
* even reduce nbuckets, but since that could lead to poor
* behavior if estimated ntuples is much less than reality,
* it seems better to make more buckets instead of fewer.
/*
* We have enough space, so no batching. In theory we could even
* reduce nbuckets, but since that could lead to poor behavior if
* estimated ntuples is much less than reality, it seems better to
* make more buckets instead of fewer.
*/
totalbuckets = nbuckets;
nbatch = 0;
}
else
{
/* Need to batch; compute how many batches we want to use.
* Note that nbatch doesn't have to have anything to do with
* the ratio totalbuckets/nbuckets; in fact, it is the number
* of groups we will use for the part of the data that doesn't
* fall into the first nbuckets hash buckets.
/*
* Need to batch; compute how many batches we want to use. Note
* that nbatch doesn't have to have anything to do with the ratio
* totalbuckets/nbuckets; in fact, it is the number of groups we
* will use for the part of the data that doesn't fall into the
* first nbuckets hash buckets.
*/
nbatch = (int) ceil((inner_rel_bytes - hash_table_bytes) /
hash_table_bytes);
@ -319,12 +329,13 @@ ExecHashTableCreate(Hash *node)
nbatch = 1;
}
/* Now, totalbuckets is the number of (virtual) hashbuckets for the
/*
* Now, totalbuckets is the number of (virtual) hashbuckets for the
* whole relation, and nbuckets is the number of physical hashbuckets
* we will use in the first pass. Data falling into the first nbuckets
* virtual hashbuckets gets handled in the first pass; everything else
* gets divided into nbatch batches to be processed in additional
* passes.
* we will use in the first pass. Data falling into the first
* nbuckets virtual hashbuckets gets handled in the first pass;
* everything else gets divided into nbatch batches to be processed in
* additional passes.
*/
#ifdef HJDEBUG
printf("nbatch = %d, totalbuckets = %d, nbuckets = %d\n",
@ -353,14 +364,16 @@ ExecHashTableCreate(Hash *node)
* ----------------
*/
i = 0;
do {
do
{
i++;
sprintf(myPortalName, "<hashtable %d>", i);
myPortal = GetPortalByName(myPortalName);
} while (PortalIsValid(myPortal));
myPortal = CreatePortal(myPortalName);
Assert(PortalIsValid(myPortal));
hashtable->myPortal = (void*) myPortal; /* kluge for circular includes */
hashtable->myPortal = (void *) myPortal; /* kluge for circular
* includes */
hashtable->hashCxt = (MemoryContext) PortalGetVariableMemory(myPortal);
hashtable->batchCxt = (MemoryContext) PortalGetHeapMemory(myPortal);
@ -392,8 +405,9 @@ ExecHashTableCreate(Hash *node)
/* The files will not be opened until later... */
}
/* Prepare portal for the first-scan space allocations;
* allocate the hashbucket array therein, and set each bucket "empty".
/*
* Prepare portal for the first-scan space allocations; allocate the
* hashbucket array therein, and set each bucket "empty".
*/
MemoryContextSwitchTo(hashtable->batchCxt);
StartPortalAllocMode(DefaultAllocMode, 0);
@ -405,9 +419,7 @@ ExecHashTableCreate(Hash *node)
elog(ERROR, "Insufficient memory for hash table.");
for (i = 0; i < nbuckets; i++)
{
hashtable->buckets[i] = NULL;
}
MemoryContextSwitchTo(oldcxt);
@ -436,7 +448,7 @@ ExecHashTableDestroy(HashJoinTable hashtable)
/* Destroy the portal to release all working memory */
/* cast here is a kluge for circular includes... */
PortalDestroy((Portal*) & hashtable->myPortal);
PortalDestroy((Portal *) &hashtable->myPortal);
/* And drop the control block */
pfree(hashtable);
@ -476,7 +488,7 @@ ExecHashTableInsert(HashJoinTable hashtable,
hashTupleSize);
if (hashTuple == NULL)
elog(ERROR, "Insufficient memory for hash table.");
memcpy((char *) & hashTuple->htup,
memcpy((char *) &hashTuple->htup,
(char *) heapTuple,
sizeof(hashTuple->htup));
hashTuple->htup.t_data = (HeapTupleHeader)
@ -495,6 +507,7 @@ ExecHashTableInsert(HashJoinTable hashtable,
*/
int batchno = (hashtable->nbatch * (bucketno - hashtable->nbuckets)) /
(hashtable->totalbuckets - hashtable->nbuckets);
hashtable->innerBatchSize[batchno]++;
ExecHashJoinSaveTuple(heapTuple,
hashtable->innerBatchFile[batchno]);
@ -566,21 +579,18 @@ ExecScanHashBucket(HashJoinState *hjstate,
HashJoinTable hashtable = hjstate->hj_HashTable;
HashJoinTuple hashTuple = hjstate->hj_CurTuple;
/* hj_CurTuple is NULL to start scanning a new bucket, or the address
/*
* hj_CurTuple is NULL to start scanning a new bucket, or the address
* of the last tuple returned from the current bucket.
*/
if (hashTuple == NULL)
{
hashTuple = hashtable->buckets[hjstate->hj_CurBucketNo];
}
else
{
hashTuple = hashTuple->next;
}
while (hashTuple != NULL)
{
HeapTuple heapTuple = & hashTuple->htup;
HeapTuple heapTuple = &hashTuple->htup;
TupleTableSlot *inntuple;
bool qualResult;
@ -621,25 +631,31 @@ hashFunc(Datum key, int len, bool byVal)
unsigned int h = 0;
unsigned char *k;
if (byVal) {
if (byVal)
{
/*
* If it's a by-value data type, use the 'len' least significant bytes
* of the Datum value. This should do the right thing on either
* bigendian or littleendian hardware --- see the Datum access
* macros in c.h.
* If it's a by-value data type, use the 'len' least significant
* bytes of the Datum value. This should do the right thing on
* either bigendian or littleendian hardware --- see the Datum
* access macros in c.h.
*/
while (len-- > 0) {
while (len-- > 0)
{
h = (h * PRIME1) ^ (key & 0xFF);
key >>= 8;
}
} else {
}
else
{
/*
* If this is a variable length type, then 'k' points to a "struct
* varlena" and len == -1. NOTE: VARSIZE returns the "real" data
* length plus the sizeof the "vl_len" attribute of varlena (the
* length information). 'k' points to the beginning of the varlena
* struct, so we have to use "VARDATA" to find the beginning of the
* "real" data.
* struct, so we have to use "VARDATA" to find the beginning of
* the "real" data.
*/
if (len == -1)
{
@ -647,9 +663,7 @@ hashFunc(Datum key, int len, bool byVal)
k = (unsigned char *) VARDATA(key);
}
else
{
k = (unsigned char *) key;
}
while (len-- > 0)
h = (h * PRIME1) ^ (*k++);
}
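The comments reindented above describe the two cases inside hashFunc: for by-value types the 'len' least significant bytes of the Datum are folded in one at a time with a multiply-and-XOR step, and for varlena types the same loop runs over the bytes that follow the length header. A standalone approximation of that scheme (MY_PRIME1 is a placeholder constant; the backend's actual PRIME1 is defined elsewhere and is not shown in this hunk):

#include <stdio.h>
#include <string.h>

#define MY_PRIME1 37			/* placeholder multiplier, for illustration */

/* Hash the 'len' least significant bytes of an integer value. */
static unsigned int
hash_byval(unsigned long key, int len)
{
	unsigned int h = 0;

	while (len-- > 0)
	{
		h = (h * MY_PRIME1) ^ (unsigned int) (key & 0xFF);
		key >>= 8;
	}
	return h;
}

/* Hash a counted byte string, standing in for the varlena case. */
static unsigned int
hash_bytes(const unsigned char *k, int len)
{
	unsigned int h = 0;

	while (len-- > 0)
		h = (h * MY_PRIME1) ^ (*k++);
	return h;
}

int
main(void)
{
	const char *s = "hashjoin";

	printf("int4 hash:   %u\n", hash_byval(123456789UL, 4));
	printf("string hash: %u\n",
		   hash_bytes((const unsigned char *) s, (int) strlen(s)));
	return 0;
}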
@ -682,13 +696,14 @@ ExecHashTableReset(HashJoinTable hashtable, long ntuples)
StartPortalAllocMode(DefaultAllocMode, 0);
/*
* We still use the same number of physical buckets as in the first pass.
* (It could be different; but we already decided how many buckets would
* be appropriate for the allowed memory, so stick with that number.)
* We MUST set totalbuckets to equal nbuckets, because from now on
* no tuples will go out to temp files; there are no more virtual buckets,
* only real buckets. (This implies that tuples will go into different
* bucket numbers than they did on the first pass, but that's OK.)
* We still use the same number of physical buckets as in the first
* pass. (It could be different; but we already decided how many
* buckets would be appropriate for the allowed memory, so stick with
* that number.) We MUST set totalbuckets to equal nbuckets, because
* from now on no tuples will go out to temp files; there are no more
* virtual buckets, only real buckets. (This implies that tuples will
* go into different bucket numbers than they did on the first pass,
* but that's OK.)
*/
hashtable->totalbuckets = nbuckets;
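The comment above carries the reasoning for setting totalbuckets equal to nbuckets on later passes: bucket numbers are computed against totalbuckets (presumably a modulo in ExecHashGetBucket, which is not shown in this hunk), so once the two are equal every tuple lands in a real in-memory bucket and the temp-file path can never be taken again. A tiny illustration of that invariant, with a plain modulo standing in for the bucket computation:

#include <stdio.h>
#include <assert.h>

int
main(void)
{
	int			nbuckets = 4;
	int			totalbuckets = nbuckets;	/* later passes: no virtual buckets */
	unsigned int hash;

	for (hash = 0; hash < 1000; hash++)
	{
		int			bucketno = (int) (hash % (unsigned int) totalbuckets);

		/*
		 * With totalbuckets == nbuckets this can never reach nbuckets, so
		 * the "send it to a later batch" branch is unreachable.
		 */
		assert(bucketno < nbuckets);
	}
	printf("every hash value maps to an in-memory bucket\n");
	return 0;
}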
@ -700,9 +715,7 @@ ExecHashTableReset(HashJoinTable hashtable, long ntuples)
elog(ERROR, "Insufficient memory for hash table.");
for (i = 0; i < nbuckets; i++)
{
hashtable->buckets[i] = NULL;
}
MemoryContextSwitchTo(oldcxt);
}
@ -710,6 +723,7 @@ ExecHashTableReset(HashJoinTable hashtable, long ntuples)
void
ExecReScanHash(Hash *node, ExprContext *exprCtxt, Plan *parent)
{
/*
* if chgParam of subnode is not null then plan will be re-scanned by
* first ExecProcNode.
View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeHashjoin.c,v 1.20 1999/05/18 21:33:06 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeHashjoin.c,v 1.21 1999/05/25 16:08:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -25,7 +25,7 @@
static TupleTableSlot *ExecHashJoinOuterGetTuple(Plan *node, Plan *parent,
HashJoinState *hjstate);
static TupleTableSlot *ExecHashJoinGetSavedTuple(HashJoinState *hjstate,
BufFile *file,
BufFile * file,
TupleTableSlot *tupleSlot);
static int ExecHashJoinGetBatch(int bucketno, HashJoinTable hashtable);
static int ExecHashJoinNewBatch(HashJoinState *hjstate);
@ -133,6 +133,7 @@ ExecHashJoin(HashJoin *node)
for (i = 0; i < hashtable->nbatch; i++)
{
File tfile = OpenTemporaryFile();
Assert(tfile >= 0);
hashtable->outerBatchFile[i] = BufFileCreate(tfile);
}
@ -149,6 +150,7 @@ ExecHashJoin(HashJoin *node)
for (;;)
{
/*
* if the current outer tuple is nil, get a new one
*/
@ -159,6 +161,7 @@ ExecHashJoin(HashJoin *node)
hjstate);
if (TupIsNull(outerTupleSlot))
{
/*
* when the last batch runs out, clean up and exit
*/
@ -168,8 +171,8 @@ ExecHashJoin(HashJoin *node)
}
/*
* now we have an outer tuple, find the corresponding bucket for
* this tuple from the hash table
* now we have an outer tuple, find the corresponding bucket
* for this tuple from the hash table
*/
econtext->ecxt_outertuple = outerTupleSlot;
hjstate->hj_CurBucketNo = ExecHashGetBucket(hashtable, econtext,
@ -186,13 +189,16 @@ ExecHashJoin(HashJoin *node)
{
int batch = ExecHashJoinGetBatch(hjstate->hj_CurBucketNo,
hashtable);
if (batch > 0)
{
/*
* Need to postpone this outer tuple to a later batch.
* Save it in the corresponding outer-batch file.
*/
int batchno = batch - 1;
hashtable->outerBatchSize[batchno]++;
ExecHashJoinSaveTuple(outerTupleSlot->val,
hashtable->outerBatchFile[batchno]);
@ -212,6 +218,7 @@ ExecHashJoin(HashJoin *node)
econtext);
if (curtuple == NULL)
break; /* out of matches */
/*
* we've got a match, but still need to test qpqual
*/
@ -434,25 +441,26 @@ ExecHashJoinOuterGetTuple(Plan *node, Plan *parent, HashJoinState *hjstate)
if (curbatch == 0)
{ /* if it is the first pass */
slot = ExecProcNode(node, parent);
if (! TupIsNull(slot))
if (!TupIsNull(slot))
return slot;
/*
* We have just reached the end of the first pass.
* Try to switch to a saved batch.
* We have just reached the end of the first pass. Try to switch
* to a saved batch.
*/
curbatch = ExecHashJoinNewBatch(hjstate);
}
/*
* Try to read from a temp file.
* Loop allows us to advance to new batch as needed.
* Try to read from a temp file. Loop allows us to advance to new
* batch as needed.
*/
while (curbatch <= hashtable->nbatch)
{
slot = ExecHashJoinGetSavedTuple(hjstate,
hashtable->outerBatchFile[curbatch-1],
hashtable->outerBatchFile[curbatch - 1],
hjstate->hj_OuterTupleSlot);
if (! TupIsNull(slot))
if (!TupIsNull(slot))
return slot;
curbatch = ExecHashJoinNewBatch(hjstate);
}
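The rewritten comments in ExecHashJoinOuterGetTuple describe a two-stage source for outer tuples: during the first pass they come from the subplan, and once that is exhausted the loop above drains the saved outer batch files, advancing to the next batch whenever the current one runs dry. A simplified sketch of that drain-and-advance loop, with plain stdio temp files standing in for the BufFile layer and integer values standing in for saved tuples:

#include <stdio.h>

#define NBATCH 3

/*
 * Stand-in for "read the next saved tuple": returns 1 and stores a value,
 * or 0 when the batch file is exhausted (or was never created).
 */
static int
read_batch_value(FILE *f, int *value)
{
	return (f != NULL && fread(value, sizeof(int), 1, f) == 1);
}

/*
 * Get the next "outer tuple" from the saved batches, advancing curbatch
 * whenever the current batch runs dry, like the while loop above.
 */
static int
next_outer(FILE *batchfile[], int *curbatch, int *value)
{
	while (*curbatch <= NBATCH)
	{
		if (read_batch_value(batchfile[*curbatch - 1], value))
			return 1;
		(*curbatch)++;			/* move on to the next saved batch */
	}
	return 0;					/* no more batches */
}

int
main(void)
{
	FILE	   *batchfile[NBATCH];
	int			curbatch = 1;
	int			i,
				j,
				v;

	/* Build three temporary batch files holding a couple of values each. */
	for (i = 0; i < NBATCH; i++)
	{
		batchfile[i] = tmpfile();
		if (batchfile[i] == NULL)
			return 1;
		for (j = 0; j < 2; j++)
		{
			v = 10 * (i + 1) + j;
			fwrite(&v, sizeof(int), 1, batchfile[i]);
		}
		rewind(batchfile[i]);	/* like seeking the BufFile back to 0 */
	}

	while (next_outer(batchfile, &curbatch, &v))
		printf("outer tuple %d (batch %d)\n", v, curbatch);

	for (i = 0; i < NBATCH; i++)
		fclose(batchfile[i]);
	return 0;
}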
@ -470,7 +478,7 @@ ExecHashJoinOuterGetTuple(Plan *node, Plan *parent, HashJoinState *hjstate)
static TupleTableSlot *
ExecHashJoinGetSavedTuple(HashJoinState *hjstate,
BufFile *file,
BufFile * file,
TupleTableSlot *tupleSlot)
{
HeapTupleData htup;
@ -513,9 +521,10 @@ ExecHashJoinNewBatch(HashJoinState *hjstate)
if (newbatch > 1)
{
/*
* We no longer need the previous outer batch file;
* close it right away to free disk space.
* We no longer need the previous outer batch file; close it right
* away to free disk space.
*/
BufFileClose(hashtable->outerBatchFile[newbatch - 2]);
hashtable->outerBatchFile[newbatch - 2] = NULL;
@ -541,8 +550,8 @@ ExecHashJoinNewBatch(HashJoinState *hjstate)
return newbatch; /* no more batches */
/*
* Rewind inner and outer batch files for this batch,
* so that we can start reading them.
* Rewind inner and outer batch files for this batch, so that we can
* start reading them.
*/
if (BufFileSeek(hashtable->outerBatchFile[newbatch - 1], 0L,
SEEK_SET) != 0L)
@ -571,7 +580,8 @@ ExecHashJoinNewBatch(HashJoinState *hjstate)
}
/*
* after we build the hash table, the inner batch file is no longer needed
* after we build the hash table, the inner batch file is no longer
* needed
*/
BufFileClose(innerFile);
hashtable->innerBatchFile[newbatch - 1] = NULL;
@ -615,7 +625,7 @@ ExecHashJoinGetBatch(int bucketno, HashJoinTable hashtable)
void
ExecHashJoinSaveTuple(HeapTuple heapTuple,
BufFile *file)
BufFile * file)
{
size_t written;
View File
@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeIndexscan.c,v 1.35 1999/05/10 00:45:06 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeIndexscan.c,v 1.36 1999/05/25 16:08:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -98,6 +98,7 @@ IndexNext(IndexScan *node)
bool bBackward;
int indexNumber;
/* ----------------
* extract necessary information from index scan node
* ----------------
@ -114,9 +115,9 @@ IndexNext(IndexScan *node)
/*
* Check if we are evaluating PlanQual for tuple of this relation.
* Additional checking is not good, but no other way for now.
* We could introduce new nodes for this case and handle
* IndexScan --> NewNode switching in Init/ReScan plan...
* Additional checking is not good, but no other way for now. We could
* introduce new nodes for this case and handle IndexScan --> NewNode
* switching in Init/ReScan plan...
*/
if (estate->es_evTuple != NULL &&
estate->es_evTuple[node->scan.scanrelid - 1] != NULL)
@ -703,7 +704,7 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
run_keys = (n_keys <= 0) ? NULL :
(int *) palloc(n_keys * sizeof(int));
CXT1_printf("ExecInitIndexScan: context is %d\n",CurrentMemoryContext);
CXT1_printf("ExecInitIndexScan: context is %d\n", CurrentMemoryContext);
/* ----------------
* for each opclause in the given qual,
View File
@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeMaterial.c,v 1.21 1999/02/13 23:15:24 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeMaterial.c,v 1.22 1999/05/25 16:08:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
View File
@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeMergejoin.c,v 1.26 1999/05/10 00:45:07 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeMergejoin.c,v 1.27 1999/05/25 16:08:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -103,7 +103,7 @@ static bool MergeCompare(List *eqQual, List *compareQual, ExprContext *econtext)
* ----------------------------------------------------------------
*/
static List *
MJFormSkipQual(List *qualList, char * replaceopname)
MJFormSkipQual(List *qualList, char *replaceopname)
{
List *qualCopy;
List *qualcdr;
View File
@ -27,7 +27,7 @@
* SeqScan (emp.all)
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeResult.c,v 1.10 1999/03/20 01:13:22 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeResult.c,v 1.11 1999/05/25 16:08:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -278,7 +278,8 @@ ExecEndResult(Result *node)
* ----------------
*/
ExecClearTuple(resstate->cstate.cs_ResultTupleSlot);
pfree(resstate); node->resstate = NULL; /* XXX - new for us - er1p */
pfree(resstate);
node->resstate = NULL; /* XXX - new for us - er1p */
}
void
View File
@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeSeqscan.c,v 1.17 1999/02/13 23:15:26 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeSeqscan.c,v 1.18 1999/05/25 16:08:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -68,9 +68,9 @@ SeqNext(SeqScan *node)
/*
* Check if we are evaluating PlanQual for tuple of this relation.
* Additional checking is not good, but no other way for now.
* We could introduce new nodes for this case and handle
* SeqScan --> NewNode switching in Init/ReScan plan...
* Additional checking is not good, but no other way for now. We could
* introduce new nodes for this case and handle SeqScan --> NewNode
* switching in Init/ReScan plan...
*/
if (estate->es_evTuple != NULL &&
estate->es_evTuple[node->scanrelid - 1] != NULL)
@ -83,10 +83,11 @@ SeqNext(SeqScan *node)
return (slot);
}
slot->val = estate->es_evTuple[node->scanrelid - 1];
/*
* Note that unlike IndexScan, SeqScan never use keys
* in heap_beginscan (and this is very bad) - so, here
* we have not check are keys ok or not.
* Note that unlike IndexScan, SeqScan never use keys in
* heap_beginscan (and this is very bad) - so, here we have not
* check are keys ok or not.
*/
/* Flag for the next call that no more tuples */
estate->es_evTupleNull[node->scanrelid - 1] = true;
@ -401,7 +402,8 @@ ExecSeqReScan(SeqScan *node, ExprContext *exprCtxt, Plan *parent)
outerPlan = outerPlan((Plan *) node);
ExecReScan(outerPlan, exprCtxt, parent);
}
else /* otherwise, we are scanning a relation */
else
/* otherwise, we are scanning a relation */
{
/* If this is re-scanning of PlanQual ... */
if (estate->es_evTuple != NULL &&
View File
@ -58,15 +58,16 @@ ExecSubPlan(SubPlan *node, List *pvar, ExprContext *econtext)
ExecReScan(plan, (ExprContext *) NULL, plan);
/*
* For all sublink types except EXPR_SUBLINK, the result type is boolean,
* and we have a fairly clear idea of how to combine multiple subitems
* and deal with NULL values or an empty subplan result.
* For all sublink types except EXPR_SUBLINK, the result type is
* boolean, and we have a fairly clear idea of how to combine multiple
* subitems and deal with NULL values or an empty subplan result.
*
* For EXPR_SUBLINK, the result type is whatever the combining operator
* returns. We have no way to deal with more than one column in the
* subplan result --- hopefully the parser forbids that. More seriously,
* it's unclear what to do with NULL values or an empty subplan result.
* For now, we error out, but should something else happen?
* subplan result --- hopefully the parser forbids that. More
* seriously, it's unclear what to do with NULL values or an empty
* subplan result. For now, we error out, but should something else
* happen?
*/
for (slot = ExecProcNode(plan, plan);
@ -105,14 +106,14 @@ ExecSubPlan(SubPlan *node, List *pvar, ExprContext *econtext)
}
if (subLinkType != EXPR_SUBLINK)
{
if ((! (bool) result && !(sublink->useor)) ||
if ((!(bool) result && !(sublink->useor)) ||
((bool) result && sublink->useor))
break;
}
i++;
}
if (subLinkType == ALL_SUBLINK && ! (bool) result)
if (subLinkType == ALL_SUBLINK && !(bool) result)
break;
if (subLinkType == ANY_SUBLINK && (bool) result)
break;
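The comment rewritten above explains how non-EXPR sublinks combine their results: per-column comparison results are OR'd together when useor is set and AND'd otherwise, and the per-row results are then folded across the subplan output, stopping early once an ALL sublink sees a false row or an ANY sublink sees a true one. A compact sketch of that folding (the types, the row source, and the use of plain ints for booleans are all invented for illustration):

#include <stdio.h>

typedef enum
{
	ANY_SUB,
	ALL_SUB
} SubLinkKind;

/*
 * Combine one row's per-column comparison results (OR when useor is set,
 * AND otherwise), then report whether the running ANY/ALL answer is already
 * decided so the caller can stop scanning further rows.
 */
static int
fold_row(SubLinkKind kind, int useor, const int *cols, int ncols, int *answer)
{
	int			row = useor ? 0 : 1;
	int			i;

	for (i = 0; i < ncols; i++)
		row = useor ? (row || cols[i]) : (row && cols[i]);

	*answer = row;
	if (kind == ALL_SUB && !row)
		return 1;				/* one false row decides ALL -> false */
	if (kind == ANY_SUB && row)
		return 1;				/* one true row decides ANY -> true */
	return 0;
}

int
main(void)
{
	int			rows[3][2] = {{1, 1}, {1, 0}, {1, 1}};
	int			answer = 1;		/* ALL over an empty subplan result is true */
	int			r;

	for (r = 0; r < 3; r++)
		if (fold_row(ALL_SUB, 0, rows[r], 2, &answer))
			break;				/* short-circuit, like the breaks above */

	printf("ALL result: %s\n", answer ? "true" : "false");
	return 0;
}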
View File
@ -3,7 +3,7 @@
* spi.c
* Server Programming Interface
*
* $Id: spi.c,v 1.37 1999/05/13 07:28:30 tgl Exp $
* $Id: spi.c,v 1.38 1999/05/25 16:08:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -411,7 +411,7 @@ SPI_getvalue(HeapTuple tuple, TupleDesc tupdesc, int fnumber)
val = heap_getattr(tuple, fnumber, tupdesc, &isnull);
if (isnull)
return NULL;
if (! getTypeOutAndElem((Oid) tupdesc->attrs[fnumber - 1]->atttypid,
if (!getTypeOutAndElem((Oid) tupdesc->attrs[fnumber - 1]->atttypid,
&foutoid, &typelem))
{
SPI_result = SPI_ERROR_NOOUTFUNC;
@ -555,7 +555,7 @@ SPI_pfree(void *pointer)
*
*/
void
spi_printtup(HeapTuple tuple, TupleDesc tupdesc, DestReceiver* self)
spi_printtup(HeapTuple tuple, TupleDesc tupdesc, DestReceiver * self)
{
SPITupleTable *tuptable;
MemoryContext oldcxt;
@ -633,12 +633,13 @@ _SPI_execute(char *src, int tcount, _SPI_plan *plan)
_SPI_current->qtlist = queryTree_list;
foreach (queryTree_list_item, queryTree_list)
foreach(queryTree_list_item, queryTree_list)
{
queryTree = (Query *) lfirst(queryTree_list_item);
planTree = lfirst(planTree_list);
planTree_list = lnext(planTree_list);
islastquery = (planTree_list == NIL); /* assume lists are same len */
islastquery = (planTree_list == NIL); /* assume lists are same
* len */
if (queryTree->commandType == CMD_UTILITY)
{
@ -658,7 +659,7 @@ _SPI_execute(char *src, int tcount, _SPI_plan *plan)
if (plan == NULL)
{
ProcessUtility(queryTree->utilityStmt, None);
if (! islastquery)
if (!islastquery)
CommandCounterIncrement();
else
return res;
@ -717,17 +718,18 @@ _SPI_execute_plan(_SPI_plan *plan, Datum *Values, char *Nulls, int tcount)
_SPI_current->tuptable = NULL;
_SPI_current->qtlist = NULL;
foreach (queryTree_list_item, queryTree_list)
foreach(queryTree_list_item, queryTree_list)
{
queryTree = (Query *) lfirst(queryTree_list_item);
planTree = lfirst(planTree_list);
planTree_list = lnext(planTree_list);
islastquery = (planTree_list == NIL); /* assume lists are same len */
islastquery = (planTree_list == NIL); /* assume lists are same
* len */
if (queryTree->commandType == CMD_UTILITY)
{
ProcessUtility(queryTree->utilityStmt, None);
if (! islastquery)
if (!islastquery)
CommandCounterIncrement();
else
return SPI_OK_UTILITY;
@ -836,13 +838,13 @@ _SPI_pquery(QueryDesc *queryDesc, EState *state, int tcount)
tcount_const.type = T_Const;
tcount_const.consttype = INT4OID;
tcount_const.constlen = sizeof(int4);
tcount_const.constvalue = (Datum)tcount;
tcount_const.constvalue = (Datum) tcount;
tcount_const.constisnull = FALSE;
tcount_const.constbyval = TRUE;
tcount_const.constisset = FALSE;
tcount_const.constiscast = FALSE;
count = (Node *)&tcount_const;
count = (Node *) &tcount_const;
}
if (state == NULL) /* plan preparation */
View File
@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/lib/Attic/fstack.c,v 1.10 1999/02/13 23:15:34 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/lib/Attic/fstack.c,v 1.11 1999/05/25 16:08:52 momjian Exp $
*
*-------------------------------------------------------------------------
View File

@ -8,7 +8,7 @@
*
* Copyright (c) 1994, Regents of the University of California
*
* $Id: stringinfo.c,v 1.15 1999/04/25 03:19:25 tgl Exp $
* $Id: stringinfo.c,v 1.16 1999/05/25 16:08:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -75,9 +75,10 @@ enlargeStringInfo(StringInfo str, int needed)
return; /* got enough space already */
/*
* We don't want to allocate just a little more space with each append;
* for efficiency, double the buffer size each time it overflows.
* Actually, we might need to more than double it if 'needed' is big...
* We don't want to allocate just a little more space with each
* append; for efficiency, double the buffer size each time it
* overflows. Actually, we might need to more than double it if
* 'needed' is big...
*/
newlen = 2 * str->maxlen;
while (needed > newlen)
@ -107,7 +108,7 @@ enlargeStringInfo(StringInfo str, int needed)
* generated in a single call (not on the total string length).
*/
void
appendStringInfo(StringInfo str, const char *fmt, ...)
appendStringInfo(StringInfo str, const char *fmt,...)
{
va_list args;
char buffer[1024];
@ -164,7 +165,8 @@ appendBinaryStringInfo(StringInfo str, const char *data, int datalen)
memcpy(str->data + str->len, data, datalen);
str->len += datalen;
/* Keep a trailing null in place, even though it's probably useless
/*
* Keep a trailing null in place, even though it's probably useless
* for binary data...
*/
str->data[str->len] = '\0';
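The enlargeStringInfo hunk above documents the growth policy: instead of growing by just the amount requested, the buffer doubles on each overflow and keeps doubling until even a large 'needed' fits. A minimal sketch of the same policy applied to a plain malloc'd buffer (MyStringBuf and my_enlarge are illustrative names, not the real StringInfo API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct
{
	char	   *data;
	int			len;			/* bytes in use, not counting trailing '\0' */
	int			maxlen;			/* allocated size */
} MyStringBuf;

/* Grow the buffer so that at least 'needed' more bytes (plus '\0') fit. */
static void
my_enlarge(MyStringBuf *str, int needed)
{
	int			newlen;
	char	   *newdata;

	needed += str->len + 1;
	if (needed <= str->maxlen)
		return;					/* got enough space already */

	/*
	 * Double rather than adding a little each time, and keep doubling until
	 * even a large request fits.
	 */
	newlen = 2 * str->maxlen;
	while (needed > newlen)
		newlen = 2 * newlen;

	newdata = realloc(str->data, (size_t) newlen);
	if (newdata == NULL)
	{
		fprintf(stderr, "out of memory\n");
		exit(1);
	}
	str->data = newdata;
	str->maxlen = newlen;
}

int
main(void)
{
	MyStringBuf buf;
	const char *chunk = "0123456789";
	int			i;

	buf.maxlen = 16;
	buf.len = 0;
	buf.data = malloc((size_t) buf.maxlen);
	buf.data[0] = '\0';

	for (i = 0; i < 10; i++)
	{
		my_enlarge(&buf, (int) strlen(chunk));
		strcpy(buf.data + buf.len, chunk);
		buf.len += (int) strlen(chunk);
	}
	printf("len=%d maxlen=%d\n", buf.len, buf.maxlen);
	free(buf.data);
	return 0;
}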
View File
@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/libpq/auth.c,v 1.35 1999/04/16 04:59:03 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/libpq/auth.c,v 1.36 1999/05/25 16:08:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -77,8 +77,8 @@ pg_krb4_recvauth(Port *port)
{
long krbopts = 0; /* one-way authentication */
KTEXT_ST clttkt;
char instance[INST_SZ+1],
version[KRB_SENDAUTH_VLEN+1];
char instance[INST_SZ + 1],
version[KRB_SENDAUTH_VLEN + 1];
AUTH_DAT auth_data;
Key_schedule key_sched;
int status;
@ -449,9 +449,9 @@ be_recvauth(Port *port)
/*
* Get the authentication method to use for this frontend/database
* combination. Note: a failure return indicates a problem with
* the hba config file, not with the request. hba.c should have
* dropped an error message into the postmaster logfile if it failed.
* combination. Note: a failure return indicates a problem with the
* hba config file, not with the request. hba.c should have dropped
* an error message into the postmaster logfile if it failed.
*/
if (hba_getauthmethod(&port->raddr, port->user, port->database,
@ -476,18 +476,19 @@ be_recvauth(Port *port)
switch (port->auth_method)
{
case uaReject:
/*
* This could have come from an explicit "reject" entry
* in pg_hba.conf, but more likely it means there was no
* matching entry. Take pity on the poor user and issue
* a helpful error message. NOTE: this is not a security
* breach, because all the info reported here is known
* at the frontend and must be assumed known to bad guys.
* This could have come from an explicit "reject" entry in
* pg_hba.conf, but more likely it means there was no
* matching entry. Take pity on the poor user and issue a
* helpful error message. NOTE: this is not a security
* breach, because all the info reported here is known at
* the frontend and must be assumed known to bad guys.
* We're merely helping out the less clueful good guys.
* NOTE 2: libpq-be.h defines the maximum error message
* length as 99 characters. It probably wouldn't hurt
* anything to increase it, but there might be some
* client out there that will fail. So, be terse.
* anything to increase it, but there might be some client
* out there that will fail. So, be terse.
*/
{
char buffer[512];
View File
@ -6,7 +6,7 @@
*
* Copyright (c) 1994, Regents of the University of California
*
* $Id: be-dumpdata.c,v 1.23 1999/05/10 00:45:08 momjian Exp $
* $Id: be-dumpdata.c,v 1.24 1999/05/25 16:08:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -208,7 +208,7 @@ be_typeinit(PortalEntry *entry,
* ----------------
*/
void
be_printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver* self)
be_printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver * self)
{
int i;
Datum attr;
View File
@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/libpq/be-fsstubs.c,v 1.32 1999/05/10 00:45:09 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/libpq/be-fsstubs.c,v 1.33 1999/05/25 16:08:57 momjian Exp $
*
* NOTES
* This should be moved to a more appropriate place. It is here
@ -149,7 +149,7 @@ lo_read(int fd, char *buf, int len)
status = inv_read(cookies[fd], buf, len);
MemoryContextSwitchTo(currentContext);
return(status);
return (status);
}
int
@ -173,7 +173,7 @@ lo_write(int fd, char *buf, int len)
status = inv_write(cookies[fd], buf, len);
MemoryContextSwitchTo(currentContext);
return(status);
return (status);
}
@ -374,9 +374,7 @@ lo_export(Oid lobjId, text *filename)
*/
lobj = inv_open(lobjId, INV_READ);
if (lobj == NULL)
{
elog(ERROR, "lo_export: can't open inv object %u", lobjId);
}
/*
* open the file to be written to
View File
@ -9,7 +9,7 @@
* Dec 17, 1997 - Todd A. Brandys
* Orignal Version Completed.
*
* $Id: crypt.c,v 1.16 1999/05/09 00:54:30 tgl Exp $
* $Id: crypt.c,v 1.17 1999/05/25 16:08:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -147,9 +147,7 @@ crypt_loadpwdfile()
{ /* free the old data only if this is a
* reload */
while (pwd_cache_count--)
{
pfree((void *) pwd_cache[pwd_cache_count]);
}
pfree((void *) pwd_cache);
pwd_cache = NULL;
pwd_cache_count = 0;
@ -269,20 +267,14 @@ crypt_verify(Port *port, const char *user, const char *pgpass)
current;
if (crypt_getloginfo(user, &passwd, &valuntil) == STATUS_ERROR)
{
return STATUS_ERROR;
}
if (passwd == NULL || *passwd == '\0')
{
if (passwd)
{
pfree((void *) passwd);
}
if (valuntil)
{
pfree((void *) valuntil);
}
return STATUS_ERROR;
}
@ -296,33 +288,24 @@ crypt_verify(Port *port, const char *user, const char *pgpass)
if (!strcmp(pgpass, crypt_pwd))
{
/*
* check here to be sure we are not past valuntil
*/
if (!valuntil || strcmp(valuntil, "\\N") == 0)
{
vuntil = INVALID_ABSTIME;
}
else
{
vuntil = nabstimein(valuntil);
}
current = GetCurrentAbsoluteTime();
if (vuntil != INVALID_ABSTIME && vuntil < current)
{
retval = STATUS_ERROR;
}
else
{
retval = STATUS_OK;
}
}
pfree((void *) passwd);
if (valuntil)
{
pfree((void *) valuntil);
}
return retval;
}
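The un-bracketed logic above checks the password and then the account expiry: a missing valuntil (stored as "\N") means no expiry, otherwise the timestamp is parsed and compared with the current time, and a password that matches but is past its valid-until date is still rejected. A small sketch of that decision, using time_t and a plain string compare in place of AbsoluteTime and the crypt'd comparison (the constants and the toy timestamp parser are invented):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

#define MY_STATUS_OK	 0
#define MY_STATUS_ERROR -1
#define MY_INVALID_TIME ((time_t) -1)	/* stand-in for INVALID_ABSTIME */

/*
 * Return MY_STATUS_OK only if the supplied password matches the stored one
 * and the account has not passed its valid-until time.
 */
static int
verify(const char *supplied, const char *stored, const char *valuntil,
	   time_t now)
{
	time_t		vuntil;

	if (strcmp(supplied, stored) != 0)
		return MY_STATUS_ERROR;

	/* "\\N" (or no value at all) means the password never expires. */
	if (valuntil == NULL || strcmp(valuntil, "\\N") == 0)
		vuntil = MY_INVALID_TIME;
	else
		vuntil = (time_t) strtol(valuntil, NULL, 10);

	if (vuntil != MY_INVALID_TIME && vuntil < now)
		return MY_STATUS_ERROR;	/* matched, but past valuntil */

	return MY_STATUS_OK;
}

int
main(void)
{
	time_t		now = time(NULL);

	printf("no expiry:  %d\n", verify("secret", "secret", "\\N", now));
	printf("expired:    %d\n", verify("secret", "secret", "0", now));
	printf("bad passwd: %d\n", verify("wrong", "secret", "\\N", now));
	return 0;
}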
View File
@ -5,7 +5,7 @@
* wherein you authenticate a user by seeing what IP address the system
* says he comes from and possibly using ident).
*
* $Id: hba.c,v 1.42 1999/05/10 15:17:16 momjian Exp $
* $Id: hba.c,v 1.43 1999/05/25 16:08:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -313,6 +313,7 @@ process_open_config_file(FILE *file, SockAddr *raddr, const char *user,
{
/* Process a line from the config file */
int c = getc(file);
if (c == EOF)
eof = true;
else
@ -394,7 +395,8 @@ find_hba_entry(SockAddr *raddr, const char *user, const char *database,
}
else
{
char *conf_file; /* The name of the config file we have to read */
char *conf_file; /* The name of the config file we have to
* read */
/* put together the full pathname to the config file */
bufsize = (strlen(DataDir) + strlen(CONF_FILE) + 2) * sizeof(char);
@ -531,8 +533,10 @@ ident(const struct in_addr remote_ip_addr, const struct in_addr local_ip_addr,
----------------------------------------------------------------------------*/
int sock_fd, /* File descriptor for socket on which we talk to Ident */
rc; /* Return code from a locally called function */
int sock_fd, /* File descriptor for socket on which we
* talk to Ident */
rc; /* Return code from a locally called
* function */
sock_fd = socket(AF_INET, SOCK_STREAM, IPPROTO_IP);
if (sock_fd == -1)
@ -559,14 +563,14 @@ ident(const struct in_addr remote_ip_addr, const struct in_addr local_ip_addr,
/*
* Bind to the address which the client originally contacted,
* otherwise the ident server won't be able to match up the
* right connection. This is necessary if the PostgreSQL
* server is running on an IP alias.
* otherwise the ident server won't be able to match up the right
* connection. This is necessary if the PostgreSQL server is
* running on an IP alias.
*/
memset(&la, 0, sizeof(la));
la.sin_family = AF_INET;
la.sin_addr = local_ip_addr;
rc = bind(sock_fd, (struct sockaddr *) &la, sizeof(la));
rc = bind(sock_fd, (struct sockaddr *) & la, sizeof(la));
if (rc == 0)
{
rc = connect(sock_fd,
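The comment in this hunk explains why ident() binds its outgoing socket to the local address the client originally contacted: an ident server matches connections by address pair, so when PostgreSQL runs on an IP alias the query has to originate from that alias. A bare-bones sketch of the bind-before-connect pattern on an IPv4 socket (the addresses below are documentation addresses and error handling is minimal):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

#define IDENT_PORT 113			/* the RFC 1413 ident service */

int
main(void)
{
	int			sock_fd;
	struct sockaddr_in la,
				ra;

	sock_fd = socket(AF_INET, SOCK_STREAM, 0);
	if (sock_fd < 0)
	{
		perror("socket");
		return 1;
	}

	/*
	 * Bind to the local address the client originally contacted (a
	 * hypothetical alias, 192.0.2.10); port 0 lets the kernel pick the
	 * source port.
	 */
	memset(&la, 0, sizeof(la));
	la.sin_family = AF_INET;
	la.sin_port = htons(0);
	inet_pton(AF_INET, "192.0.2.10", &la.sin_addr);
	if (bind(sock_fd, (struct sockaddr *) &la, sizeof(la)) != 0)
		perror("bind");

	/* Only then connect to the ident server on the client's machine. */
	memset(&ra, 0, sizeof(ra));
	ra.sin_family = AF_INET;
	ra.sin_port = htons(IDENT_PORT);
	inet_pton(AF_INET, "192.0.2.20", &ra.sin_addr);
	if (connect(sock_fd, (struct sockaddr *) &ra, sizeof(ra)) != 0)
		perror("connect");

	close(sock_fd);
	return 0;
}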
@ -770,18 +774,15 @@ verify_against_usermap(const char *pguser,
else if (strcmp(usermap_name, "sameuser") == 0)
{
if (strcmp(ident_username, pguser) == 0)
{
*checks_out_p = true;
}
else
{
*checks_out_p = false;
}
}
else
{
FILE *file; /* The map file we have to read */
char *map_file; /* The name of the map file we have to read */
char *map_file; /* The name of the map file we have to
* read */
int bufsize;
/* put together the full pathname to the map file */
@ -971,9 +972,7 @@ GetCharSetByHost(char *TableName, int host, const char *DataDir)
file = AllocateFile(map_file, "rb");
#endif
if (file == NULL)
{
return;
}
while (!eof)
{
c = getc(file);
Some files were not shown because too many files have changed in this diff.