pgindent run. Make it all clean.

Bruce Momjian 2001-03-22 04:01:46 +00:00
parent 6cf8707b82
commit 9e1552607a
555 changed files with 32514 additions and 28110 deletions
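Every hunk below is the same mechanical transformation applied by pgindent, PostgreSQL's source-code formatter: the function return type moves onto its own line above the function name, opening braces move onto their own lines, multi-variable declarations are split one per line with aligned continuations, braces around single-statement bodies are dropped, and block comments are reflowed into the standard multi-line /* ... */ shape. The following is a rough sketch of that pattern on hypothetical code (not part of this commit):

/* before pgindent: compact layout, combined declarations, braced single statement */
static int sum_ints_old(int *xs, int n) {
	int i, sum = 0;
	for (i = 0; i < n; i++) {
		sum += xs[i];
	}
	return sum;
}

/* after pgindent: return type on its own line, braces on their own
 * lines, one declaration per line, single statement left unbraced */
static int
sum_ints_new(int *xs, int n)
{
	int			i,
				sum = 0;

	for (i = 0; i < n; i++)
		sum += xs[i];
	return sum;
}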

View File

@ -20,30 +20,32 @@ unsigned int parse_buffer_pos( void );
extern void cube_flush_scanner_buffer(void); /* defined in cubescan.l */
void set_parse_buffer( char* s )
void
set_parse_buffer(char *s)
{
PARSE_BUFFER = s;
PARSE_BUFFER_SIZE = strlen(s);
if ( PARSE_BUFFER_SIZE == 0 ) {
if (PARSE_BUFFER_SIZE == 0)
elog(ERROR, "cube_in: can't parse an empty string");
}
PARSE_BUFFER_PTR = PARSE_BUFFER;
SCANNER_POS = 0;
}
void reset_parse_buffer( void )
void
reset_parse_buffer(void)
{
PARSE_BUFFER_PTR = PARSE_BUFFER;
SCANNER_POS = 0;
cube_flush_scanner_buffer();
}
int read_parse_buffer( void )
int
read_parse_buffer(void)
{
int c;
/*
c = *PARSE_BUFFER_PTR++;
SCANNER_POS++;
* c = *PARSE_BUFFER_PTR++; SCANNER_POS++;
*/
c = PARSE_BUFFER[SCANNER_POS];
if (SCANNER_POS < PARSE_BUFFER_SIZE)
@ -51,29 +53,32 @@ int read_parse_buffer( void )
return c;
}
char * parse_buffer( void )
char *
parse_buffer(void)
{
return PARSE_BUFFER;
}
unsigned int parse_buffer_curr_char( void )
unsigned int
parse_buffer_curr_char(void)
{
return PARSE_BUFFER[SCANNER_POS];
}
char * parse_buffer_ptr( void )
char *
parse_buffer_ptr(void)
{
return PARSE_BUFFER_PTR;
}
unsigned int parse_buffer_pos( void )
unsigned int
parse_buffer_pos(void)
{
return SCANNER_POS;
}
unsigned int parse_buffer_size( void )
unsigned int
parse_buffer_size(void)
{
return PARSE_BUFFER_SIZE;
}

View File

@ -92,9 +92,8 @@ cube_in(char *str)
set_parse_buffer(str);
if ( cube_yyparse(&result) != 0 ) {
if (cube_yyparse(&result) != 0)
return NULL;
}
return ((NDBOX *) result);
}
@ -121,22 +120,26 @@ cube_out(NDBOX *cube)
p = result = (char *) palloc(100);
/* while printing the first (LL) corner, check if it is equal
to the scond one */
/*
* while printing the first (LL) corner, check if it is equal to the
* scond one
*/
p += sprintf(p, "(");
for ( i=0; i < dim; i++ ) {
for (i = 0; i < dim; i++)
{
p += sprintf(p, "%g", cube->x[i]);
p += sprintf(p, ", ");
if ( cube->x[i] != cube->x[i+dim] ) {
if (cube->x[i] != cube->x[i + dim])
equal = 0;
}
}
p -= 2; /* get rid of the last ", " */
p += sprintf(p, ")");
if ( !equal ) {
if (!equal)
{
p += sprintf(p, ",(");
for ( i=dim; i < dim * 2; i++ ) {
for (i = dim; i < dim * 2; i++)
{
p += sprintf(p, "%g", cube->x[i]);
p += sprintf(p, ", ");
}
@ -163,9 +166,10 @@ g_cube_consistent(GISTENTRY *entry,
NDBOX * query,
StrategyNumber strategy)
{
/*
** if entry is not leaf, use g_cube_internal_consistent,
** else use g_cube_leaf_consistent
* * if entry is not leaf, use g_cube_internal_consistent, * else use
* g_cube_leaf_consistent
*/
if (GIST_LEAF(entry))
return (g_cube_leaf_consistent((NDBOX *) (entry->pred), query, strategy));
@ -181,28 +185,35 @@ g_cube_consistent(GISTENTRY *entry,
NDBOX *
g_cube_union(bytea *entryvec, int *sizep)
{
int numranges, i;
int numranges,
i;
NDBOX *out = (NDBOX *) NULL;
NDBOX *tmp;
/*
fprintf(stderr, "union\n");
* fprintf(stderr, "union\n");
*/
numranges = (VARSIZE(entryvec) - VARHDRSZ) / sizeof(GISTENTRY);
tmp = (NDBOX *) (((GISTENTRY *) (VARDATA(entryvec)))[0]).pred;
/*
* sizep = sizeof(NDBOX); -- NDBOX has variable size
*/
*sizep = tmp->size;
for (i = 1; i < numranges; i++) {
for (i = 1; i < numranges; i++)
{
out = g_cube_binary_union(tmp, (NDBOX *)
(((GISTENTRY *) (VARDATA(entryvec)))[i]).pred,
sizep);
/*
fprintf(stderr, "\t%s ^ %s -> %s\n", cube_out(tmp), cube_out((NDBOX *)(((GISTENTRY *)(VARDATA(entryvec)))[i]).pred), cube_out(out));
* fprintf(stderr, "\t%s ^ %s -> %s\n", cube_out(tmp),
* cube_out((NDBOX *)(((GISTENTRY
* *)(VARDATA(entryvec)))[i]).pred), cube_out(out));
*/
if (i > 1) pfree(tmp);
if (i > 1)
pfree(tmp);
tmp = out;
}
@ -233,16 +244,17 @@ float *
g_cube_penalty(GISTENTRY *origentry, GISTENTRY *newentry, float *result)
{
Datum ud;
float tmp1, tmp2;
float tmp1,
tmp2;
ud = (Datum) cube_union((NDBOX *) (origentry->pred), (NDBOX *) (newentry->pred));
rt_cube_size((NDBOX *) ud, &tmp1);
rt_cube_size((NDBOX *) (origentry->pred), &tmp2);
*result = tmp1 - tmp2;
pfree((char *) ud);
/*
fprintf(stderr, "penalty\n");
fprintf(stderr, "\t%g\n", *result);
* fprintf(stderr, "penalty\n"); fprintf(stderr, "\t%g\n", *result);
*/
return (result);
}
@ -257,22 +269,34 @@ GIST_SPLITVEC *
g_cube_picksplit(bytea *entryvec,
GIST_SPLITVEC *v)
{
OffsetNumber i, j;
NDBOX *datum_alpha, *datum_beta;
NDBOX *datum_l, *datum_r;
NDBOX *union_d, *union_dl, *union_dr;
OffsetNumber i,
j;
NDBOX *datum_alpha,
*datum_beta;
NDBOX *datum_l,
*datum_r;
NDBOX *union_d,
*union_dl,
*union_dr;
NDBOX *inter_d;
bool firsttime;
float size_alpha, size_beta, size_union, size_inter;
float size_waste, waste;
float size_l, size_r;
float size_alpha,
size_beta,
size_union,
size_inter;
float size_waste,
waste;
float size_l,
size_r;
int nbytes;
OffsetNumber seed_1 = 0, seed_2 = 0;
OffsetNumber *left, *right;
OffsetNumber seed_1 = 0,
seed_2 = 0;
OffsetNumber *left,
*right;
OffsetNumber maxoff;
/*
fprintf(stderr, "picksplit\n");
* fprintf(stderr, "picksplit\n");
*/
maxoff = ((VARSIZE(entryvec) - VARHDRSZ) / sizeof(GISTENTRY)) - 2;
nbytes = (maxoff + 2) * sizeof(OffsetNumber);
@ -282,9 +306,11 @@ g_cube_picksplit(bytea *entryvec,
firsttime = true;
waste = 0.0;
for (i = FirstOffsetNumber; i < maxoff; i = OffsetNumberNext(i)) {
for (i = FirstOffsetNumber; i < maxoff; i = OffsetNumberNext(i))
{
datum_alpha = (NDBOX *) (((GISTENTRY *) (VARDATA(entryvec)))[i].pred);
for (j = OffsetNumberNext(i); j <= maxoff; j = OffsetNumberNext(j)) {
for (j = OffsetNumberNext(i); j <= maxoff; j = OffsetNumberNext(j))
{
datum_beta = (NDBOX *) (((GISTENTRY *) (VARDATA(entryvec)))[j].pred);
/* compute the wasted space by unioning these guys */
@ -301,11 +327,12 @@ g_cube_picksplit(bytea *entryvec,
pfree(inter_d);
/*
* are these a more promising split than what we've
* already seen?
* are these a more promising split than what we've already
* seen?
*/
if (size_waste > waste || firsttime) {
if (size_waste > waste || firsttime)
{
waste = size_waste;
seed_1 = i;
seed_2 = j;
@ -328,31 +355,34 @@ g_cube_picksplit(bytea *entryvec,
/*
* Now split up the regions between the two seeds. An important
* property of this split algorithm is that the split vector v
* has the indices of items to be split in order in its left and
* right vectors. We exploit this property by doing a merge in
* the code that actually splits the page.
* property of this split algorithm is that the split vector v has the
* indices of items to be split in order in its left and right
* vectors. We exploit this property by doing a merge in the code
* that actually splits the page.
*
* For efficiency, we also place the new index tuple in this loop.
* This is handled at the very end, when we have placed all the
* existing tuples and i == maxoff + 1.
* For efficiency, we also place the new index tuple in this loop. This
* is handled at the very end, when we have placed all the existing
* tuples and i == maxoff + 1.
*/
maxoff = OffsetNumberNext(maxoff);
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i)) {
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
{
/*
* If we've already decided where to place this item, just
* put it on the right list. Otherwise, we need to figure
* out which page needs the least enlargement in order to
* store the item.
* If we've already decided where to place this item, just put it
* on the right list. Otherwise, we need to figure out which page
* needs the least enlargement in order to store the item.
*/
if (i == seed_1) {
if (i == seed_1)
{
*left++ = i;
v->spl_nleft++;
continue;
} else if (i == seed_2) {
}
else if (i == seed_2)
{
*right++ = i;
v->spl_nright++;
continue;
@ -366,14 +396,17 @@ g_cube_picksplit(bytea *entryvec,
rt_cube_size((NDBOX *) union_dr, &size_beta);
/* pick which page to add it to */
if (size_alpha - size_l < size_beta - size_r) {
if (size_alpha - size_l < size_beta - size_r)
{
pfree(datum_l);
pfree(union_dr);
datum_l = union_dl;
size_l = size_alpha;
*left++ = i;
v->spl_nleft++;
} else {
}
else
{
pfree(datum_r);
pfree(union_dl);
datum_r = union_dr;
@ -398,9 +431,11 @@ g_cube_same(NDBOX *b1, NDBOX *b2, bool *result)
{
if (cube_same(b1, b2))
*result = TRUE;
else *result = FALSE;
else
*result = FALSE;
/*
fprintf(stderr, "same: %s\n", (*result ? "TRUE" : "FALSE" ));
* fprintf(stderr, "same: %s\n", (*result ? "TRUE" : "FALSE" ));
*/
return (result);
}
@ -416,9 +451,10 @@ g_cube_leaf_consistent(NDBOX *key,
bool retval;
/*
fprintf(stderr, "leaf_consistent, %d\n", strategy);
* fprintf(stderr, "leaf_consistent, %d\n", strategy);
*/
switch(strategy) {
switch (strategy)
{
case RTLeftStrategyNumber:
retval = (bool) cube_left(key, query);
break;
@ -457,9 +493,10 @@ g_cube_internal_consistent(NDBOX *key,
bool retval;
/*
fprintf(stderr, "internal_consistent, %d\n", strategy);
* fprintf(stderr, "internal_consistent, %d\n", strategy);
*/
switch(strategy) {
switch (strategy)
{
case RTLeftStrategyNumber:
case RTOverLeftStrategyNumber:
retval = (bool) cube_over_left(key, query);
@ -497,47 +534,56 @@ g_cube_binary_union(NDBOX *r1, NDBOX *r2, int *sizep)
/* cube_union */
NDBOX *cube_union(NDBOX *box_a, NDBOX *box_b)
NDBOX *
cube_union(NDBOX * box_a, NDBOX * box_b)
{
int i;
NDBOX *result;
NDBOX *a = swap_corners(box_a);
NDBOX *b = swap_corners(box_b);
if ( a->dim >= b->dim ) {
if (a->dim >= b->dim)
{
result = palloc(a->size);
result->size = a->size;
result->dim = a->dim;
}
else {
else
{
result = palloc(b->size);
result->size = b->size;
result->dim = b->dim;
}
/* swap the box pointers if needed */
if ( a->dim < b->dim ) {
NDBOX * tmp = b; b = a; a = tmp;
if (a->dim < b->dim)
{
NDBOX *tmp = b;
b = a;
a = tmp;
}
/* use the potentially smaller of the two boxes (b) to fill in
the result, padding absent dimensions with zeroes*/
for ( i = 0; i < b->dim; i++ ) {
/*
* use the potentially smaller of the two boxes (b) to fill in the
* result, padding absent dimensions with zeroes
*/
for (i = 0; i < b->dim; i++)
{
result->x[i] = b->x[i];
result->x[i + a->dim] = b->x[i + b->dim];
}
for ( i = b->dim; i < a->dim; i++ ) {
for (i = b->dim; i < a->dim; i++)
{
result->x[i] = 0;
result->x[i + a->dim] = 0;
}
/* compute the union */
for ( i = 0; i < a->dim; i++ ) {
for (i = 0; i < a->dim; i++)
result->x[i] = min(a->x[i], result->x[i]);
}
for ( i = a->dim; i < a->dim * 2; i++ ) {
for (i = a->dim; i < a->dim * 2; i++)
result->x[i] = max(a->x[i], result->x[i]);
}
pfree(a);
pfree(b);
@ -546,67 +592,80 @@ NDBOX *cube_union(NDBOX *box_a, NDBOX *box_b)
}
/* cube_inter */
NDBOX *cube_inter(NDBOX *box_a, NDBOX *box_b)
NDBOX *
cube_inter(NDBOX * box_a, NDBOX * box_b)
{
int i;
NDBOX *result;
NDBOX *a = swap_corners(box_a);
NDBOX *b = swap_corners(box_b);
if ( a->dim >= b->dim ) {
if (a->dim >= b->dim)
{
result = palloc(a->size);
result->size = a->size;
result->dim = a->dim;
}
else {
else
{
result = palloc(b->size);
result->size = b->size;
result->dim = b->dim;
}
/* swap the box pointers if needed */
if ( a->dim < b->dim ) {
NDBOX * tmp = b; b = a; a = tmp;
if (a->dim < b->dim)
{
NDBOX *tmp = b;
b = a;
a = tmp;
}
/* use the potentially smaller of the two boxes (b) to fill in
the result, padding absent dimensions with zeroes*/
for ( i = 0; i < b->dim; i++ ) {
/*
* use the potentially smaller of the two boxes (b) to fill in the
* result, padding absent dimensions with zeroes
*/
for (i = 0; i < b->dim; i++)
{
result->x[i] = b->x[i];
result->x[i + a->dim] = b->x[i + b->dim];
}
for ( i = b->dim; i < a->dim; i++ ) {
for (i = b->dim; i < a->dim; i++)
{
result->x[i] = 0;
result->x[i + a->dim] = 0;
}
/* compute the intersection */
for ( i = 0; i < a->dim; i++ ) {
for (i = 0; i < a->dim; i++)
result->x[i] = max(a->x[i], result->x[i]);
}
for ( i = a->dim; i < a->dim * 2; i++ ) {
for (i = a->dim; i < a->dim * 2; i++)
result->x[i] = min(a->x[i], result->x[i]);
}
pfree(a);
pfree(b);
/* Is it OK to return a non-null intersection for non-overlapping boxes? */
/*
* Is it OK to return a non-null intersection for non-overlapping
* boxes?
*/
return (result);
}
/* cube_size */
float *cube_size(NDBOX *a)
float *
cube_size(NDBOX * a)
{
int i,j;
int i,
j;
float *result;
result = (float *) palloc(sizeof(float));
*result = 1.0;
for ( i = 0, j = a->dim; i < a->dim; i++,j++ ) {
for (i = 0, j = a->dim; i < a->dim; i++, j++)
*result = (*result) * abs((a->x[j] - a->x[i]));
}
return (result);
}
@ -614,15 +673,17 @@ float *cube_size(NDBOX *a)
void
rt_cube_size(NDBOX * a, float *size)
{
int i,j;
int i,
j;
if (a == (NDBOX *) NULL)
*size = 0.0;
else {
else
{
*size = 1.0;
for ( i = 0, j = a->dim; i < a->dim; i++,j++ ) {
for (i = 0, j = a->dim; i < a->dim; i++, j++)
*size = (*size) * abs((a->x[j] - a->x[i]));
}
}
return;
}
@ -633,7 +694,8 @@ rt_cube_size(NDBOX *a, float *size)
/* is the right edge of (a) located to the left of
the right edge of (b)? */
bool cube_over_left(NDBOX *box_a, NDBOX *box_b)
bool
cube_over_left(NDBOX * box_a, NDBOX * box_b)
{
NDBOX *a;
NDBOX *b;
@ -649,7 +711,8 @@ bool cube_over_left(NDBOX *box_a, NDBOX *box_b)
/* is the left edge of (a) located to the right of
the left edge of (b)? */
bool cube_over_right(NDBOX *box_a, NDBOX *box_b)
bool
cube_over_right(NDBOX * box_a, NDBOX * box_b)
{
NDBOX *a;
NDBOX *b;
@ -666,7 +729,8 @@ bool cube_over_right(NDBOX *box_a, NDBOX *box_b)
/* return 'true' if the projection of 'a' is
entirely on the left of the projection of 'b' */
bool cube_left(NDBOX *box_a, NDBOX *box_b)
bool
cube_left(NDBOX * box_a, NDBOX * box_b)
{
NDBOX *a;
NDBOX *b;
@ -682,7 +746,8 @@ bool cube_left(NDBOX *box_a, NDBOX *box_b)
/* return 'true' if the projection of 'a' is
entirely on the right of the projection of 'b' */
bool cube_right(NDBOX *box_a, NDBOX *box_b)
bool
cube_right(NDBOX * box_a, NDBOX * box_b)
{
NDBOX *a;
NDBOX *b;
@ -698,7 +763,8 @@ bool cube_right(NDBOX *box_a, NDBOX *box_b)
/* make up a metric in which one box will be 'lower' than the other
-- this can be useful for srting and to determine uniqueness */
bool cube_lt(NDBOX *box_a, NDBOX *box_b)
bool
cube_lt(NDBOX * box_a, NDBOX * box_b)
{
int i;
int dim;
@ -712,24 +778,28 @@ bool cube_lt(NDBOX *box_a, NDBOX *box_b)
b = swap_corners(box_b);
dim = min(a->dim, b->dim);
/* if all common dimensions are equal, the cube with more dimensions wins */
if ( cube_same(a, b) ) {
if (a->dim < b->dim) {
/*
* if all common dimensions are equal, the cube with more dimensions
* wins
*/
if (cube_same(a, b))
{
if (a->dim < b->dim)
return (TRUE);
}
else {
else
return (FALSE);
}
}
/* compare the common dimensions */
for ( i = 0; i < dim; i++ ) {
for (i = 0; i < dim; i++)
{
if (a->x[i] > b->x[i])
return (FALSE);
if (a->x[i] < b->x[i])
return (TRUE);
}
for ( i = 0; i < dim; i++ ) {
for (i = 0; i < dim; i++)
{
if (a->x[i + a->dim] > b->x[i + b->dim])
return (FALSE);
if (a->x[i + a->dim] < b->x[i + b->dim])
@ -737,28 +807,34 @@ bool cube_lt(NDBOX *box_a, NDBOX *box_b)
}
/* compare extra dimensions to zero */
if ( a->dim > b->dim ) {
for ( i = dim; i < a->dim; i++ ) {
if (a->dim > b->dim)
{
for (i = dim; i < a->dim; i++)
{
if (a->x[i] > 0)
return (FALSE);
if (a->x[i] < 0)
return (TRUE);
}
for ( i = 0; i < dim; i++ ) {
for (i = 0; i < dim; i++)
{
if (a->x[i + a->dim] > 0)
return (FALSE);
if (a->x[i + a->dim] < 0)
return (TRUE);
}
}
if ( a->dim < b->dim ) {
for ( i = dim; i < b->dim; i++ ) {
if (a->dim < b->dim)
{
for (i = dim; i < b->dim; i++)
{
if (b->x[i] > 0)
return (TRUE);
if (b->x[i] < 0)
return (FALSE);
}
for ( i = 0; i < dim; i++ ) {
for (i = 0; i < dim; i++)
{
if (b->x[i + b->dim] > 0)
return (TRUE);
if (b->x[i + b->dim] < 0)
@ -770,7 +846,8 @@ bool cube_lt(NDBOX *box_a, NDBOX *box_b)
}
bool cube_gt(NDBOX *box_a, NDBOX *box_b)
bool
cube_gt(NDBOX * box_a, NDBOX * box_b)
{
int i;
int dim;
@ -784,24 +861,28 @@ bool cube_gt(NDBOX *box_a, NDBOX *box_b)
b = swap_corners(box_b);
dim = min(a->dim, b->dim);
/* if all common dimensions are equal, the cube with more dimensions wins */
if ( cube_same(a, b) ) {
if (a->dim > b->dim) {
/*
* if all common dimensions are equal, the cube with more dimensions
* wins
*/
if (cube_same(a, b))
{
if (a->dim > b->dim)
return (TRUE);
}
else {
else
return (FALSE);
}
}
/* compare the common dimensions */
for ( i = 0; i < dim; i++ ) {
for (i = 0; i < dim; i++)
{
if (a->x[i] < b->x[i])
return (FALSE);
if (a->x[i] > b->x[i])
return (TRUE);
}
for ( i = 0; i < dim; i++ ) {
for (i = 0; i < dim; i++)
{
if (a->x[i + a->dim] < b->x[i + b->dim])
return (FALSE);
if (a->x[i + a->dim] > b->x[i + b->dim])
@ -810,28 +891,34 @@ bool cube_gt(NDBOX *box_a, NDBOX *box_b)
/* compare extra dimensions to zero */
if ( a->dim > b->dim ) {
for ( i = dim; i < a->dim; i++ ) {
if (a->dim > b->dim)
{
for (i = dim; i < a->dim; i++)
{
if (a->x[i] < 0)
return (FALSE);
if (a->x[i] > 0)
return (TRUE);
}
for ( i = 0; i < dim; i++ ) {
for (i = 0; i < dim; i++)
{
if (a->x[i + a->dim] < 0)
return (FALSE);
if (a->x[i + a->dim] > 0)
return (TRUE);
}
}
if ( a->dim < b->dim ) {
for ( i = dim; i < b->dim; i++ ) {
if (a->dim < b->dim)
{
for (i = dim; i < b->dim; i++)
{
if (b->x[i] < 0)
return (TRUE);
if (b->x[i] > 0)
return (FALSE);
}
for ( i = 0; i < dim; i++ ) {
for (i = 0; i < dim; i++)
{
if (b->x[i + b->dim] < 0)
return (TRUE);
if (b->x[i + b->dim] > 0)
@ -844,7 +931,8 @@ bool cube_gt(NDBOX *box_a, NDBOX *box_b)
/* Equal */
bool cube_same(NDBOX *box_a, NDBOX *box_b)
bool
cube_same(NDBOX * box_a, NDBOX * box_b)
{
int i;
NDBOX *a;
@ -857,20 +945,28 @@ bool cube_same(NDBOX *box_a, NDBOX *box_b)
b = swap_corners(box_b);
/* swap the box pointers if necessary */
if ( a->dim < b->dim ) {
NDBOX * tmp = b; b = a; a = tmp;
if (a->dim < b->dim)
{
NDBOX *tmp = b;
b = a;
a = tmp;
}
for ( i = 0; i < b->dim; i++ ) {
for (i = 0; i < b->dim; i++)
{
if (a->x[i] != b->x[i])
return (FALSE);
if (a->x[i + a->dim] != b->x[i + b->dim])
return (FALSE);
}
/* all dimensions of (b) are compared to those of (a);
instead of those in (a) absent in (b), compare (a) to zero */
for ( i = b->dim; i < a->dim; i++ ) {
/*
* all dimensions of (b) are compared to those of (a); instead of
* those in (a) absent in (b), compare (a) to zero
*/
for (i = b->dim; i < a->dim; i++)
{
if (a->x[i] != 0)
return (FALSE);
if (a->x[i + a->dim] != 0)
@ -884,7 +980,8 @@ bool cube_same(NDBOX *box_a, NDBOX *box_b)
}
/* Different */
bool cube_different(NDBOX *box_a, NDBOX *box_b)
bool
cube_different(NDBOX * box_a, NDBOX * box_b)
{
return (!cube_same(box_a, box_b));
}
@ -892,7 +989,8 @@ bool cube_different(NDBOX *box_a, NDBOX *box_b)
/* Contains */
/* Box(A) CONTAINS Box(B) IFF pt(A) < pt(B) */
bool cube_contains(NDBOX *box_a, NDBOX *box_b)
bool
cube_contains(NDBOX * box_a, NDBOX * box_b)
{
int i;
NDBOX *a;
@ -904,10 +1002,15 @@ bool cube_contains(NDBOX *box_a, NDBOX *box_b)
a = swap_corners(box_a);
b = swap_corners(box_b);
if ( a->dim < b->dim ) {
/* the further comparisons will make sense if the
excess dimensions of (b) were zeroes */
for ( i = a->dim; i < b->dim; i++ ) {
if (a->dim < b->dim)
{
/*
* the further comparisons will make sense if the excess
* dimensions of (b) were zeroes
*/
for (i = a->dim; i < b->dim; i++)
{
if (b->x[i] != 0)
return (FALSE);
if (b->x[i + b->dim] != 0)
@ -916,7 +1019,8 @@ bool cube_contains(NDBOX *box_a, NDBOX *box_b)
}
/* Can't care less about the excess dimensions of (a), if any */
for ( i = 0; i < min(a->dim, b->dim); i++ ) {
for (i = 0; i < min(a->dim, b->dim); i++)
{
if (a->x[i] > b->x[i])
return (FALSE);
if (a->x[i + a->dim] < b->x[i + b->dim])
@ -931,7 +1035,8 @@ bool cube_contains(NDBOX *box_a, NDBOX *box_b)
/* Contained */
/* Box(A) Contained by Box(B) IFF Box(B) Contains Box(A) */
bool cube_contained (NDBOX *a, NDBOX *b)
bool
cube_contained(NDBOX * a, NDBOX * b)
{
if (cube_contains(b, a) == TRUE)
return (TRUE);
@ -941,15 +1046,16 @@ bool cube_contained (NDBOX *a, NDBOX *b)
/* Overlap */
/* Box(A) Overlap Box(B) IFF (pt(a)LL < pt(B)UR) && (pt(b)LL < pt(a)UR) */
bool cube_overlap(NDBOX *box_a, NDBOX *box_b)
bool
cube_overlap(NDBOX * box_a, NDBOX * box_b)
{
int i;
NDBOX *a;
NDBOX *b;
/* This *very bad* error was found in the source:
if ( (a==NULL) || (b=NULL) )
return(FALSE);
/*
* This *very bad* error was found in the source: if ( (a==NULL) ||
* (b=NULL) ) return(FALSE);
*/
if ((box_a == NULL) || (box_b == NULL))
return (FALSE);
@ -958,12 +1064,17 @@ bool cube_overlap(NDBOX *box_a, NDBOX *box_b)
b = swap_corners(box_b);
/* swap the box pointers if needed */
if ( a->dim < b->dim ) {
NDBOX * tmp = b; b = a; a = tmp;
if (a->dim < b->dim)
{
NDBOX *tmp = b;
b = a;
a = tmp;
}
/* compare within the dimensions of (b) */
for ( i = 0; i < b->dim; i++ ) {
for (i = 0; i < b->dim; i++)
{
if (a->x[i] > b->x[i + b->dim])
return (FALSE);
if (a->x[i + a->dim] < b->x[i])
@ -971,7 +1082,8 @@ bool cube_overlap(NDBOX *box_a, NDBOX *box_b)
}
/* compare to zero those dimensions in (a) absent in (b) */
for ( i = b->dim; i < a->dim; i++ ) {
for (i = b->dim; i < a->dim; i++)
{
if (a->x[i] > 0)
return (FALSE);
if (a->x[i + a->dim] < 0)
@ -990,28 +1102,36 @@ bool cube_overlap(NDBOX *box_a, NDBOX *box_b)
between 1D projections of the boxes onto Cartesian axes. Assuming zero
distance between overlapping projections, this metric coincides with the
"common sense" geometric distance */
float *cube_distance(NDBOX *a, NDBOX *b)
float *
cube_distance(NDBOX * a, NDBOX * b)
{
int i;
double d, distance;
double d,
distance;
float *result;
result = (float *) palloc(sizeof(float));
/* swap the box pointers if needed */
if ( a->dim < b->dim ) {
NDBOX * tmp = b; b = a; a = tmp;
if (a->dim < b->dim)
{
NDBOX *tmp = b;
b = a;
a = tmp;
}
distance = 0.0;
/* compute within the dimensions of (b) */
for ( i = 0; i < b->dim; i++ ) {
for (i = 0; i < b->dim; i++)
{
d = distance_1D(a->x[i], a->x[i + a->dim], b->x[i], b->x[i + b->dim]);
distance += d * d;
}
/* compute distance to zero for those dimensions in (a) absent in (b) */
for ( i = b->dim; i < a->dim; i++ ) {
for (i = b->dim; i < a->dim; i++)
{
d = distance_1D(a->x[i], a->x[i + a->dim], 0.0, 0.0);
distance += d * d;
}
@ -1021,17 +1141,16 @@ float *cube_distance(NDBOX *a, NDBOX *b)
return (result);
}
static float distance_1D(float a1, float a2, float b1, float b2)
static float
distance_1D(float a1, float a2, float b1, float b2)
{
/* interval (a) is entirely on the left of (b) */
if( (a1 <= b1) && (a2 <= b1) && (a1 <= b2) && (a2 <= b2) ) {
if ((a1 <= b1) && (a2 <= b1) && (a1 <= b2) && (a2 <= b2))
return (min(b1, b2) - max(a1, a2));
}
/* interval (a) is entirely on the right of (b) */
if( (a1 > b1) && (a2 > b1) && (a1 > b2) && (a2 > b2) ) {
if ((a1 > b1) && (a2 > b1) && (a1 > b2) && (a2 > b2))
return (min(a1, a2) - max(b1, b2));
}
/* the rest are all sorts of intersections */
return (0.0);
@ -1040,16 +1159,19 @@ static float distance_1D(float a1, float a2, float b1, float b2)
/* normalize the box's co-ordinates by placing min(xLL,xUR) to LL
and max(xLL,xUR) to UR
*/
static NDBOX *swap_corners ( NDBOX *a )
static NDBOX *
swap_corners(NDBOX * a)
{
int i, j;
int i,
j;
NDBOX *result;
result = palloc(a->size);
result->size = a->size;
result->dim = a->dim;
for ( i = 0, j = a->dim; i < a->dim; i++, j++ ) {
for (i = 0, j = a->dim; i < a->dim; i++, j++)
{
result->x[i] = min(a->x[i], a->x[j]);
result->x[j] = max(a->x[i], a->x[j]);
}

View File

@ -1,4 +1,5 @@
typedef struct NDBOX {
typedef struct NDBOX
{
unsigned int size; /* required to be a Postgres varlena type */
unsigned int dim;
float x[1];

View File

@ -1,7 +1,7 @@
/*
* PostgreSQL type definitions for managed LargeObjects.
*
* $Header: /cvsroot/pgsql/contrib/lo/lo.c,v 1.7 2001/02/10 02:31:25 tgl Exp $
* $Header: /cvsroot/pgsql/contrib/lo/lo.c,v 1.8 2001/03/22 03:59:09 momjian Exp $
*
*/

View File

@ -13,7 +13,8 @@
#include "libpq-fe.h"
/* these are the opts structures for command line params */
struct options {
struct options
{
int getdatabase;
int gettable;
int getoid;
@ -46,7 +47,8 @@ void sql_exec_searchtable(PGconn *, char *);
void sql_exec_searchoid(PGconn *, int);
/* fuction to parse command line options and check for some usage errors. */
void get_opts(int argc, char **argv, struct options *my_opts)
void
get_opts(int argc, char **argv, struct options * my_opts)
{
char c;
@ -148,11 +150,11 @@ void get_opts(int argc, char **argv, struct options *my_opts)
case 'h':
fprintf(stderr, "\n\
Usage: pg_oid2name [-d database [-x] ] [-t table | -o oid] \n\
dafault action display all databases
dafault action display all databases\n\
-d database database to oid2name\n\
-x display system tables\n\
-t table | -o oid search for table name (-t) or\n\
oid (-o) in -d database
oid (-o) in -d database\n\
-H host connect to remote host\n\
-p port host port to connect to\n\
-U username username to connect with\n\
@ -165,11 +167,15 @@ Usage: pg_oid2name [-d database [-x] ] [-t table | -o oid] \n\
}
/* establish connection with database. */
PGconn *sql_conn(char *dbName, struct options *my_opts)
PGconn *
sql_conn(char *dbName, struct options * my_opts)
{
char *pghost, *pgport;
char *pgoptions, *pgtty;
char *pguser, *pgpass;
char *pghost,
*pgport;
char *pgoptions,
*pgtty;
char *pguser,
*pgpass;
PGconn *conn;
@ -228,7 +234,8 @@ PGconn *sql_conn(char *dbName, struct options *my_opts)
}
/* If the sql_ command has an error, this function looks up the error number and prints it out. */
void sql_exec_error (int error_number)
void
sql_exec_error(int error_number)
{
fprintf(stderr, "Error number %i.\n", error_number);
switch (error_number)
@ -256,13 +263,15 @@ void sql_exec_error (int error_number)
}
/* actual code to make call to the database and print the output data */
int sql_exec(PGconn *conn, char *todo, int match)
int
sql_exec(PGconn *conn, char *todo, int match)
{
PGresult *res;
int numbfields;
int error_number;
int i, len;
int i,
len;
/* make the call */
res = PQexec(conn, todo);
@ -306,7 +315,8 @@ int sql_exec(PGconn *conn, char *todo, int match)
}
/* dump all databases know by the system table */
void sql_exec_dumpdb(PGconn *conn)
void
sql_exec_dumpdb(PGconn *conn)
{
char *todo;
@ -320,7 +330,8 @@ void sql_exec_dumpdb(PGconn *conn)
/* display all tables in whatever db we are connected to. don't display the
system tables by default */
void sql_exec_dumptable(PGconn *conn, int systables)
void
sql_exec_dumptable(PGconn *conn, int systables)
{
char *todo;
@ -337,7 +348,8 @@ void sql_exec_dumptable(PGconn *conn, int systables)
/* display the oid for a given tablename for whatever db we are connected
to. do we want to allow %bar% in the search? Not now. */
void sql_exec_searchtable(PGconn *conn, char *tablename)
void
sql_exec_searchtable(PGconn *conn, char *tablename)
{
int returnvalue;
char *todo;
@ -351,18 +363,15 @@ void sql_exec_searchtable(PGconn *conn, char *tablename)
/* deal with the return errors */
if (returnvalue == -1)
{
printf("No tables with that name found\n");
}
if (returnvalue == -2)
{
printf("VERY scary: more than one table with that name found!!\n");
}
}
/* same as above */
void sql_exec_searchoid(PGconn *conn, int oid)
void
sql_exec_searchoid(PGconn *conn, int oid)
{
int returnvalue;
char *todo;
@ -374,17 +383,14 @@ void sql_exec_searchoid(PGconn *conn, int oid)
returnvalue = sql_exec(conn, todo, 1);
if (returnvalue == -1)
{
printf("No tables with that oid found\n");
}
if (returnvalue == -2)
{
printf("VERY scary: more than one table with that oid found!!\n");
}
}
int main(int argc, char **argv)
int
main(int argc, char **argv)
{
struct options *my_opts;
PGconn *pgconn;

View File

@ -6,7 +6,7 @@
* copyright (c) Oliver Elphick <olly@lfix.co.uk>, 2001;
* licence: BSD
*
* $Header: /cvsroot/pgsql/contrib/pg_controldata/Attic/pg_controldata.c,v 1.2 2001/03/13 01:17:40 tgl Exp $
* $Header: /cvsroot/pgsql/contrib/pg_controldata/Attic/pg_controldata.c,v 1.3 2001/03/22 03:59:09 momjian Exp $
*/
#include "postgres.h"
@ -51,7 +51,8 @@ main()
char ckpttime_str[32];
DataDir = getenv("PGDATA");
if ( DataDir == NULL ) {
if (DataDir == NULL)
{
fprintf(stderr, "PGDATA is not defined\n");
exit(1);
}

View File

@ -1,7 +1,7 @@
/* -------------------------------------------------------------------------
* pg_dumplo
*
* $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/lo_export.c,v 1.5 2001/01/24 19:42:44 momjian Exp $
* $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/lo_export.c,v 1.6 2001/03/22 03:59:10 momjian Exp $
*
* Karel Zak 1999-2000
* -------------------------------------------------------------------------
@ -52,25 +52,29 @@ load_lolist( LODumpMaster *pgLO )
" AND c.relkind = 'r' "
" AND c.relname NOT LIKE 'pg_%'");
if (PQresultStatus(pgLO->res) != PGRES_TUPLES_OK) {
if (PQresultStatus(pgLO->res) != PGRES_TUPLES_OK)
{
fprintf(stderr, "%s: Failed to get LO OIDs:\n%s", progname,
PQerrorMessage(pgLO->conn));
exit(RE_ERROR);
}
if ((n = PQntuples(pgLO->res)) == 0) {
if ((n = PQntuples(pgLO->res)) == 0)
{
fprintf(stderr, "%s: No OID columns in the database.\n", progname);
exit(RE_ERROR);
}
pgLO->lolist = (LOlist *) malloc((n + 1) * sizeof(LOlist));
if (!pgLO->lolist) {
if (!pgLO->lolist)
{
fprintf(stderr, "%s: can't allocate memory\n", progname);
exit(RE_ERROR);
}
for (i = 0, ll = pgLO->lolist; i < n; i++, ll++) {
for (i = 0, ll = pgLO->lolist; i < n; i++, ll++)
{
ll->lo_table = strdup(PQgetvalue(pgLO->res, i, 0));
ll->lo_attr = strdup(PQgetvalue(pgLO->res, i, 1));
}
@ -87,8 +91,10 @@ pglo_export(LODumpMaster *pgLO)
char path[BUFSIZ],
Qbuff[QUERY_BUFSIZ];
if (pgLO->action != ACTION_SHOW) {
if (pgLO->action != ACTION_SHOW)
{
time_t t;
time(&t);
fprintf(pgLO->index, "#\n# This is the PostgreSQL large object dump index\n#\n");
fprintf(pgLO->index, "#\tDate: %s", ctime(&t));
@ -100,7 +106,8 @@ pglo_export(LODumpMaster *pgLO)
pgLO->counter = 0;
for(ll=pgLO->lolist; ll->lo_table != NULL; ll++) {
for (ll = pgLO->lolist; ll->lo_table != NULL; ll++)
{
/* ----------
* Query: find the LOs referenced by this column
@ -113,15 +120,19 @@ pglo_export(LODumpMaster *pgLO)
pgLO->res = PQexec(pgLO->conn, Qbuff);
if (PQresultStatus(pgLO->res) != PGRES_TUPLES_OK) {
if (PQresultStatus(pgLO->res) != PGRES_TUPLES_OK)
{
fprintf(stderr, "%s: Failed to get LO OIDs:\n%s", progname,
PQerrorMessage(pgLO->conn));
}
else if ((tuples = PQntuples(pgLO->res)) == 0) {
else if ((tuples = PQntuples(pgLO->res)) == 0)
{
if (!pgLO->quiet && pgLO->action == ACTION_EXPORT_ATTR)
printf("%s: no large objects in \"%s\".\"%s\"\n",
progname, ll->lo_table, ll->lo_attr);
} else {
}
else
{
int t;
char *val;
@ -130,13 +141,16 @@ pglo_export(LODumpMaster *pgLO)
* Create DIR/FILE
* ----------
*/
if (pgLO->action != ACTION_SHOW) {
if (pgLO->action != ACTION_SHOW)
{
sprintf(path, "%s/%s/%s", pgLO->space, pgLO->db,
ll->lo_table);
if (mkdir(path, DIR_UMASK) == -1) {
if (errno != EEXIST) {
if (mkdir(path, DIR_UMASK) == -1)
{
if (errno != EEXIST)
{
perror(path);
exit(RE_ERROR);
}
@ -145,8 +159,10 @@ pglo_export(LODumpMaster *pgLO)
sprintf(path, "%s/%s/%s/%s", pgLO->space, pgLO->db,
ll->lo_table, ll->lo_attr);
if (mkdir(path, DIR_UMASK) == -1) {
if (errno != EEXIST) {
if (mkdir(path, DIR_UMASK) == -1)
{
if (errno != EEXIST)
{
perror(path);
exit(RE_ERROR);
}
@ -159,14 +175,16 @@ pglo_export(LODumpMaster *pgLO)
pgLO->counter += tuples;
for(t=0; t<tuples; t++) {
for (t = 0; t < tuples; t++)
{
Oid lo;
val = PQgetvalue(pgLO->res, t, 0);
lo = atooid(val);
if (pgLO->action == ACTION_SHOW) {
if (pgLO->action == ACTION_SHOW)
{
printf("%s.%s: %u\n", ll->lo_table, ll->lo_attr, lo);
continue;
}

View File

@ -1,7 +1,7 @@
/* -------------------------------------------------------------------------
* pg_dumplo
*
* $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/lo_import.c,v 1.3 2001/01/24 19:42:45 momjian Exp $
* $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/lo_import.c,v 1.4 2001/03/22 03:59:10 momjian Exp $
*
* Karel Zak 1999-2000
* -------------------------------------------------------------------------
@ -29,11 +29,14 @@ pglo_import(LODumpMaster *pgLO)
{
LOlist loa;
Oid new_oid;
char tab[MAX_TABLE_NAME], attr[MAX_ATTR_NAME],
path[BUFSIZ], lo_path[BUFSIZ],
char tab[MAX_TABLE_NAME],
attr[MAX_ATTR_NAME],
path[BUFSIZ],
lo_path[BUFSIZ],
Qbuff[QUERY_BUFSIZ];
while(fgets(Qbuff, QUERY_BUFSIZ, pgLO->index)) {
while (fgets(Qbuff, QUERY_BUFSIZ, pgLO->index))
{
if (*Qbuff == '#')
continue;
@ -51,7 +54,8 @@ pglo_import(LODumpMaster *pgLO)
* Import LO
* ----------
*/
if ((new_oid = lo_import(pgLO->conn, lo_path)) == 0) {
if ((new_oid = lo_import(pgLO->conn, lo_path)) == 0)
{
fprintf(stderr, "%s: %s\n", progname, PQerrorMessage(pgLO->conn));
@ -60,7 +64,8 @@ pglo_import(LODumpMaster *pgLO)
exit(RE_ERROR);
}
if (pgLO->remove) {
if (pgLO->remove)
{
notice(pgLO, FALSE);
if (lo_unlink(pgLO->conn, loa.lo_oid) < 0)
fprintf(stderr, "%s: can't remove LO %u:\n%s",
@ -85,7 +90,8 @@ pglo_import(LODumpMaster *pgLO)
pgLO->res = PQexec(pgLO->conn, Qbuff);
if (PQresultStatus(pgLO->res) != PGRES_COMMAND_OK) {
if (PQresultStatus(pgLO->res) != PGRES_COMMAND_OK)
{
fprintf(stderr, "%s: %s\n", progname, PQerrorMessage(pgLO->conn));
PQclear(pgLO->res);
PQexec(pgLO->conn, "ROLLBACK");

View File

@ -1,7 +1,7 @@
/* -------------------------------------------------------------------------
* pg_dumplo
*
* $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/main.c,v 1.6 2001/02/10 02:31:25 tgl Exp $
* $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/main.c,v 1.7 2001/03/22 03:59:10 momjian Exp $
*
* Karel Zak 1999-2000
* -------------------------------------------------------------------------
@ -45,7 +45,8 @@ static void parse_lolist (LODumpMaster *pgLO);
int
main(int argc, char **argv)
{
LODumpMaster _pgLO, *pgLO = &_pgLO;
LODumpMaster _pgLO,
*pgLO = &_pgLO;
char *pwd = NULL;
pgLO->argv = argv;
@ -68,7 +69,8 @@ main(int argc, char **argv)
* Parse ARGV
* ----------
*/
if (argc > 1) {
if (argc > 1)
{
int arg;
extern int optind;
@ -90,11 +92,14 @@ main(int argc, char **argv)
{NULL, 0, 0, 0}
};
while((arg = getopt_long(argc, argv, "?aehu:p:qd:l:t:irs:w", l_opt, &l_index)) != -1) {
while ((arg = getopt_long(argc, argv, "?aehu:p:qd:l:t:irs:w", l_opt, &l_index)) != -1)
{
#else
while((arg = getopt(argc, argv, "?aehu:p:qd:l:t:irs:w")) != -1) {
while ((arg = getopt(argc, argv, "?aehu:p:qd:l:t:irs:w")) != -1)
{
#endif
switch(arg) {
switch (arg)
{
case '?':
case 'h':
usage();
@ -141,7 +146,9 @@ main(int argc, char **argv)
exit(RE_ERROR);
}
}
} else {
}
else
{
usage();
exit(RE_ERROR);
}
@ -150,14 +157,17 @@ main(int argc, char **argv)
* Check space
* ----------
*/
if (! pgLO->space && ! pgLO->action == ACTION_SHOW) {
if (!(pgLO->space = getenv("PWD"))) {
if (!pgLO->space && !pgLO->action == ACTION_SHOW)
{
if (!(pgLO->space = getenv("PWD")))
{
fprintf(stderr, "%s: not set space for dump-tree (option '-s' or $PWD).\n", progname);
exit(RE_ERROR);
}
}
if (!pgLO->action) {
if (!pgLO->action)
{
fprintf(stderr, "%s: What do you want - export or import?\n", progname);
exit(RE_ERROR);
}
@ -169,7 +179,8 @@ main(int argc, char **argv)
pgLO->conn = PQsetdbLogin(pgLO->host, NULL, NULL, NULL, pgLO->db,
pgLO->user, pwd);
if (PQstatus(pgLO->conn) == CONNECTION_BAD) {
if (PQstatus(pgLO->conn) == CONNECTION_BAD)
{
fprintf(stderr, "%s (connection): %s\n", progname, PQerrorMessage(pgLO->conn));
exit(RE_ERROR);
}
@ -187,7 +198,8 @@ main(int argc, char **argv)
PQexec(pgLO->conn, "BEGIN");
switch(pgLO->action) {
switch (pgLO->action)
{
case ACTION_SHOW:
case ACTION_EXPORT_ALL:
@ -196,7 +208,8 @@ main(int argc, char **argv)
case ACTION_EXPORT_ATTR:
pglo_export(pgLO);
if (!pgLO->quiet) {
if (!pgLO->quiet)
{
if (pgLO->action == ACTION_SHOW)
printf("\nDatabase '%s' contains %d large objects.\n\n", pgLO->db, pgLO->counter);
else
@ -230,18 +243,21 @@ parse_lolist (LODumpMaster *pgLO)
pgLO->lolist = (LOlist *) malloc(pgLO->argc * sizeof(LOlist));
if (! pgLO->lolist) {
if (!pgLO->lolist)
{
fprintf(stderr, "%s: can't allocate memory\n", progname);
exit(RE_ERROR);
}
for (d = pgLO->argv + pgLO->lolist_start, ll = pgLO->lolist;
*d != NULL;
d++, ll++) {
d++, ll++)
{
strncpy(buff, *d, MAX_TABLE_NAME + MAX_ATTR_NAME);
if ((loc = strchr(buff, '.')) == NULL) {
if ((loc = strchr(buff, '.')) == NULL)
{
fprintf(stderr, "%s: '%s' is bad 'table.attr'\n", progname, buff);
exit(RE_ERROR);
}

View File

@ -1,7 +1,7 @@
/* -------------------------------------------------------------------------
* pg_dumplo
*
* $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/pg_dumplo.h,v 1.3 2001/01/24 19:42:45 momjian Exp $
* $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/pg_dumplo.h,v 1.4 2001/03/22 03:59:10 momjian Exp $
*
* Karel Zak 1999-2000
* -------------------------------------------------------------------------
@ -36,13 +36,15 @@
* LO struct
* ----------
*/
typedef struct {
typedef struct
{
char *lo_table,
*lo_attr;
Oid lo_oid;
} LOlist;
typedef struct {
typedef struct
{
int action;
LOlist *lolist;
char **argv,
@ -60,7 +62,8 @@ typedef struct {
PGconn *conn;
} LODumpMaster;
typedef enum {
typedef enum
{
ACTION_NONE,
ACTION_SHOW,
ACTION_EXPORT_ATTR,

View File

@ -1,7 +1,7 @@
/* -------------------------------------------------------------------------
* pg_dumplo
*
* $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/utils.c,v 1.3 2001/01/24 19:42:45 momjian Exp $
* $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/utils.c,v 1.4 2001/03/22 03:59:10 momjian Exp $
*
* Karel Zak 1999-2000
* -------------------------------------------------------------------------
@ -39,10 +39,13 @@ index_file(LODumpMaster *pgLO)
sprintf(path, "%s/%s", pgLO->space, pgLO->db);
if (pgLO->action == ACTION_EXPORT_ATTR ||
pgLO->action == ACTION_EXPORT_ALL) {
pgLO->action == ACTION_EXPORT_ALL)
{
if (mkdir(path, DIR_UMASK) == -1) {
if (errno != EEXIST) {
if (mkdir(path, DIR_UMASK) == -1)
{
if (errno != EEXIST)
{
perror(path);
exit(RE_ERROR);
}
@ -50,16 +53,20 @@ index_file(LODumpMaster *pgLO)
sprintf(path, "%s/lo_dump.index", path);
if ((pgLO->index = fopen(path, "w")) == NULL) {
if ((pgLO->index = fopen(path, "w")) == NULL)
{
perror(path);
exit(RE_ERROR);
}
} else if (pgLO->action != ACTION_NONE ) {
}
else if (pgLO->action != ACTION_NONE)
{
sprintf(path, "%s/lo_dump.index", path);
if ((pgLO->index = fopen(path, "r")) == NULL) {
if ((pgLO->index = fopen(path, "r")) == NULL)
{
perror(path);
exit(RE_ERROR);
}
@ -67,13 +74,15 @@ index_file(LODumpMaster *pgLO)
}
static
void Dummy_NoticeProcessor(void * arg, const char * message)
void
Dummy_NoticeProcessor(void *arg, const char *message)
{
;
}
static
void Default_NoticeProcessor(void * arg, const char * message)
void
Default_NoticeProcessor(void *arg, const char *message)
{
fprintf(stderr, "%s", message);
}
@ -81,6 +90,8 @@ void Default_NoticeProcessor(void * arg, const char * message)
void
notice(LODumpMaster * pgLO, int set)
{
if (set)PQsetNoticeProcessor(pgLO->conn, Default_NoticeProcessor, NULL);
else PQsetNoticeProcessor(pgLO->conn, Dummy_NoticeProcessor, NULL);
if (set)
PQsetNoticeProcessor(pgLO->conn, Default_NoticeProcessor, NULL);
else
PQsetNoticeProcessor(pgLO->conn, Dummy_NoticeProcessor, NULL);
}

View File

@ -13,24 +13,48 @@
#include <syslog.h>
#include <string.h>
struct {
struct
{
const char *tag;
int size;
int priority;
} tags[] = {
{ "", 0, LOG_NOTICE },
{ "emerg:", sizeof("emerg"), LOG_EMERG },
{ "alert:", sizeof("alert"), LOG_ALERT },
{ "crit:", sizeof("crit"), LOG_CRIT },
{ "err:", sizeof("err"), LOG_ERR },
{ "error:", sizeof("error"), LOG_ERR },
{ "warning:", sizeof("warning"), LOG_WARNING },
{ "notice:", sizeof("notice"), LOG_NOTICE },
{ "info:", sizeof("info"), LOG_INFO },
{ "debug:", sizeof("debug"), LOG_DEBUG }
} tags[] =
{
{
"", 0, LOG_NOTICE
},
{
"emerg:", sizeof("emerg"), LOG_EMERG
},
{
"alert:", sizeof("alert"), LOG_ALERT
},
{
"crit:", sizeof("crit"), LOG_CRIT
},
{
"err:", sizeof("err"), LOG_ERR
},
{
"error:", sizeof("error"), LOG_ERR
},
{
"warning:", sizeof("warning"), LOG_WARNING
},
{
"notice:", sizeof("notice"), LOG_NOTICE
},
{
"info:", sizeof("info"), LOG_INFO
},
{
"debug:", sizeof("debug"), LOG_DEBUG
}
};
int main()
int
main()
{
char buf[301];
int c;
@ -40,29 +64,29 @@ int main()
#ifndef DEBUG
openlog("postgresql", LOG_CONS, LOG_LOCAL1);
#endif
while ( (c = getchar()) != EOF) {
if (c == '\r') {
while ((c = getchar()) != EOF)
{
if (c == '\r')
continue;
}
if (c == '\n') {
if (c == '\n')
{
int level = sizeof(tags) / sizeof(*tags);
char *bol;
if (colon == 0 || (size_t)(colon - buf) > sizeof("warning")) {
if (colon == 0 || (size_t) (colon - buf) > sizeof("warning"))
level = 1;
}
*pos = 0;
while (--level) {
while (--level)
{
if (pos - buf >= tags[level].size
&& strncmp(buf, tags[level].tag, tags[level].size) == 0) {
&& strncmp(buf, tags[level].tag, tags[level].size) == 0)
break;
}
}
bol = buf + tags[level].size;
if (bol > buf && *bol == ' ') {
if (bol > buf && *bol == ' ')
++bol;
}
if (pos - bol > 0) {
if (pos - bol > 0)
{
#ifndef DEBUG
syslog(tags[level].priority, "%s", bol);
#else
@ -73,13 +97,10 @@ int main()
colon = (char const *) 0;
continue;
}
if (c == ':' && !colon) {
if (c == ':' && !colon)
colon = pos;
}
if ((size_t)(pos - buf) < sizeof(buf)-1) {
if ((size_t) (pos - buf) < sizeof(buf) - 1)
*pos++ = c;
}
}
return 0;
}

View File

@ -23,7 +23,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $Header: /cvsroot/pgsql/contrib/pg_resetxlog/Attic/pg_resetxlog.c,v 1.2 2001/03/16 05:08:39 tgl Exp $
* $Header: /cvsroot/pgsql/contrib/pg_resetxlog/Attic/pg_resetxlog.c,v 1.3 2001/03/22 03:59:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -110,7 +110,8 @@ static char XLogDir[MAXPGPATH];
static char ControlFilePath[MAXPGPATH];
static ControlFileData ControlFile; /* pg_control values */
static uint32 newXlogId, newXlogSeg; /* ID/Segment of new XLOG segment */
static uint32 newXlogId,
newXlogSeg; /* ID/Segment of new XLOG segment */
static bool guessed = false; /* T if we had to guess at any values */
@ -146,10 +147,11 @@ ReadControlFile(void)
if ((fd = open(ControlFilePath, O_RDONLY)) < 0)
{
/*
* If pg_control is not there at all, or we can't read it,
* the odds are we've been handed a bad DataDir path, so give up.
* User can do "touch pg_control" to force us to proceed.
* If pg_control is not there at all, or we can't read it, the
* odds are we've been handed a bad DataDir path, so give up. User
* can do "touch pg_control" to force us to proceed.
*/
perror("Failed to open $PGDATA/global/pg_control for reading");
if (errno == ENOENT)
@ -193,6 +195,7 @@ ReadControlFile(void)
guessed = true;
return true;
}
/*
* Maybe it's a 7.1beta pg_control.
*/
@ -409,6 +412,7 @@ CheckControlVersion0(char *buffer, int len)
(char *) malloc(_INTL_MAXLOGRECSZ));
if (record == NULL)
{
/*
* We have to guess at the checkpoint contents.
*/
@ -442,8 +446,8 @@ RecordIsValidV0(XLogRecordV0 *record)
/*
* NB: this code is not right for V0 records containing backup blocks,
* but for now it's only going to be applied to checkpoint records,
* so I'm not going to worry about it...
* but for now it's only going to be applied to checkpoint records, so
* I'm not going to worry about it...
*/
INIT_CRC64V0(crc);
COMP_CRC64V0(crc, XLogRecGetData(record), len);
@ -510,10 +514,13 @@ ReadRecordV0(XLogRecPtr *RecPtr, char *buffer)
if (record->xl_len == 0)
goto next_record_is_invalid;
/*
* Compute total length of record including any appended backup blocks.
* Compute total length of record including any appended backup
* blocks.
*/
total_len = SizeOfXLogRecordV0 + record->xl_len;
/*
* Make sure it will fit in buffer (currently, it is mechanically
* impossible for this test to fail, but it seems like a good idea
@ -610,6 +617,7 @@ GuessControlValues(void)
{
#ifdef USE_LOCALE
char *localeptr;
#endif
/*
@ -710,8 +718,8 @@ RewriteControlFile(void)
char buffer[BLCKSZ]; /* need not be aligned */
/*
* Adjust fields as needed to force an empty XLOG starting at the
* next available segment.
* Adjust fields as needed to force an empty XLOG starting at the next
* available segment.
*/
newXlogId = ControlFile.logId;
newXlogSeg = ControlFile.logSeg;
@ -740,11 +748,11 @@ RewriteControlFile(void)
FIN_CRC64(ControlFile.crc);
/*
* We write out BLCKSZ bytes into pg_control, zero-padding the
* excess over sizeof(ControlFileData). This reduces the odds
* of premature-EOF errors when reading pg_control. We'll still
* fail when we check the contents of the file, but hopefully with
* a more specific error than "couldn't read pg_control".
* We write out BLCKSZ bytes into pg_control, zero-padding the excess
* over sizeof(ControlFileData). This reduces the odds of
* premature-EOF errors when reading pg_control. We'll still fail
* when we check the contents of the file, but hopefully with a more
* specific error than "couldn't read pg_control".
*/
if (sizeof(ControlFileData) > BLCKSZ)
{
@ -946,7 +954,8 @@ main(int argc, char ** argv)
/*
* Check for a postmaster lock file --- if there is one, refuse to
* proceed, on grounds we might be interfering with a live installation.
* proceed, on grounds we might be interfering with a live
* installation.
*/
snprintf(path, MAXPGPATH, "%s%cpostmaster.pid", DataDir, SEP_CHAR);
@ -973,8 +982,8 @@ main(int argc, char ** argv)
GuessControlValues();
/*
* If we had to guess anything, and -f was not given, just print
* the guessed values and exit. Also print if -n is given.
* If we had to guess anything, and -f was not given, just print the
* guessed values and exit. Also print if -n is given.
*/
if ((guessed && !force) || noupdate)
{

View File

@ -26,7 +26,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: encode.c,v 1.3 2001/02/10 02:31:25 tgl Exp $
* $Id: encode.c,v 1.4 2001/03/22 03:59:10 momjian Exp $
*/
#include "postgres.h"
@ -56,8 +56,11 @@ encode(PG_FUNCTION_ARGS)
{
text *arg;
text *name;
uint len, rlen, rlen0;
pg_coding *c, cbuf;
uint len,
rlen,
rlen0;
pg_coding *c,
cbuf;
text *res;
if (PG_ARGISNULL(0) || PG_ARGISNULL(1))
@ -93,8 +96,11 @@ decode(PG_FUNCTION_ARGS)
{
text *arg;
text *name;
uint len, rlen, rlen0;
pg_coding *c, cbuf;
uint len,
rlen,
rlen0;
pg_coding *c,
cbuf;
text *res;
if (PG_ARGISNULL(0) || PG_ARGISNULL(1))
@ -130,7 +136,8 @@ find_coding(pg_coding *dst, text *name, int silent)
uint len;
len = VARSIZE(name) - VARHDRSZ;
if (len >= NAMEDATALEN) {
if (len >= NAMEDATALEN)
{
if (silent)
return NULL;
elog(ERROR, "Encoding type does not exist (name too long)");
@ -152,7 +159,9 @@ uint
hex_encode(uint8 *src, uint len, uint8 *dst)
{
uint8 *end = src + len;
while (src < end) {
while (src < end)
{
*dst++ = hextbl[(*src >> 4) & 0xF];
*dst++ = hextbl[*src & 0xF];
src++;
@ -181,12 +190,19 @@ get_hex(char c)
uint
hex_decode(uint8 *src, uint len, uint8 *dst)
{
uint8 *s, *srcend, v1, v2, *p = dst;
uint8 *s,
*srcend,
v1,
v2,
*p = dst;
srcend = src + len;
s = src; p = dst;
while (s < srcend) {
if (*s == ' ' || *s == '\n' || *s == '\t' || *s == '\r') {
s = src;
p = dst;
while (s < srcend)
{
if (*s == ' ' || *s == '\n' || *s == '\t' || *s == '\r')
{
s++;
continue;
}
@ -207,19 +223,25 @@ static unsigned char _base64[] =
uint
b64_encode(uint8 *src, uint len, uint8 *dst)
{
uint8 *s, *p, *end = src + len, *lend = dst + 76;
uint8 *s,
*p,
*end = src + len,
*lend = dst + 76;
int pos = 2;
unsigned long buf = 0;
s = src; p = dst;
s = src;
p = dst;
while (s < end) {
while (s < end)
{
buf |= *s << (pos << 3);
pos--;
s++;
/* write it out */
if (pos < 0) {
if (pos < 0)
{
*p++ = _base64[(buf >> 18) & 0x3f];
*p++ = _base64[(buf >> 12) & 0x3f];
*p++ = _base64[(buf >> 6) & 0x3f];
@ -228,12 +250,14 @@ b64_encode(uint8 *src, uint len, uint8 *dst)
pos = 2;
buf = 0;
}
if (p >= lend) {
if (p >= lend)
{
*p++ = '\n';
lend = p + 76;
}
}
if (pos != 2) {
if (pos != 2)
{
*p++ = _base64[(buf >> 18) & 0x3f];
*p++ = _base64[(buf >> 12) & 0x3f];
*p++ = (pos == 0) ? _base64[(buf >> 6) & 0x3f] : '=';
@ -247,14 +271,17 @@ b64_encode(uint8 *src, uint len, uint8 *dst)
uint
b64_decode(uint8 *src, uint len, uint8 *dst)
{
char *srcend = src + len, *s = src;
char *srcend = src + len,
*s = src;
uint8 *p = dst;
char c;
uint b = 0;
unsigned long buf = 0;
int pos = 0, end = 0;
int pos = 0,
end = 0;
while (s < srcend) {
while (s < srcend)
{
c = *s++;
if (c >= 'A' && c <= 'Z')
b = c - 'A';
@ -266,16 +293,21 @@ b64_decode(uint8 *src, uint len, uint8 *dst)
b = 62;
else if (c == '/')
b = 63;
else if (c == '=') {
else if (c == '=')
{
/* end sequence */
if (!end) {
if (pos == 2) end = 1;
else if (pos == 3) end = 2;
if (!end)
{
if (pos == 2)
end = 1;
else if (pos == 3)
end = 2;
else
elog(ERROR, "base64: unexpected '='");
}
b = 0;
} else if (c == ' ' || c == '\t' || c == '\n' || c == '\r')
}
else if (c == ' ' || c == '\t' || c == '\n' || c == '\r')
continue;
else
elog(ERROR, "base64: Invalid symbol");
@ -283,7 +315,8 @@ b64_decode(uint8 *src, uint len, uint8 *dst)
/* add it to buffer */
buf = (buf << 6) + b;
pos++;
if (pos == 4) {
if (pos == 4)
{
*p++ = (buf >> 16) & 255;
if (end == 0 || end > 1)
*p++ = (buf >> 8) & 255;
@ -337,10 +370,11 @@ static pg_coding *
pg_find_coding(pg_coding * res, char *name)
{
pg_coding *p;
for (p = encoding_list; p->name; p++) {
for (p = encoding_list; p->name; p++)
{
if (!strcasecmp(p->name, name))
return p;
}
return NULL;
}

View File

@ -26,7 +26,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: encode.h,v 1.1 2001/01/24 03:46:16 momjian Exp $
* $Id: encode.h,v 1.2 2001/03/22 03:59:10 momjian Exp $
*/
#ifndef __PG_ENCODE_H
@ -37,7 +37,8 @@ Datum encode(PG_FUNCTION_ARGS);
Datum decode(PG_FUNCTION_ARGS);
typedef struct _pg_coding pg_coding;
struct _pg_coding {
struct _pg_coding
{
char *name;
uint (*encode_len) (uint dlen);
uint (*decode_len) (uint dlen);
@ -57,4 +58,3 @@ uint b64_enc_len(uint srclen);
uint b64_dec_len(uint srclen);
#endif /* __PG_ENCODE_H */

View File

@ -26,7 +26,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: internal.c,v 1.2 2001/02/10 02:31:25 tgl Exp $
* $Id: internal.c,v 1.3 2001/03/22 03:59:10 momjian Exp $
*/
#include "postgres.h"
@ -66,7 +66,8 @@ int_digest_list [] = {
};
static uint
pg_md5_len(pg_digest *h) {
pg_md5_len(pg_digest * h)
{
return MD5_DIGEST_LENGTH;
}
@ -83,7 +84,8 @@ pg_md5_digest(pg_digest *h, uint8 *src, uint len, uint8 *buf)
}
static uint
pg_sha1_len(pg_digest *h) {
pg_sha1_len(pg_digest * h)
{
return SHA1_DIGEST_LENGTH;
}
@ -110,5 +112,3 @@ pg_find_digest(pg_digest *h, char *name)
return p;
return NULL;
}

View File

@ -31,7 +31,7 @@
* It is possible that this works with other SHA1/MD5
* implementations too.
*
* $Id: krb.c,v 1.3 2001/02/20 15:34:14 momjian Exp $
* $Id: krb.c,v 1.4 2001/03/22 03:59:10 momjian Exp $
*/
#include "postgres.h"
@ -71,7 +71,8 @@ int_digest_list [] = {
};
static uint
pg_md5_len(pg_digest *h) {
pg_md5_len(pg_digest * h)
{
return MD5_DIGEST_LENGTH;
}
@ -88,7 +89,8 @@ pg_md5_digest(pg_digest *h, uint8 *src, uint len, uint8 *buf)
}
static uint
pg_sha1_len(pg_digest *h) {
pg_sha1_len(pg_digest * h)
{
return SHA1_DIGEST_LENGTH;
}
@ -115,5 +117,3 @@ pg_find_digest(pg_digest *h, char *name)
return p;
return NULL;
}

View File

@ -1,4 +1,4 @@
/* $Id: md5.c,v 1.4 2001/02/10 02:31:25 tgl Exp $ */
/* $Id: md5.c,v 1.5 2001/03/22 03:59:10 momjian Exp $ */
/* $KAME: md5.c,v 1.3 2000/02/22 14:01:17 itojun Exp $ */
/*
@ -127,7 +127,8 @@ static const uint8 md5_paddat[MD5_BUFLEN] = {
static void md5_calc(uint8 *, md5_ctxt *);
void md5_init(ctxt)
void
md5_init(ctxt)
md5_ctxt *ctxt;
{
ctxt->md5_n = 0;
@ -139,46 +140,54 @@ void md5_init(ctxt)
bzero(ctxt->md5_buf, sizeof(ctxt->md5_buf));
}
void md5_loop(ctxt, input, len)
void
md5_loop(ctxt, input, len)
md5_ctxt *ctxt;
uint8 *input;
unsigned int len; /* number of bytes */
{
unsigned int gap, i;
unsigned int gap,
i;
ctxt->md5_n += len * 8; /* byte to bit */
gap = MD5_BUFLEN - ctxt->md5_i;
if (len >= gap) {
if (len >= gap)
{
bcopy((void *) input, (void *) (ctxt->md5_buf + ctxt->md5_i),
gap);
md5_calc(ctxt->md5_buf, ctxt);
for (i = gap; i + MD5_BUFLEN <= len; i += MD5_BUFLEN) {
for (i = gap; i + MD5_BUFLEN <= len; i += MD5_BUFLEN)
md5_calc((uint8 *) (input + i), ctxt);
}
ctxt->md5_i = len - i;
bcopy((void *) (input + i), (void *) ctxt->md5_buf, ctxt->md5_i);
} else {
}
else
{
bcopy((void *) input, (void *) (ctxt->md5_buf + ctxt->md5_i),
len);
ctxt->md5_i += len;
}
}
void md5_pad(ctxt)
void
md5_pad(ctxt)
md5_ctxt *ctxt;
{
unsigned int gap;
/* Don't count up padding. Keep md5_n. */
gap = MD5_BUFLEN - ctxt->md5_i;
if (gap > 8) {
if (gap > 8)
{
bcopy((void *) md5_paddat,
(void *) (ctxt->md5_buf + ctxt->md5_i),
gap - sizeof(ctxt->md5_n));
} else {
}
else
{
/* including gap == 8 */
bcopy((void *) md5_paddat, (void *) (ctxt->md5_buf + ctxt->md5_i),
gap);
@ -206,7 +215,8 @@ void md5_pad(ctxt)
md5_calc(ctxt->md5_buf, ctxt);
}
void md5_result(digest, ctxt)
void
md5_result(digest, ctxt)
uint8 *digest;
md5_ctxt *ctxt;
{
@ -215,22 +225,32 @@ void md5_result(digest, ctxt)
bcopy(&ctxt->md5_st8[0], digest, 16);
#endif
#if BYTE_ORDER == BIG_ENDIAN
digest[ 0] = ctxt->md5_st8[ 3]; digest[ 1] = ctxt->md5_st8[ 2];
digest[ 2] = ctxt->md5_st8[ 1]; digest[ 3] = ctxt->md5_st8[ 0];
digest[ 4] = ctxt->md5_st8[ 7]; digest[ 5] = ctxt->md5_st8[ 6];
digest[ 6] = ctxt->md5_st8[ 5]; digest[ 7] = ctxt->md5_st8[ 4];
digest[ 8] = ctxt->md5_st8[11]; digest[ 9] = ctxt->md5_st8[10];
digest[10] = ctxt->md5_st8[ 9]; digest[11] = ctxt->md5_st8[ 8];
digest[12] = ctxt->md5_st8[15]; digest[13] = ctxt->md5_st8[14];
digest[14] = ctxt->md5_st8[13]; digest[15] = ctxt->md5_st8[12];
digest[0] = ctxt->md5_st8[3];
digest[1] = ctxt->md5_st8[2];
digest[2] = ctxt->md5_st8[1];
digest[3] = ctxt->md5_st8[0];
digest[4] = ctxt->md5_st8[7];
digest[5] = ctxt->md5_st8[6];
digest[6] = ctxt->md5_st8[5];
digest[7] = ctxt->md5_st8[4];
digest[8] = ctxt->md5_st8[11];
digest[9] = ctxt->md5_st8[10];
digest[10] = ctxt->md5_st8[9];
digest[11] = ctxt->md5_st8[8];
digest[12] = ctxt->md5_st8[15];
digest[13] = ctxt->md5_st8[14];
digest[14] = ctxt->md5_st8[13];
digest[15] = ctxt->md5_st8[12];
#endif
}
#if BYTE_ORDER == BIG_ENDIAN
uint32 X[16];
#endif
static void md5_calc(b64, ctxt)
static void
md5_calc(b64, ctxt)
uint8 *b64;
md5_ctxt *ctxt;
{
@ -238,66 +258,149 @@ static void md5_calc(b64, ctxt)
uint32 B = ctxt->md5_stb;
uint32 C = ctxt->md5_stc;
uint32 D = ctxt->md5_std;
#if BYTE_ORDER == LITTLE_ENDIAN
uint32 *X = (uint32 *) b64;
#endif
#if BYTE_ORDER == BIG_ENDIAN
/* 4 byte words */
/* what a brute force but fast! */
uint8 *y = (uint8 *) X;
y[ 0] = b64[ 3]; y[ 1] = b64[ 2]; y[ 2] = b64[ 1]; y[ 3] = b64[ 0];
y[ 4] = b64[ 7]; y[ 5] = b64[ 6]; y[ 6] = b64[ 5]; y[ 7] = b64[ 4];
y[ 8] = b64[11]; y[ 9] = b64[10]; y[10] = b64[ 9]; y[11] = b64[ 8];
y[12] = b64[15]; y[13] = b64[14]; y[14] = b64[13]; y[15] = b64[12];
y[16] = b64[19]; y[17] = b64[18]; y[18] = b64[17]; y[19] = b64[16];
y[20] = b64[23]; y[21] = b64[22]; y[22] = b64[21]; y[23] = b64[20];
y[24] = b64[27]; y[25] = b64[26]; y[26] = b64[25]; y[27] = b64[24];
y[28] = b64[31]; y[29] = b64[30]; y[30] = b64[29]; y[31] = b64[28];
y[32] = b64[35]; y[33] = b64[34]; y[34] = b64[33]; y[35] = b64[32];
y[36] = b64[39]; y[37] = b64[38]; y[38] = b64[37]; y[39] = b64[36];
y[40] = b64[43]; y[41] = b64[42]; y[42] = b64[41]; y[43] = b64[40];
y[44] = b64[47]; y[45] = b64[46]; y[46] = b64[45]; y[47] = b64[44];
y[48] = b64[51]; y[49] = b64[50]; y[50] = b64[49]; y[51] = b64[48];
y[52] = b64[55]; y[53] = b64[54]; y[54] = b64[53]; y[55] = b64[52];
y[56] = b64[59]; y[57] = b64[58]; y[58] = b64[57]; y[59] = b64[56];
y[60] = b64[63]; y[61] = b64[62]; y[62] = b64[61]; y[63] = b64[60];
y[0] = b64[3];
y[1] = b64[2];
y[2] = b64[1];
y[3] = b64[0];
y[4] = b64[7];
y[5] = b64[6];
y[6] = b64[5];
y[7] = b64[4];
y[8] = b64[11];
y[9] = b64[10];
y[10] = b64[9];
y[11] = b64[8];
y[12] = b64[15];
y[13] = b64[14];
y[14] = b64[13];
y[15] = b64[12];
y[16] = b64[19];
y[17] = b64[18];
y[18] = b64[17];
y[19] = b64[16];
y[20] = b64[23];
y[21] = b64[22];
y[22] = b64[21];
y[23] = b64[20];
y[24] = b64[27];
y[25] = b64[26];
y[26] = b64[25];
y[27] = b64[24];
y[28] = b64[31];
y[29] = b64[30];
y[30] = b64[29];
y[31] = b64[28];
y[32] = b64[35];
y[33] = b64[34];
y[34] = b64[33];
y[35] = b64[32];
y[36] = b64[39];
y[37] = b64[38];
y[38] = b64[37];
y[39] = b64[36];
y[40] = b64[43];
y[41] = b64[42];
y[42] = b64[41];
y[43] = b64[40];
y[44] = b64[47];
y[45] = b64[46];
y[46] = b64[45];
y[47] = b64[44];
y[48] = b64[51];
y[49] = b64[50];
y[50] = b64[49];
y[51] = b64[48];
y[52] = b64[55];
y[53] = b64[54];
y[54] = b64[53];
y[55] = b64[52];
y[56] = b64[59];
y[57] = b64[58];
y[58] = b64[57];
y[59] = b64[56];
y[60] = b64[63];
y[61] = b64[62];
y[62] = b64[61];
y[63] = b64[60];
#endif
ROUND1(A, B, C, D, 0, Sa, 1); ROUND1(D, A, B, C, 1, Sb, 2);
ROUND1(C, D, A, B, 2, Sc, 3); ROUND1(B, C, D, A, 3, Sd, 4);
ROUND1(A, B, C, D, 4, Sa, 5); ROUND1(D, A, B, C, 5, Sb, 6);
ROUND1(C, D, A, B, 6, Sc, 7); ROUND1(B, C, D, A, 7, Sd, 8);
ROUND1(A, B, C, D, 8, Sa, 9); ROUND1(D, A, B, C, 9, Sb, 10);
ROUND1(C, D, A, B, 10, Sc, 11); ROUND1(B, C, D, A, 11, Sd, 12);
ROUND1(A, B, C, D, 12, Sa, 13); ROUND1(D, A, B, C, 13, Sb, 14);
ROUND1(C, D, A, B, 14, Sc, 15); ROUND1(B, C, D, A, 15, Sd, 16);
ROUND1(A, B, C, D, 0, Sa, 1);
ROUND1(D, A, B, C, 1, Sb, 2);
ROUND1(C, D, A, B, 2, Sc, 3);
ROUND1(B, C, D, A, 3, Sd, 4);
ROUND1(A, B, C, D, 4, Sa, 5);
ROUND1(D, A, B, C, 5, Sb, 6);
ROUND1(C, D, A, B, 6, Sc, 7);
ROUND1(B, C, D, A, 7, Sd, 8);
ROUND1(A, B, C, D, 8, Sa, 9);
ROUND1(D, A, B, C, 9, Sb, 10);
ROUND1(C, D, A, B, 10, Sc, 11);
ROUND1(B, C, D, A, 11, Sd, 12);
ROUND1(A, B, C, D, 12, Sa, 13);
ROUND1(D, A, B, C, 13, Sb, 14);
ROUND1(C, D, A, B, 14, Sc, 15);
ROUND1(B, C, D, A, 15, Sd, 16);
ROUND2(A, B, C, D, 1, Se, 17); ROUND2(D, A, B, C, 6, Sf, 18);
ROUND2(C, D, A, B, 11, Sg, 19); ROUND2(B, C, D, A, 0, Sh, 20);
ROUND2(A, B, C, D, 5, Se, 21); ROUND2(D, A, B, C, 10, Sf, 22);
ROUND2(C, D, A, B, 15, Sg, 23); ROUND2(B, C, D, A, 4, Sh, 24);
ROUND2(A, B, C, D, 9, Se, 25); ROUND2(D, A, B, C, 14, Sf, 26);
ROUND2(C, D, A, B, 3, Sg, 27); ROUND2(B, C, D, A, 8, Sh, 28);
ROUND2(A, B, C, D, 13, Se, 29); ROUND2(D, A, B, C, 2, Sf, 30);
ROUND2(C, D, A, B, 7, Sg, 31); ROUND2(B, C, D, A, 12, Sh, 32);
ROUND2(A, B, C, D, 1, Se, 17);
ROUND2(D, A, B, C, 6, Sf, 18);
ROUND2(C, D, A, B, 11, Sg, 19);
ROUND2(B, C, D, A, 0, Sh, 20);
ROUND2(A, B, C, D, 5, Se, 21);
ROUND2(D, A, B, C, 10, Sf, 22);
ROUND2(C, D, A, B, 15, Sg, 23);
ROUND2(B, C, D, A, 4, Sh, 24);
ROUND2(A, B, C, D, 9, Se, 25);
ROUND2(D, A, B, C, 14, Sf, 26);
ROUND2(C, D, A, B, 3, Sg, 27);
ROUND2(B, C, D, A, 8, Sh, 28);
ROUND2(A, B, C, D, 13, Se, 29);
ROUND2(D, A, B, C, 2, Sf, 30);
ROUND2(C, D, A, B, 7, Sg, 31);
ROUND2(B, C, D, A, 12, Sh, 32);
ROUND3(A, B, C, D, 5, Si, 33); ROUND3(D, A, B, C, 8, Sj, 34);
ROUND3(C, D, A, B, 11, Sk, 35); ROUND3(B, C, D, A, 14, Sl, 36);
ROUND3(A, B, C, D, 1, Si, 37); ROUND3(D, A, B, C, 4, Sj, 38);
ROUND3(C, D, A, B, 7, Sk, 39); ROUND3(B, C, D, A, 10, Sl, 40);
ROUND3(A, B, C, D, 13, Si, 41); ROUND3(D, A, B, C, 0, Sj, 42);
ROUND3(C, D, A, B, 3, Sk, 43); ROUND3(B, C, D, A, 6, Sl, 44);
ROUND3(A, B, C, D, 9, Si, 45); ROUND3(D, A, B, C, 12, Sj, 46);
ROUND3(C, D, A, B, 15, Sk, 47); ROUND3(B, C, D, A, 2, Sl, 48);
ROUND3(A, B, C, D, 5, Si, 33);
ROUND3(D, A, B, C, 8, Sj, 34);
ROUND3(C, D, A, B, 11, Sk, 35);
ROUND3(B, C, D, A, 14, Sl, 36);
ROUND3(A, B, C, D, 1, Si, 37);
ROUND3(D, A, B, C, 4, Sj, 38);
ROUND3(C, D, A, B, 7, Sk, 39);
ROUND3(B, C, D, A, 10, Sl, 40);
ROUND3(A, B, C, D, 13, Si, 41);
ROUND3(D, A, B, C, 0, Sj, 42);
ROUND3(C, D, A, B, 3, Sk, 43);
ROUND3(B, C, D, A, 6, Sl, 44);
ROUND3(A, B, C, D, 9, Si, 45);
ROUND3(D, A, B, C, 12, Sj, 46);
ROUND3(C, D, A, B, 15, Sk, 47);
ROUND3(B, C, D, A, 2, Sl, 48);
ROUND4(A, B, C, D, 0, Sm, 49); ROUND4(D, A, B, C, 7, Sn, 50);
ROUND4(C, D, A, B, 14, So, 51); ROUND4(B, C, D, A, 5, Sp, 52);
ROUND4(A, B, C, D, 12, Sm, 53); ROUND4(D, A, B, C, 3, Sn, 54);
ROUND4(C, D, A, B, 10, So, 55); ROUND4(B, C, D, A, 1, Sp, 56);
ROUND4(A, B, C, D, 8, Sm, 57); ROUND4(D, A, B, C, 15, Sn, 58);
ROUND4(C, D, A, B, 6, So, 59); ROUND4(B, C, D, A, 13, Sp, 60);
ROUND4(A, B, C, D, 4, Sm, 61); ROUND4(D, A, B, C, 11, Sn, 62);
ROUND4(C, D, A, B, 2, So, 63); ROUND4(B, C, D, A, 9, Sp, 64);
ROUND4(A, B, C, D, 0, Sm, 49);
ROUND4(D, A, B, C, 7, Sn, 50);
ROUND4(C, D, A, B, 14, So, 51);
ROUND4(B, C, D, A, 5, Sp, 52);
ROUND4(A, B, C, D, 12, Sm, 53);
ROUND4(D, A, B, C, 3, Sn, 54);
ROUND4(C, D, A, B, 10, So, 55);
ROUND4(B, C, D, A, 1, Sp, 56);
ROUND4(A, B, C, D, 8, Sm, 57);
ROUND4(D, A, B, C, 15, Sn, 58);
ROUND4(C, D, A, B, 6, So, 59);
ROUND4(B, C, D, A, 13, Sp, 60);
ROUND4(A, B, C, D, 4, Sm, 61);
ROUND4(D, A, B, C, 11, Sn, 62);
ROUND4(C, D, A, B, 2, So, 63);
ROUND4(B, C, D, A, 9, Sp, 64);
ctxt->md5_sta += A;
ctxt->md5_stb += B;
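The big-endian branch above rebuilds the sixteen 32-bit message words by reversing each group of four bytes, because MD5 defines its input words as little-endian. A minimal, byte-order-independent sketch of the same unpacking (helper names are hypothetical, not part of this patch):

#include <stdint.h>
#include <stddef.h>

/* Load one little-endian 32-bit word from a byte buffer; this works on
 * any host byte order, so no #if BYTE_ORDER switch is needed. */
static uint32_t
load_le32(const uint8_t *p)
{
	return (uint32_t) p[0]
		| ((uint32_t) p[1] << 8)
		| ((uint32_t) p[2] << 16)
		| ((uint32_t) p[3] << 24);
}

/* Unpack a 64-byte MD5 block into the sixteen message words X[0..15]. */
static void
unpack_md5_block(const uint8_t b64[64], uint32_t X[16])
{
	size_t		i;

	for (i = 0; i < 16; i++)
		X[i] = load_le32(b64 + 4 * i);
}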


@ -1,4 +1,4 @@
/* $Id: md5.h,v 1.3 2001/01/09 16:07:13 momjian Exp $ */
/* $Id: md5.h,v 1.4 2001/03/22 03:59:10 momjian Exp $ */
/* $KAME: md5.h,v 1.3 2000/02/22 14:01:18 itojun Exp $ */
/*
@ -35,8 +35,10 @@
#define MD5_BUFLEN 64
typedef struct {
union {
typedef struct
{
union
{
uint32 md5_state32[4];
uint8 md5_state8[16];
} md5_st;
@ -47,7 +49,8 @@ typedef struct {
#define md5_std md5_st.md5_state32[3]
#define md5_st8 md5_st.md5_state8
union {
union
{
uint64 md5_count64;
uint8 md5_count8[8];
} md5_count;


@ -26,7 +26,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: mhash.c,v 1.2 2001/02/10 02:31:26 tgl Exp $
* $Id: mhash.c,v 1.3 2001/03/22 03:59:10 momjian Exp $
*/
#include "postgres.h"
@ -37,12 +37,12 @@
static uint
pg_mhash_len(pg_digest * hash);
static uint8 *
pg_mhash_digest(pg_digest *hash, uint8 *src,
static uint8 *pg_mhash_digest(pg_digest * hash, uint8 *src,
uint len, uint8 *buf);
static uint
pg_mhash_len(pg_digest *h) {
pg_mhash_len(pg_digest * h)
{
return mhash_get_block_size(h->misc.code);
}
@ -52,6 +52,7 @@ pg_mhash_digest(pg_digest *h, uint8 *src, uint len, uint8 *dst)
uint8 *res;
MHASH mh = mhash_init(h->misc.code);
mhash(mh, src, len);
res = mhash_end(mh);
@ -64,17 +65,21 @@ pg_mhash_digest(pg_digest *h, uint8 *src, uint len, uint8 *dst)
pg_digest *
pg_find_digest(pg_digest * h, char *name)
{
size_t hnum, i, b;
size_t hnum,
i,
b;
char *mname;
hnum = mhash_count();
for (i = 0; i <= hnum; i++) {
for (i = 0; i <= hnum; i++)
{
mname = mhash_get_hash_name(i);
if (mname == NULL)
continue;
b = strcasecmp(name, mname);
free(mname);
if (!b) {
if (!b)
{
h->name = mhash_get_hash_name(i);
h->length = pg_mhash_len;
h->digest = pg_mhash_digest;
@ -84,4 +89,3 @@ pg_find_digest(pg_digest *h, char *name)
}
return NULL;
}
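pg_find_digest() above walks every algorithm id known to libmhash, compares names case-insensitively with strcasecmp(), and frees each name string it obtained. A stripped-down sketch of the same lookup pattern over a static table (hypothetical names and helper, not the mhash API):

#include <strings.h>
#include <stddef.h>

/* Hypothetical sketch of the same "find a digest by name, ignoring
 * case" pattern over a static table instead of the libmhash calls. */
static const char *demo_names[] = {"MD5", "SHA1", "RIPEMD160", NULL};

static int
demo_find_digest(const char *name)
{
	int			i;

	for (i = 0; demo_names[i] != NULL; i++)
		if (strcasecmp(name, demo_names[i]) == 0)
			return i;			/* found: index into the table */
	return -1;					/* not found */
}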


@ -26,7 +26,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: openssl.c,v 1.2 2001/02/10 02:31:26 tgl Exp $
* $Id: openssl.c,v 1.3 2001/03/22 03:59:10 momjian Exp $
*/
#include "postgres.h"
@ -41,7 +41,8 @@ static uint8 *
pg_ossl_digest(pg_digest * h, uint8 *src, uint len, uint8 *buf);
static uint
pg_ossl_len(pg_digest *h) {
pg_ossl_len(pg_digest * h)
{
return EVP_MD_size((EVP_MD *) h->misc.ptr);
}
@ -65,7 +66,8 @@ pg_find_digest(pg_digest *h, char *name)
{
const EVP_MD *md;
if (!pg_openssl_initialized) {
if (!pg_openssl_initialized)
{
OpenSSL_add_all_digests();
pg_openssl_initialized = 1;
}
@ -81,5 +83,3 @@ pg_find_digest(pg_digest *h, char *name)
return h;
}


@ -26,7 +26,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: pgcrypto.c,v 1.6 2001/02/10 02:31:26 tgl Exp $
* $Id: pgcrypto.c,v 1.7 2001/03/22 03:59:10 momjian Exp $
*/
#include "postgres.h"
@ -60,8 +60,10 @@ digest(PG_FUNCTION_ARGS)
{
text *arg;
text *name;
uint len, hlen;
pg_digest *h, _hbuf;
uint len,
hlen;
pg_digest *h,
_hbuf;
text *res;
if (PG_ARGISNULL(0) || PG_ARGISNULL(1))
@ -93,7 +95,8 @@ Datum
digest_exists(PG_FUNCTION_ARGS)
{
text *name;
pg_digest _hbuf, *res;
pg_digest _hbuf,
*res;
if (PG_ARGISNULL(0))
PG_RETURN_NULL();
@ -117,7 +120,8 @@ find_digest(pg_digest *hbuf, text *name, int silent)
uint len;
len = VARSIZE(name) - VARHDRSZ;
if (len >= NAMEDATALEN) {
if (len >= NAMEDATALEN)
{
if (silent)
return NULL;
elog(ERROR, "Hash type does not exist (name too long)");
@ -132,4 +136,3 @@ find_digest(pg_digest *hbuf, text *name, int silent)
elog(ERROR, "Hash type does not exist: '%s'", buf);
return p;
}


@ -26,20 +26,22 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: pgcrypto.h,v 1.2 2001/01/09 16:07:13 momjian Exp $
* $Id: pgcrypto.h,v 1.3 2001/03/22 03:59:10 momjian Exp $
*/
#ifndef _PG_CRYPTO_H
#define _PG_CRYPTO_H
typedef struct _pg_digest pg_digest;
struct _pg_digest {
struct _pg_digest
{
char *name;
uint (*length) (pg_digest * h);
uint8 *(*digest) (pg_digest * h, uint8 *data,
uint dlen, uint8 *buf);
/* private */
union {
union
{
uint code;
const void *ptr;
} misc;


@ -1,4 +1,4 @@
/* $Id: sha1.c,v 1.4 2001/02/10 02:31:26 tgl Exp $ */
/* $Id: sha1.c,v 1.5 2001/03/22 03:59:10 momjian Exp $ */
/* $KAME: sha1.c,v 1.3 2000/02/22 14:01:18 itojun Exp $ */
/*
@ -50,6 +50,7 @@
/* constant table */
static uint32 _K[] = {0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6};
#define K(t) _K[(t) / 20]
#define F0(b, c, d) (((b) & (c)) | ((~(b)) & (d)))
@ -87,74 +88,135 @@ static void
sha1_step(ctxt)
struct sha1_ctxt *ctxt;
{
uint32 a, b, c, d, e;
size_t t, s;
uint32 a,
b,
c,
d,
e;
size_t t,
s;
uint32 tmp;
#if BYTE_ORDER == LITTLE_ENDIAN
struct sha1_ctxt tctxt;
bcopy(&ctxt->m.b8[0], &tctxt.m.b8[0], 64);
ctxt->m.b8[0] = tctxt.m.b8[3]; ctxt->m.b8[1] = tctxt.m.b8[2];
ctxt->m.b8[2] = tctxt.m.b8[1]; ctxt->m.b8[3] = tctxt.m.b8[0];
ctxt->m.b8[4] = tctxt.m.b8[7]; ctxt->m.b8[5] = tctxt.m.b8[6];
ctxt->m.b8[6] = tctxt.m.b8[5]; ctxt->m.b8[7] = tctxt.m.b8[4];
ctxt->m.b8[8] = tctxt.m.b8[11]; ctxt->m.b8[9] = tctxt.m.b8[10];
ctxt->m.b8[10] = tctxt.m.b8[9]; ctxt->m.b8[11] = tctxt.m.b8[8];
ctxt->m.b8[12] = tctxt.m.b8[15]; ctxt->m.b8[13] = tctxt.m.b8[14];
ctxt->m.b8[14] = tctxt.m.b8[13]; ctxt->m.b8[15] = tctxt.m.b8[12];
ctxt->m.b8[16] = tctxt.m.b8[19]; ctxt->m.b8[17] = tctxt.m.b8[18];
ctxt->m.b8[18] = tctxt.m.b8[17]; ctxt->m.b8[19] = tctxt.m.b8[16];
ctxt->m.b8[20] = tctxt.m.b8[23]; ctxt->m.b8[21] = tctxt.m.b8[22];
ctxt->m.b8[22] = tctxt.m.b8[21]; ctxt->m.b8[23] = tctxt.m.b8[20];
ctxt->m.b8[24] = tctxt.m.b8[27]; ctxt->m.b8[25] = tctxt.m.b8[26];
ctxt->m.b8[26] = tctxt.m.b8[25]; ctxt->m.b8[27] = tctxt.m.b8[24];
ctxt->m.b8[28] = tctxt.m.b8[31]; ctxt->m.b8[29] = tctxt.m.b8[30];
ctxt->m.b8[30] = tctxt.m.b8[29]; ctxt->m.b8[31] = tctxt.m.b8[28];
ctxt->m.b8[32] = tctxt.m.b8[35]; ctxt->m.b8[33] = tctxt.m.b8[34];
ctxt->m.b8[34] = tctxt.m.b8[33]; ctxt->m.b8[35] = tctxt.m.b8[32];
ctxt->m.b8[36] = tctxt.m.b8[39]; ctxt->m.b8[37] = tctxt.m.b8[38];
ctxt->m.b8[38] = tctxt.m.b8[37]; ctxt->m.b8[39] = tctxt.m.b8[36];
ctxt->m.b8[40] = tctxt.m.b8[43]; ctxt->m.b8[41] = tctxt.m.b8[42];
ctxt->m.b8[42] = tctxt.m.b8[41]; ctxt->m.b8[43] = tctxt.m.b8[40];
ctxt->m.b8[44] = tctxt.m.b8[47]; ctxt->m.b8[45] = tctxt.m.b8[46];
ctxt->m.b8[46] = tctxt.m.b8[45]; ctxt->m.b8[47] = tctxt.m.b8[44];
ctxt->m.b8[48] = tctxt.m.b8[51]; ctxt->m.b8[49] = tctxt.m.b8[50];
ctxt->m.b8[50] = tctxt.m.b8[49]; ctxt->m.b8[51] = tctxt.m.b8[48];
ctxt->m.b8[52] = tctxt.m.b8[55]; ctxt->m.b8[53] = tctxt.m.b8[54];
ctxt->m.b8[54] = tctxt.m.b8[53]; ctxt->m.b8[55] = tctxt.m.b8[52];
ctxt->m.b8[56] = tctxt.m.b8[59]; ctxt->m.b8[57] = tctxt.m.b8[58];
ctxt->m.b8[58] = tctxt.m.b8[57]; ctxt->m.b8[59] = tctxt.m.b8[56];
ctxt->m.b8[60] = tctxt.m.b8[63]; ctxt->m.b8[61] = tctxt.m.b8[62];
ctxt->m.b8[62] = tctxt.m.b8[61]; ctxt->m.b8[63] = tctxt.m.b8[60];
ctxt->m.b8[0] = tctxt.m.b8[3];
ctxt->m.b8[1] = tctxt.m.b8[2];
ctxt->m.b8[2] = tctxt.m.b8[1];
ctxt->m.b8[3] = tctxt.m.b8[0];
ctxt->m.b8[4] = tctxt.m.b8[7];
ctxt->m.b8[5] = tctxt.m.b8[6];
ctxt->m.b8[6] = tctxt.m.b8[5];
ctxt->m.b8[7] = tctxt.m.b8[4];
ctxt->m.b8[8] = tctxt.m.b8[11];
ctxt->m.b8[9] = tctxt.m.b8[10];
ctxt->m.b8[10] = tctxt.m.b8[9];
ctxt->m.b8[11] = tctxt.m.b8[8];
ctxt->m.b8[12] = tctxt.m.b8[15];
ctxt->m.b8[13] = tctxt.m.b8[14];
ctxt->m.b8[14] = tctxt.m.b8[13];
ctxt->m.b8[15] = tctxt.m.b8[12];
ctxt->m.b8[16] = tctxt.m.b8[19];
ctxt->m.b8[17] = tctxt.m.b8[18];
ctxt->m.b8[18] = tctxt.m.b8[17];
ctxt->m.b8[19] = tctxt.m.b8[16];
ctxt->m.b8[20] = tctxt.m.b8[23];
ctxt->m.b8[21] = tctxt.m.b8[22];
ctxt->m.b8[22] = tctxt.m.b8[21];
ctxt->m.b8[23] = tctxt.m.b8[20];
ctxt->m.b8[24] = tctxt.m.b8[27];
ctxt->m.b8[25] = tctxt.m.b8[26];
ctxt->m.b8[26] = tctxt.m.b8[25];
ctxt->m.b8[27] = tctxt.m.b8[24];
ctxt->m.b8[28] = tctxt.m.b8[31];
ctxt->m.b8[29] = tctxt.m.b8[30];
ctxt->m.b8[30] = tctxt.m.b8[29];
ctxt->m.b8[31] = tctxt.m.b8[28];
ctxt->m.b8[32] = tctxt.m.b8[35];
ctxt->m.b8[33] = tctxt.m.b8[34];
ctxt->m.b8[34] = tctxt.m.b8[33];
ctxt->m.b8[35] = tctxt.m.b8[32];
ctxt->m.b8[36] = tctxt.m.b8[39];
ctxt->m.b8[37] = tctxt.m.b8[38];
ctxt->m.b8[38] = tctxt.m.b8[37];
ctxt->m.b8[39] = tctxt.m.b8[36];
ctxt->m.b8[40] = tctxt.m.b8[43];
ctxt->m.b8[41] = tctxt.m.b8[42];
ctxt->m.b8[42] = tctxt.m.b8[41];
ctxt->m.b8[43] = tctxt.m.b8[40];
ctxt->m.b8[44] = tctxt.m.b8[47];
ctxt->m.b8[45] = tctxt.m.b8[46];
ctxt->m.b8[46] = tctxt.m.b8[45];
ctxt->m.b8[47] = tctxt.m.b8[44];
ctxt->m.b8[48] = tctxt.m.b8[51];
ctxt->m.b8[49] = tctxt.m.b8[50];
ctxt->m.b8[50] = tctxt.m.b8[49];
ctxt->m.b8[51] = tctxt.m.b8[48];
ctxt->m.b8[52] = tctxt.m.b8[55];
ctxt->m.b8[53] = tctxt.m.b8[54];
ctxt->m.b8[54] = tctxt.m.b8[53];
ctxt->m.b8[55] = tctxt.m.b8[52];
ctxt->m.b8[56] = tctxt.m.b8[59];
ctxt->m.b8[57] = tctxt.m.b8[58];
ctxt->m.b8[58] = tctxt.m.b8[57];
ctxt->m.b8[59] = tctxt.m.b8[56];
ctxt->m.b8[60] = tctxt.m.b8[63];
ctxt->m.b8[61] = tctxt.m.b8[62];
ctxt->m.b8[62] = tctxt.m.b8[61];
ctxt->m.b8[63] = tctxt.m.b8[60];
#endif
a = H(0); b = H(1); c = H(2); d = H(3); e = H(4);
a = H(0);
b = H(1);
c = H(2);
d = H(3);
e = H(4);
for (t = 0; t < 20; t++) {
for (t = 0; t < 20; t++)
{
s = t & 0x0f;
if (t >= 16) {
if (t >= 16)
W(s) = S(1, W((s + 13) & 0x0f) ^ W((s + 8) & 0x0f) ^ W((s + 2) & 0x0f) ^ W(s));
}
tmp = S(5, a) + F0(b, c, d) + e + W(s) + K(t);
e = d; d = c; c = S(30, b); b = a; a = tmp;
e = d;
d = c;
c = S(30, b);
b = a;
a = tmp;
}
for (t = 20; t < 40; t++) {
for (t = 20; t < 40; t++)
{
s = t & 0x0f;
W(s) = S(1, W((s + 13) & 0x0f) ^ W((s + 8) & 0x0f) ^ W((s + 2) & 0x0f) ^ W(s));
tmp = S(5, a) + F1(b, c, d) + e + W(s) + K(t);
e = d; d = c; c = S(30, b); b = a; a = tmp;
e = d;
d = c;
c = S(30, b);
b = a;
a = tmp;
}
for (t = 40; t < 60; t++) {
for (t = 40; t < 60; t++)
{
s = t & 0x0f;
W(s) = S(1, W((s + 13) & 0x0f) ^ W((s + 8) & 0x0f) ^ W((s + 2) & 0x0f) ^ W(s));
tmp = S(5, a) + F2(b, c, d) + e + W(s) + K(t);
e = d; d = c; c = S(30, b); b = a; a = tmp;
e = d;
d = c;
c = S(30, b);
b = a;
a = tmp;
}
for (t = 60; t < 80; t++) {
for (t = 60; t < 80; t++)
{
s = t & 0x0f;
W(s) = S(1, W((s + 13) & 0x0f) ^ W((s + 8) & 0x0f) ^ W((s + 2) & 0x0f) ^ W(s));
tmp = S(5, a) + F3(b, c, d) + e + W(s) + K(t);
e = d; d = c; c = S(30, b); b = a; a = tmp;
e = d;
d = c;
c = S(30, b);
b = a;
a = tmp;
}
H(0) = H(0) + a;
@ -191,7 +253,8 @@ sha1_pad(ctxt)
padstart = COUNT % 64;
padlen = 64 - padstart;
if (padlen < 8) {
if (padlen < 8)
{
bzero(&ctxt->m.b8[padstart], padlen);
COUNT += padlen;
COUNT %= 64;
@ -203,15 +266,23 @@ sha1_pad(ctxt)
COUNT += (padlen - 8);
COUNT %= 64;
#if BYTE_ORDER == BIG_ENDIAN
PUTPAD(ctxt->c.b8[0]); PUTPAD(ctxt->c.b8[1]);
PUTPAD(ctxt->c.b8[2]); PUTPAD(ctxt->c.b8[3]);
PUTPAD(ctxt->c.b8[4]); PUTPAD(ctxt->c.b8[5]);
PUTPAD(ctxt->c.b8[6]); PUTPAD(ctxt->c.b8[7]);
PUTPAD(ctxt->c.b8[0]);
PUTPAD(ctxt->c.b8[1]);
PUTPAD(ctxt->c.b8[2]);
PUTPAD(ctxt->c.b8[3]);
PUTPAD(ctxt->c.b8[4]);
PUTPAD(ctxt->c.b8[5]);
PUTPAD(ctxt->c.b8[6]);
PUTPAD(ctxt->c.b8[7]);
#else
PUTPAD(ctxt->c.b8[7]); PUTPAD(ctxt->c.b8[6]);
PUTPAD(ctxt->c.b8[5]); PUTPAD(ctxt->c.b8[4]);
PUTPAD(ctxt->c.b8[3]); PUTPAD(ctxt->c.b8[2]);
PUTPAD(ctxt->c.b8[1]); PUTPAD(ctxt->c.b8[0]);
PUTPAD(ctxt->c.b8[7]);
PUTPAD(ctxt->c.b8[6]);
PUTPAD(ctxt->c.b8[5]);
PUTPAD(ctxt->c.b8[4]);
PUTPAD(ctxt->c.b8[3]);
PUTPAD(ctxt->c.b8[2]);
PUTPAD(ctxt->c.b8[1]);
PUTPAD(ctxt->c.b8[0]);
#endif
}
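Both PUTPAD sequences above append the 64-bit bit count most-significant byte first, as SHA-1 requires; on big-endian hosts the stored count bytes are already in that order, on little-endian hosts they are emitted in reverse. A byte-order-independent sketch of the same step (hypothetical helper, assuming a plain 64-bit count):

#include <stdint.h>

/* Illustrative only: store a 64-bit bit count most-significant byte
 * first, which is what both PUTPAD sequences above produce. */
static void
store_be64(uint8_t out[8], uint64_t bits)
{
	int			i;

	for (i = 0; i < 8; i++)
		out[i] = (uint8_t) (bits >> (8 * (7 - i)));
}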
@ -230,7 +301,8 @@ sha1_loop(ctxt, input0, len)
input = (const uint8 *) input0;
off = 0;
while (off < len) {
while (off < len)
{
gapstart = COUNT % 64;
gaplen = 64 - gapstart;
@ -257,16 +329,26 @@ sha1_result(ctxt, digest0)
#if BYTE_ORDER == BIG_ENDIAN
bcopy(&ctxt->h.b8[0], digest, 20);
#else
digest[0] = ctxt->h.b8[3]; digest[1] = ctxt->h.b8[2];
digest[2] = ctxt->h.b8[1]; digest[3] = ctxt->h.b8[0];
digest[4] = ctxt->h.b8[7]; digest[5] = ctxt->h.b8[6];
digest[6] = ctxt->h.b8[5]; digest[7] = ctxt->h.b8[4];
digest[8] = ctxt->h.b8[11]; digest[9] = ctxt->h.b8[10];
digest[10] = ctxt->h.b8[9]; digest[11] = ctxt->h.b8[8];
digest[12] = ctxt->h.b8[15]; digest[13] = ctxt->h.b8[14];
digest[14] = ctxt->h.b8[13]; digest[15] = ctxt->h.b8[12];
digest[16] = ctxt->h.b8[19]; digest[17] = ctxt->h.b8[18];
digest[18] = ctxt->h.b8[17]; digest[19] = ctxt->h.b8[16];
digest[0] = ctxt->h.b8[3];
digest[1] = ctxt->h.b8[2];
digest[2] = ctxt->h.b8[1];
digest[3] = ctxt->h.b8[0];
digest[4] = ctxt->h.b8[7];
digest[5] = ctxt->h.b8[6];
digest[6] = ctxt->h.b8[5];
digest[7] = ctxt->h.b8[4];
digest[8] = ctxt->h.b8[11];
digest[9] = ctxt->h.b8[10];
digest[10] = ctxt->h.b8[9];
digest[11] = ctxt->h.b8[8];
digest[12] = ctxt->h.b8[15];
digest[13] = ctxt->h.b8[14];
digest[14] = ctxt->h.b8[13];
digest[15] = ctxt->h.b8[12];
digest[16] = ctxt->h.b8[19];
digest[17] = ctxt->h.b8[18];
digest[18] = ctxt->h.b8[17];
digest[19] = ctxt->h.b8[16];
#endif
}
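For reference, the S(n, x) macro used throughout sha1_step() is a 32-bit circular left rotate, and the four 20-round loops differ only in which of F0..F3 and which K(t) constant they apply. A small sketch of those two pieces (hypothetical helper names, mirroring the macros rather than replacing them):

#include <stdint.h>

/* The S(n, x) macro above is a 32-bit circular left rotate
 * (n must be between 1 and 31; SHA-1 uses 1, 5 and 30). */
static uint32_t
rol32(uint32_t x, unsigned int n)
{
	return (x << n) | (x >> (32 - n));
}

/* Round function selection: F0 for t in 0..19, F1 for 20..39,
 * F2 for 40..59, F3 for 60..79, matching the four loops above. */
static uint32_t
sha1_f(unsigned int t, uint32_t b, uint32_t c, uint32_t d)
{
	if (t < 20)
		return (b & c) | (~b & d);			/* F0 */
	if (t < 40)
		return b ^ c ^ d;					/* F1 */
	if (t < 60)
		return (b & c) | (b & d) | (c & d);	/* F2 */
	return b ^ c ^ d;						/* F3 */
}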


@ -1,4 +1,4 @@
/* $Id: sha1.h,v 1.3 2001/01/09 16:07:13 momjian Exp $ */
/* $Id: sha1.h,v 1.4 2001/03/22 03:59:10 momjian Exp $ */
/* $KAME: sha1.h,v 1.4 2000/02/22 14:01:18 itojun Exp $ */
/*
@ -38,16 +38,20 @@
#ifndef _NETINET6_SHA1_H_
#define _NETINET6_SHA1_H_
struct sha1_ctxt {
union {
struct sha1_ctxt
{
union
{
uint8 b8[20];
uint32 b32[5];
} h;
union {
union
{
uint8 b8[8];
uint64 b64[1];
} c;
union {
union
{
uint8 b8[64];
uint32 b32[16];
} m;
@ -61,6 +65,7 @@ extern void sha1_result (struct sha1_ctxt *, caddr_t);
/* compatibility with other SHA1 source codes */
typedef struct sha1_ctxt SHA1_CTX;
#define SHA1Init(x) sha1_init((x))
#define SHA1Update(x, y, z) sha1_loop((x), (y), (z))
#define SHA1Final(x, y) sha1_result((y), (x))


@ -19,10 +19,12 @@ PG_FUNCTION_INFO_V1(_rserv_debug_);
Datum _rserv_log_(PG_FUNCTION_ARGS);
Datum _rserv_sync_(PG_FUNCTION_ARGS);
Datum _rserv_debug_(PG_FUNCTION_ARGS);
#else
HeapTuple _rserv_log_(void);
int32 _rserv_sync_(int32);
int32 _rserv_debug_(int32);
#endif
static int debug = 0;
@ -203,6 +205,7 @@ _rserv_sync_(int32 server)
{
#ifdef PG_FUNCTION_INFO_V1
int32 server = PG_GETARG_INT32(0);
#endif
char sql[8192];
char buf[8192];
@ -248,6 +251,7 @@ _rserv_debug_(int32 newval)
{
#ifdef PG_FUNCTION_INFO_V1
int32 newval = PG_GETARG_INT32(0);
#endif
int32 oldval = debug;
@ -271,21 +275,27 @@ OutputValue(char *key, char *buf, int size)
{
switch (*key)
{
case '\\': subst ="\\\\";
case '\\':
subst = "\\\\";
slen = 2;
break;
case ' ': subst = "\\011";
case ' ':
subst = "\\011";
slen = 4;
break;
case '\n': subst = "\\012";
case '\n':
subst = "\\012";
slen = 4;
break;
case '\'': subst = "\\047";
case '\'':
subst = "\\047";
slen = 4;
break;
case '\0': out[i] = 0;
case '\0':
out[i] = 0;
return (out);
default: slen = 1;
default:
slen = 1;
break;
}


@ -20,30 +20,32 @@ unsigned int parse_buffer_pos( void );
extern void seg_flush_scanner_buffer(void); /* defined in segscan.l */
void set_parse_buffer( char* s )
void
set_parse_buffer(char *s)
{
PARSE_BUFFER = s;
PARSE_BUFFER_SIZE = strlen(s);
if ( PARSE_BUFFER_SIZE == 0 ) {
if (PARSE_BUFFER_SIZE == 0)
elog(ERROR, "seg_in: can't parse an empty string");
}
PARSE_BUFFER_PTR = PARSE_BUFFER;
SCANNER_POS = 0;
}
void reset_parse_buffer( void )
void
reset_parse_buffer(void)
{
PARSE_BUFFER_PTR = PARSE_BUFFER;
SCANNER_POS = 0;
seg_flush_scanner_buffer();
}
int read_parse_buffer( void )
int
read_parse_buffer(void)
{
int c;
/*
c = *PARSE_BUFFER_PTR++;
SCANNER_POS++;
* c = *PARSE_BUFFER_PTR++; SCANNER_POS++;
*/
c = PARSE_BUFFER[SCANNER_POS];
if (SCANNER_POS < PARSE_BUFFER_SIZE)
@ -51,29 +53,32 @@ int read_parse_buffer( void )
return c;
}
char * parse_buffer( void )
char *
parse_buffer(void)
{
return PARSE_BUFFER;
}
unsigned int parse_buffer_curr_char( void )
unsigned int
parse_buffer_curr_char(void)
{
return PARSE_BUFFER[SCANNER_POS];
}
char * parse_buffer_ptr( void )
char *
parse_buffer_ptr(void)
{
return PARSE_BUFFER_PTR;
}
unsigned int parse_buffer_pos( void )
unsigned int
parse_buffer_pos(void)
{
return SCANNER_POS;
}
unsigned int parse_buffer_size( void )
unsigned int
parse_buffer_size(void)
{
return PARSE_BUFFER_SIZE;
}
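These helpers expose the parse buffer to the flex scanner: read_parse_buffer() hands back the character at the current position and advances until the end of the input is reached. A hypothetical illustration of draining the buffer this way (not part of the patch; it assumes the functions defined above):

#include <stdio.h>

/* Hypothetical illustration only: drain the parse buffer one character
 * at a time, the way the flex scanner consumes its input. */
static void
dump_parse_buffer_demo(void)
{
	char		input[] = "1 .. 3";

	set_parse_buffer(input);
	while (parse_buffer_pos() < parse_buffer_size())
		putchar(read_parse_buffer());
	putchar('\n');
}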


@ -27,6 +27,7 @@
extern void set_parse_buffer(char *str);
extern int seg_yyparse();
/*
extern int seg_yydebug;
*/
@ -99,12 +100,14 @@ SEG *
seg_in(char *str)
{
SEG *result = palloc(sizeof(SEG));
set_parse_buffer(str);
/*
seg_yydebug = 1;
* seg_yydebug = 1;
*/
if ( seg_yyparse(result) != 0 ) {
if (seg_yyparse(result) != 0)
{
pfree(result);
return NULL;
}
@ -125,31 +128,38 @@ seg_out(SEG *seg)
char *result;
char *p;
if (seg == NULL) return(NULL);
if (seg == NULL)
return (NULL);
p = result = (char *) palloc(40);
if ( seg->l_ext == '>' || seg->l_ext == '<' || seg->l_ext == '~' ) {
if (seg->l_ext == '>' || seg->l_ext == '<' || seg->l_ext == '~')
p += sprintf(p, "%c", seg->l_ext);
}
if ( seg->lower == seg->upper && seg->l_ext == seg->u_ext ) {
/* indicates that this interval was built by seg_in off a single point */
if (seg->lower == seg->upper && seg->l_ext == seg->u_ext)
{
/*
* indicates that this interval was built by seg_in off a single
* point
*/
p += restore(p, seg->lower, seg->l_sigd);
}
else {
if ( seg->l_ext != '-' ) {
else
{
if (seg->l_ext != '-')
{
/* print the lower boundary if it exists */
p += restore(p, seg->lower, seg->l_sigd);
p += sprintf(p, " ");
}
p += sprintf(p, "..");
if ( seg->u_ext != '-' ) {
if (seg->u_ext != '-')
{
/* print the upper boundary if it exists */
p += sprintf(p, " ");
if ( seg->u_ext == '>' || seg->u_ext == '<' || seg->l_ext == '~' ) {
if (seg->u_ext == '>' || seg->u_ext == '<' || seg->l_ext == '~')
p += sprintf(p, "%c", seg->u_ext);
}
p += restore(p, seg->upper, seg->u_sigd);
}
}
@ -209,9 +219,10 @@ gseg_consistent(GISTENTRY *entry,
SEG * query,
StrategyNumber strategy)
{
/*
** if entry is not leaf, use gseg_internal_consistent,
** else use gseg_leaf_consistent
* * if entry is not leaf, use gseg_internal_consistent, * else use
* gseg_leaf_consistent
*/
if (GIST_LEAF(entry))
return (gseg_leaf_consistent((SEG *) (entry->pred), query, strategy));
@ -226,7 +237,8 @@ gseg_consistent(GISTENTRY *entry,
SEG *
gseg_union(bytea *entryvec, int *sizep)
{
int numranges, i;
int numranges,
i;
SEG *out = (SEG *) NULL;
SEG *tmp;
@ -238,17 +250,21 @@ gseg_union(bytea *entryvec, int *sizep)
tmp = (SEG *) (((GISTENTRY *) (VARDATA(entryvec)))[0]).pred;
*sizep = sizeof(SEG);
for (i = 1; i < numranges; i++) {
for (i = 1; i < numranges; i++)
{
out = gseg_binary_union(tmp, (SEG *)
(((GISTENTRY *) (VARDATA(entryvec)))[i]).pred,
sizep);
#ifdef GIST_DEBUG
/*
fprintf(stderr, "\t%s ^ %s -> %s\n", seg_out(tmp), seg_out((SEG *)(((GISTENTRY *)(VARDATA(entryvec)))[i]).pred), seg_out(out));
* fprintf(stderr, "\t%s ^ %s -> %s\n", seg_out(tmp), seg_out((SEG
* *)(((GISTENTRY *)(VARDATA(entryvec)))[i]).pred), seg_out(out));
*/
#endif
if (i > 1) pfree(tmp);
if (i > 1)
pfree(tmp);
tmp = out;
}
@ -279,7 +295,8 @@ float *
gseg_penalty(GISTENTRY *origentry, GISTENTRY *newentry, float *result)
{
Datum ud;
float tmp1, tmp2;
float tmp1,
tmp2;
ud = (Datum) seg_union((SEG *) (origentry->pred), (SEG *) (newentry->pred));
rt_seg_size((SEG *) ud, &tmp1);
@ -305,18 +322,30 @@ GIST_SPLITVEC *
gseg_picksplit(bytea *entryvec,
GIST_SPLITVEC *v)
{
OffsetNumber i, j;
SEG *datum_alpha, *datum_beta;
SEG *datum_l, *datum_r;
SEG *union_d, *union_dl, *union_dr;
OffsetNumber i,
j;
SEG *datum_alpha,
*datum_beta;
SEG *datum_l,
*datum_r;
SEG *union_d,
*union_dl,
*union_dr;
SEG *inter_d;
bool firsttime;
float size_alpha, size_beta, size_union, size_inter;
float size_waste, waste;
float size_l, size_r;
float size_alpha,
size_beta,
size_union,
size_inter;
float size_waste,
waste;
float size_l,
size_r;
int nbytes;
OffsetNumber seed_1 = 0, seed_2 = 0;
OffsetNumber *left, *right;
OffsetNumber seed_1 = 0,
seed_2 = 0;
OffsetNumber *left,
*right;
OffsetNumber maxoff;
#ifdef GIST_DEBUG
@ -331,9 +360,11 @@ gseg_picksplit(bytea *entryvec,
firsttime = true;
waste = 0.0;
for (i = FirstOffsetNumber; i < maxoff; i = OffsetNumberNext(i)) {
for (i = FirstOffsetNumber; i < maxoff; i = OffsetNumberNext(i))
{
datum_alpha = (SEG *) (((GISTENTRY *) (VARDATA(entryvec)))[i].pred);
for (j = OffsetNumberNext(i); j <= maxoff; j = OffsetNumberNext(j)) {
for (j = OffsetNumberNext(i); j <= maxoff; j = OffsetNumberNext(j))
{
datum_beta = (SEG *) (((GISTENTRY *) (VARDATA(entryvec)))[j].pred);
/* compute the wasted space by unioning these guys */
@ -350,11 +381,12 @@ gseg_picksplit(bytea *entryvec,
pfree(inter_d);
/*
* are these a more promising split than what we've
* already seen?
* are these a more promising split than what we've already
* seen?
*/
if (size_waste > waste || firsttime) {
if (size_waste > waste || firsttime)
{
waste = size_waste;
seed_1 = i;
seed_2 = j;
@ -377,31 +409,34 @@ gseg_picksplit(bytea *entryvec,
/*
* Now split up the regions between the two seeds. An important
* property of this split algorithm is that the split vector v
* has the indices of items to be split in order in its left and
* right vectors. We exploit this property by doing a merge in
* the code that actually splits the page.
* property of this split algorithm is that the split vector v has the
* indices of items to be split in order in its left and right
* vectors. We exploit this property by doing a merge in the code
* that actually splits the page.
*
* For efficiency, we also place the new index tuple in this loop.
* This is handled at the very end, when we have placed all the
* existing tuples and i == maxoff + 1.
* For efficiency, we also place the new index tuple in this loop. This
* is handled at the very end, when we have placed all the existing
* tuples and i == maxoff + 1.
*/
maxoff = OffsetNumberNext(maxoff);
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i)) {
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
{
/*
* If we've already decided where to place this item, just
* put it on the right list. Otherwise, we need to figure
* out which page needs the least enlargement in order to
* store the item.
* If we've already decided where to place this item, just put it
* on the right list. Otherwise, we need to figure out which page
* needs the least enlargement in order to store the item.
*/
if (i == seed_1) {
if (i == seed_1)
{
*left++ = i;
v->spl_nleft++;
continue;
} else if (i == seed_2) {
}
else if (i == seed_2)
{
*right++ = i;
v->spl_nright++;
continue;
@ -415,14 +450,17 @@ gseg_picksplit(bytea *entryvec,
rt_seg_size((SEG *) union_dr, &size_beta);
/* pick which page to add it to */
if (size_alpha - size_l < size_beta - size_r) {
if (size_alpha - size_l < size_beta - size_r)
{
pfree(datum_l);
pfree(union_dr);
datum_l = union_dl;
size_l = size_alpha;
*left++ = i;
v->spl_nleft++;
} else {
}
else
{
pfree(datum_r);
pfree(union_dl);
datum_r = union_dr;
@ -447,7 +485,8 @@ gseg_same(SEG *b1, SEG *b2, bool *result)
{
if (seg_same(b1, b2))
*result = TRUE;
else *result = FALSE;
else
*result = FALSE;
#ifdef GIST_DEBUG
fprintf(stderr, "same: %s\n", (*result ? "TRUE" : "FALSE"));
@ -470,7 +509,8 @@ gseg_leaf_consistent(SEG *key,
fprintf(stderr, "leaf_consistent, %d\n", strategy);
#endif
switch(strategy) {
switch (strategy)
{
case RTLeftStrategyNumber:
retval = (bool) seg_left(key, query);
break;
@ -512,7 +552,8 @@ gseg_internal_consistent(SEG *key,
fprintf(stderr, "internal_consistent, %d\n", strategy);
#endif
switch(strategy) {
switch (strategy)
{
case RTLeftStrategyNumber:
case RTOverLeftStrategyNumber:
retval = (bool) seg_over_left(key, query);
@ -723,6 +764,7 @@ seg_size(SEG *a)
int32
seg_cmp(SEG * a, SEG * b)
{
/*
* First compare on lower boundary position
*/
@ -730,13 +772,13 @@ seg_cmp(SEG *a, SEG *b)
return -1;
if (a->lower > b->lower)
return 1;
/*
* a->lower == b->lower, so consider type of boundary.
*
* A '-' lower bound is < any other kind (this could only be relevant
* if -HUGE is used as a regular data value).
* A '<' lower bound is < any other kind except '-'.
* A '>' lower bound is > any other kind.
* A '-' lower bound is < any other kind (this could only be relevant if
* -HUGE is used as a regular data value). A '<' lower bound is < any
* other kind except '-'. A '>' lower bound is > any other kind.
*/
if (a->l_ext != b->l_ext)
{
@ -753,13 +795,17 @@ seg_cmp(SEG *a, SEG *b)
if (b->l_ext == '>')
return -1;
}
/*
* For other boundary types, consider # of significant digits first.
*/
if ( a->l_sigd < b->l_sigd ) /* (a) is blurred and is likely to include (b) */
if (a->l_sigd < b->l_sigd) /* (a) is blurred and is likely to include
* (b) */
return -1;
if ( a->l_sigd > b->l_sigd ) /* (a) is less blurred and is likely to be included in (b) */
if (a->l_sigd > b->l_sigd) /* (a) is less blurred and is likely to be
* included in (b) */
return 1;
/*
* For same # of digits, an approximate boundary is more blurred than
* exact.
@ -784,13 +830,13 @@ seg_cmp(SEG *a, SEG *b)
return -1;
if (a->upper > b->upper)
return 1;
/*
* a->upper == b->upper, so consider type of boundary.
*
* A '-' upper bound is > any other kind (this could only be relevant
* if HUGE is used as a regular data value).
* A '<' upper bound is < any other kind.
* A '>' upper bound is > any other kind except '-'.
* A '-' upper bound is > any other kind (this could only be relevant if
* HUGE is used as a regular data value). A '<' upper bound is < any
* other kind. A '>' upper bound is > any other kind except '-'.
*/
if (a->u_ext != b->u_ext)
{
@ -807,14 +853,18 @@ seg_cmp(SEG *a, SEG *b)
if (b->u_ext == '>')
return -1;
}
/*
* For other boundary types, consider # of significant digits first.
* Note result here is converse of the lower-boundary case.
*/
if ( a->u_sigd < b->u_sigd ) /* (a) is blurred and is likely to include (b) */
if (a->u_sigd < b->u_sigd) /* (a) is blurred and is likely to include
* (b) */
return 1;
if ( a->u_sigd > b->u_sigd ) /* (a) is less blurred and is likely to be included in (b) */
if (a->u_sigd > b->u_sigd) /* (a) is less blurred and is likely to be
* included in (b) */
return -1;
/*
* For same # of digits, an approximate boundary is more blurred than
* exact. Again, result is converse of lower-boundary case.
@ -875,7 +925,8 @@ seg_different(SEG *a, SEG *b)
* is similar to %.ng except it prints 8.00 where %.ng would
* print 8
*/
static int restore ( char * result, float val, int n )
static int
restore(char *result, float val, int n)
{
static char efmt[8] = {'%', '-', '1', '5', '.', '#', 'e', 0};
char buf[25] = {
@ -888,106 +939,129 @@ static int restore ( char * result, float val, int n )
char *p;
char *mant;
int exp;
int i, dp, sign;
int i,
dp,
sign;
/* put a cap on the number of significant digits to avoid
nonsense in the output */
/*
* put a cap on the number of siugnificant digits to avoid nonsense in
* the output
*/
n = min(n, FLT_DIG);
/* remember the sign */
sign = (val < 0 ? 1 : 0);
efmt[5] = '0' + (n-1)%10; /* makes %-15.(n-1)e -- this format guarantees that
the exponent is always present */
efmt[5] = '0' + (n - 1) % 10; /* makes %-15.(n-1)e -- this
* format guarantees that the
* exponent is always present */
sprintf(result, efmt, val);
/* trim the spaces left by the %e */
for( p = result; *p != ' '; p++ ); *p = '\0';
for (p = result; *p != ' '; p++);
*p = '\0';
/* get the exponent */
mant = (char *) strtok(strdup(result), "e");
exp = atoi(strtok(NULL, "e"));
if ( exp == 0 ) {
if (exp == 0)
{
/* use the supplied mantissa with sign */
strcpy((char *) index(result, 'e'), "");
}
else {
if ( abs( exp ) <= 4 ) {
/* remove the decimal point from the mantissa and write the digits to the buf array */
for( p = result + sign, i = 10, dp = 0; *p != 'e'; p++, i++ ) {
else
{
if (abs(exp) <= 4)
{
/*
* remove the decimal point from the mantissa and write the
* digits to the buf array
*/
for (p = result + sign, i = 10, dp = 0; *p != 'e'; p++, i++)
{
buf[i] = *p;
if( *p == '.' ) {
if (*p == '.')
{
dp = i--; /* skip the decimal point */
}
}
if (dp == 0) dp = i--; /* no decimal point was found in the above for() loop */
if (dp == 0)
dp = i--; /* no decimal point was found in the above
* for() loop */
if (exp > 0)
{
if (dp - 10 + exp >= n)
{
if ( exp > 0 ) {
if ( dp - 10 + exp >= n ) {
/*
the decimal point is behind the last significant digit;
the digits in between must be converted to the exponent
and the decimal point placed after the first digit
* the decimal point is behind the last significant
* digit; the digits in between must be converted to
* the exponent and the decimal point placed after the
* first digit
*/
exp = dp - 10 + exp - n;
buf[10 + n] = '\0';
/* insert the decimal point */
if ( n > 1 ) {
if (n > 1)
{
dp = 11;
for ( i = 23; i > dp; i-- ) {
for (i = 23; i > dp; i--)
buf[i] = buf[i - 1];
}
buf[dp] = '.';
}
/* adjust the exponent by the number of digits after the decimal point */
if ( n > 1 ) {
/*
* adjust the exponent by the number of digits after
* the decimal point
*/
if (n > 1)
sprintf(&buf[11 + n], "e%d", exp + n - 1);
}
else {
else
sprintf(&buf[11], "e%d", exp + n - 1);
}
if ( sign ) {
if (sign)
{
buf[9] = '-';
strcpy(result, &buf[9]);
}
else {
else
strcpy(result, &buf[10]);
}
}
else { /* insert the decimal point */
else
{ /* insert the decimal point */
dp += exp;
for ( i = 23; i > dp; i-- ) {
for (i = 23; i > dp; i--)
buf[i] = buf[i - 1];
}
buf[11 + n] = '\0';
buf[dp] = '.';
if ( sign ) {
if (sign)
{
buf[9] = '-';
strcpy(result, &buf[9]);
}
else {
else
strcpy(result, &buf[10]);
}
}
}
else { /* exp <= 0 */
else
{ /* exp <= 0 */
dp += exp - 1;
buf[10 + n] = '\0';
buf[dp] = '.';
if ( sign ) {
if (sign)
{
buf[dp - 2] = '-';
strcpy(result, &buf[dp - 2]);
}
else {
else
strcpy(result, &buf[dp - 1]);
}
}
}
/* do nothing for abs(exp) > 4; %e must be OK */
/* just get rid of zeroes after [eE]- and +zeroes after [Ee]. */
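restore() first renders the value with a left-justified %e conversion whose precision digit is patched into the efmt template at run time, and then rewrites that intermediate string into plain notation. A sketch of just the format-patching step (illustrative only, assuming a single-digit precision as the code does):

#include <stdio.h>

/* Illustrative only: patch the precision digit into the %e template the
 * same way restore() does, and show the intermediate representation. */
static void
show_efmt(float val, int n)
{
	char		efmt[8] = {'%', '-', '1', '5', '.', '#', 'e', 0};
	char		buf[32];

	efmt[5] = '0' + (n - 1) % 10;	/* "%-15.2e" for n == 3 */
	snprintf(buf, sizeof(buf), efmt, val);
	printf("%s\n", buf);			/* e.g. "8.00e+00" padded to 15 chars */
}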
@ -1023,27 +1097,36 @@ seg_contains_float8(SEG *a, float8 *b)
/* find out the number of significant digits in a string representing
* a floating point number
*/
int significant_digits ( char* s )
int
significant_digits(char *s)
{
char *p = s;
int n, c, zeroes;
int n,
c,
zeroes;
zeroes = 1;
/* skip leading zeroes and sign */
for (c = *p; (c == '0' || c == '+' || c == '-') && c != 0; c = *(++p));
/* skip decimal point and following zeroes */
for ( c = *p; (c == '0' || c == '.' ) && c != 0; c = *(++p) ) {
if ( c != '.') zeroes++;
for (c = *p; (c == '0' || c == '.') && c != 0; c = *(++p))
{
if (c != '.')
zeroes++;
}
/* count significant digits (n) */
for ( c = *p, n = 0; c != 0; c = *(++p) ) {
if ( !( (c >= '0' && c <= '9') || (c == '.') ) ) break;
if ( c != '.') n++;
for (c = *p, n = 0; c != 0; c = *(++p))
{
if (!((c >= '0' && c <= '9') || (c == '.')))
break;
if (c != '.')
n++;
}
if (!n) return ( zeroes );
if (!n)
return (zeroes);
return (n);
}
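significant_digits() skips the sign, leading zeroes, and the decimal point, then counts the remaining digit characters. A few hypothetical sanity checks, assuming the definition above:

#include <assert.h>

/* Hypothetical sanity checks: sign, leading zeroes and the decimal
 * point are ignored; the remaining digit characters are counted. */
static void
check_significant_digits(void)
{
	assert(significant_digits("3.14") == 3);
	assert(significant_digits("0.0045") == 2);
	assert(significant_digits("-2.50") == 3);
}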


@ -1,4 +1,5 @@
typedef struct SEG {
typedef struct SEG
{
float lower;
float upper;
char l_sigd;


@ -1,4 +1,4 @@
/* $Header: /cvsroot/pgsql/contrib/soundex/Attic/soundex.c,v 1.10 2001/02/10 02:31:26 tgl Exp $ */
/* $Header: /cvsroot/pgsql/contrib/soundex/Attic/soundex.c,v 1.11 2001/03/22 03:59:10 momjian Exp $ */
#include "postgres.h"
#include <ctype.h>
@ -42,6 +42,7 @@ text_soundex(PG_FUNCTION_ARGS)
/* ABCDEFGHIJKLMNOPQRSTUVWXYZ */
static const char *soundex_table = "01230120022455012623010202";
#define soundex_code(letter) soundex_table[toupper((unsigned char) (letter)) - 'A']
@ -114,4 +115,5 @@ main (int argc, char *argv[])
return 0;
}
}
#endif /* SOUNDEX_TEST */
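soundex_table encodes the classic Soundex digit classes, indexed by the letter's position in the alphabet, and soundex_code() folds case before the lookup. A small self-contained check of the mapping (demo names are hypothetical; the table is duplicated rather than reusing the macro above):

#include <ctype.h>
#include <assert.h>

/* Hypothetical duplicate of the lookup above: letters map to the classic
 * Soundex digit classes (B,F,P,V -> 1; C,G,J,K,Q,S,X,Z -> 2; D,T -> 3;
 * L -> 4; M,N -> 5; R -> 6; vowels and H,W,Y -> 0). */
static const char *demo_soundex_table = "01230120022455012623010202";

#define demo_soundex_code(letter) \
	demo_soundex_table[toupper((unsigned char) (letter)) - 'A']

static void
check_soundex_codes(void)
{
	assert(demo_soundex_code('b') == '1');
	assert(demo_soundex_code('R') == '6');
	assert(demo_soundex_code('a') == '0');
}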


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/contrib/vacuumlo/vacuumlo.c,v 1.8 2001/01/24 19:42:45 momjian Exp $
* $Header: /cvsroot/pgsql/contrib/vacuumlo/vacuumlo.c,v 1.9 2001/03/22 03:59:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -76,6 +76,7 @@ vacuumlo(char *database, int verbose)
return -1;
}
PQclear(res);
/*
* Vacuum the temp table so that planner will generate decent plans
* for the DELETEs below.
@ -96,13 +97,13 @@ vacuumlo(char *database, int verbose)
/*
* Now find any candidate tables who have columns of type oid.
*
* NOTE: the temp table formed above is ignored, because its real
* table name will be pg_something. Also, pg_largeobject will be
* ignored. If either of these were scanned, obviously we'd end up
* with nothing to delete...
* NOTE: the temp table formed above is ignored, because its real table
* name will be pg_something. Also, pg_largeobject will be ignored.
* If either of these were scanned, obviously we'd end up with nothing
* to delete...
*
* NOTE: the system oid column is ignored, as it has attnum < 1.
* This shouldn't matter for correctness, but it saves time.
* NOTE: the system oid column is ignored, as it has attnum < 1. This
* shouldn't matter for correctness, but it saves time.
*/
buf[0] = '\0';
strcat(buf, "SELECT c.relname, a.attname ");
@ -135,9 +136,9 @@ vacuumlo(char *database, int verbose)
fprintf(stdout, "Checking %s in %s\n", field, table);
/*
* We use a DELETE with implicit join for efficiency. This
* is a Postgres-ism and not portable to other DBMSs, but
* then this whole program is a Postgres-ism.
* We use a DELETE with implicit join for efficiency. This is a
* Postgres-ism and not portable to other DBMSs, but then this
* whole program is a Postgres-ism.
*/
sprintf(buf, "DELETE FROM vacuum_l WHERE lo = \"%s\".\"%s\" ",
table, field);
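The sprintf above builds the leading part of the per-column DELETE; for a hypothetical table "images" with an oid column "picture" the fragment would read DELETE FROM vacuum_l WHERE lo = "images"."picture". A trivial sketch of that expansion (illustrative only, with hypothetical table and column names):

#include <stdio.h>

/* Illustrative only: what the fragment built by the sprintf above looks
 * like for a hypothetical table "images" with an oid column "picture". */
static void
show_delete_fragment(void)
{
	char		buf[8192];

	sprintf(buf, "DELETE FROM vacuum_l WHERE lo = \"%s\".\"%s\" ",
			"images", "picture");
	puts(buf);	/* DELETE FROM vacuum_l WHERE lo = "images"."picture" */
}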
@ -159,8 +160,8 @@ vacuumlo(char *database, int verbose)
/*
* Run the actual deletes in a single transaction. Note that this
* would be a bad idea in pre-7.1 Postgres releases (since rolling
* back a table delete used to cause problems), but it should
* be safe now.
* back a table delete used to cause problems), but it should be safe
* now.
*/
res = PQexec(conn, "begin");
PQclear(res);


@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.69 2001/01/24 19:42:46 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.70 2001/03/22 03:59:11 momjian Exp $
*
* NOTES
* The old interface functions have been converted to macros
@ -306,8 +306,8 @@ nocachegetattr(HeapTuple tuple,
int j;
/*
* In for(), we test <= and not < because we want to see
* if we can go past it in initializing offsets.
* In for(), we test <= and not < because we want to see if we
* can go past it in initializing offsets.
*/
for (j = 0; j <= attnum; j++)
{
@ -321,9 +321,9 @@ nocachegetattr(HeapTuple tuple,
}
/*
* If slow is false, and we got here, we know that we have a tuple with
* no nulls or varlenas before the target attribute. If possible, we
* also want to initialize the remainder of the attribute cached
* If slow is false, and we got here, we know that we have a tuple
* with no nulls or varlenas before the target attribute. If possible,
* we also want to initialize the remainder of the attribute cached
* offset values.
*/
if (!slow)


@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.52 2001/02/22 21:48:48 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.53 2001/03/22 03:59:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -45,9 +45,11 @@ index_formtuple(TupleDesc tupleDescriptor,
bool hasnull = false;
uint16 tupmask = 0;
int numberOfAttributes = tupleDescriptor->natts;
#ifdef TOAST_INDEX_HACK
Datum untoasted_value[INDEX_MAX_KEYS];
bool untoasted_free[INDEX_MAX_KEYS];
#endif
if (numberOfAttributes > INDEX_MAX_KEYS)
@ -79,8 +81,8 @@ index_formtuple(TupleDesc tupleDescriptor,
}
/*
* If value is above size target, and is of a compressible datatype,
* try to compress it in-line.
* If value is above size target, and is of a compressible
* datatype, try to compress it in-line.
*/
if (VARATT_SIZE(untoasted_value[i]) > TOAST_INDEX_TARGET &&
!VARATT_IS_EXTENDED(untoasted_value[i]) &&
@ -146,8 +148,8 @@ index_formtuple(TupleDesc tupleDescriptor,
/*
* We do this because DataFill wants to initialize a "tupmask" which
* is used for HeapTuples, but we want an indextuple infomask. The
* only relevant info is the "has variable attributes" field.
* We have already set the hasnull bit above.
* only relevant info is the "has variable attributes" field. We have
* already set the hasnull bit above.
*/
if (tupmask & HEAP_HASVARLENA)
@ -315,9 +317,9 @@ nocache_index_getattr(IndexTuple tup,
}
/*
* If slow is false, and we got here, we know that we have a tuple with
* no nulls or varlenas before the target attribute. If possible, we
* also want to initialize the remainder of the attribute cached
* If slow is false, and we got here, we know that we have a tuple
* with no nulls or varlenas before the target attribute. If possible,
* we also want to initialize the remainder of the attribute cached
* offset values.
*/
if (!slow)
@ -391,10 +393,8 @@ nocache_index_getattr(IndexTuple tup,
usecache = false;
}
else
{
off += att[i]->attlen;
}
}
off = att_align(off, att[attnum]->attlen, att[attnum]->attalign);


@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.57 2001/01/24 19:42:47 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.58 2001/03/22 03:59:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -200,9 +200,10 @@ printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
continue;
if (OidIsValid(thisState->typoutput))
{
/*
* If we have a toasted datum, forcibly detoast it here to avoid
* memory leakage inside the type's output routine.
* If we have a toasted datum, forcibly detoast it here to
* avoid memory leakage inside the type's output routine.
*/
if (thisState->typisvarlena)
attr = PointerGetDatum(PG_DETOAST_DATUM(origattr));
@ -308,9 +309,10 @@ debugtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
if (getTypeOutputInfo(typeinfo->attrs[i]->atttypid,
&typoutput, &typelem, &typisvarlena))
{
/*
* If we have a toasted datum, forcibly detoast it here to avoid
* memory leakage inside the type's output routine.
* If we have a toasted datum, forcibly detoast it here to
* avoid memory leakage inside the type's output routine.
*/
if (typisvarlena)
attr = PointerGetDatum(PG_DETOAST_DATUM(origattr));
@ -405,6 +407,7 @@ printtup_internal(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
/* send # of bytes, and opaque data */
if (thisState->typisvarlena)
{
/*
* If we have a toasted datum, must detoast before sending.
*/


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.71 2001/01/24 19:42:47 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.72 2001/03/22 03:59:11 momjian Exp $
*
* NOTES
* some of the executor utility code such as "ExecTypeFromTL" should be
@ -242,9 +242,9 @@ equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2)
/*
* We do not need to check every single field here, and in fact
* some fields such as attdispersion probably shouldn't be
* compared. We can also disregard attnum (it was used to
* place the row in the attrs array) and everything derived
* from the column datatype.
* compared. We can also disregard attnum (it was used to place
* the row in the attrs array) and everything derived from the
* column datatype.
*/
if (strcmp(NameStr(attr1->attname), NameStr(attr2->attname)) != 0)
return false;
@ -276,8 +276,8 @@ equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2)
/*
* We can't assume that the items are always read from the
* system catalogs in the same order; so use the adnum field to
* identify the matching item to compare.
* system catalogs in the same order; so use the adnum field
* to identify the matching item to compare.
*/
for (j = 0; j < n; defval2++, j++)
{
@ -298,9 +298,9 @@ equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2)
ConstrCheck *check2 = constr2->check;
/*
* Similarly, don't assume that the checks are always read
* in the same order; match them up by name and contents.
* (The name *should* be unique, but...)
* Similarly, don't assume that the checks are always read in
* the same order; match them up by name and contents. (The
* name *should* be unique, but...)
*/
for (j = 0; j < n; check2++, j++)
{


@ -6,7 +6,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/gist/gist.c,v 1.71 2001/03/07 21:20:26 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/gist/gist.c,v 1.72 2001/03/22 03:59:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -80,6 +80,7 @@ static void gistcentryinit(GISTSTATE *giststate,
#undef GISTDEBUG
#ifdef GISTDEBUG
static void gist_dumptree(Relation r, int level, BlockNumber blk, OffsetNumber coff);
#endif
/*
@ -92,8 +93,10 @@ gistbuild(PG_FUNCTION_ARGS)
Relation index = (Relation) PG_GETARG_POINTER(1);
IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
Node *oldPred = (Node *) PG_GETARG_POINTER(3);
#ifdef NOT_USED
IndexStrategy istrat = (IndexStrategy) PG_GETARG_POINTER(4);
#endif
HeapScanDesc hscan;
HeapTuple htup;
@ -105,9 +108,11 @@ gistbuild(PG_FUNCTION_ARGS)
int nhtups,
nitups;
Node *pred = indexInfo->ii_Predicate;
#ifndef OMIT_PARTIAL_INDEX
TupleTable tupleTable;
TupleTableSlot *slot;
#endif
ExprContext *econtext;
GISTSTATE giststate;
@ -181,6 +186,7 @@ gistbuild(PG_FUNCTION_ARGS)
nhtups++;
#ifndef OMIT_PARTIAL_INDEX
/*
* If oldPred != NULL, this is an EXTEND INDEX command, so skip
* this tuple if it was already in the existing partial index
@ -262,9 +268,7 @@ gistbuild(PG_FUNCTION_ARGS)
#ifndef OMIT_PARTIAL_INDEX
if (pred != NULL || oldPred != NULL)
{
ExecDropTupleTable(tupleTable, true);
}
#endif /* OMIT_PARTIAL_INDEX */
FreeExprContext(econtext);
@ -316,8 +320,10 @@ gistinsert(PG_FUNCTION_ARGS)
Datum *datum = (Datum *) PG_GETARG_POINTER(1);
char *nulls = (char *) PG_GETARG_POINTER(2);
ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
#ifdef NOT_USED
Relation heapRel = (Relation) PG_GETARG_POINTER(4);
#endif
InsertIndexResult res;
IndexTuple itup;
@ -409,9 +415,12 @@ static void
gistdoinsert(Relation r,
IndexTuple itup,
InsertIndexResult *res,
GISTSTATE *giststate ) {
GISTSTATE *giststate)
{
IndexTuple *instup;
int i,ret,len = 1;
int i,
ret,
len = 1;
instup = (IndexTuple *) palloc(sizeof(IndexTuple));
instup[0] = (IndexTuple) palloc(IndexTupleSize(itup));
@ -431,7 +440,8 @@ gistlayerinsert( Relation r, BlockNumber blkno,
IndexTuple **itup, /* in - out, has compressed entry */
int *len, /* in - out */
InsertIndexResult *res, /* out */
GISTSTATE *giststate ) {
GISTSTATE *giststate)
{
Buffer buffer;
Page page;
OffsetNumber child;
@ -442,7 +452,8 @@ gistlayerinsert( Relation r, BlockNumber blkno,
page = (Page) BufferGetPage(buffer);
opaque = (GISTPageOpaque) PageGetSpecialPointer(page);
if (!(opaque->flags & F_LEAF)) {
if (!(opaque->flags & F_LEAF))
{
/* internal page, so we must walk on tree */
/* len IS equal to 1 */
ItemId iid;
@ -456,24 +467,26 @@ gistlayerinsert( Relation r, BlockNumber blkno,
nblkno = ItemPointerGetBlockNumber(&(oldtup->t_tid));
/*
* After this call:
* 1. if child page was split, then itup contains
* keys for each page
* 2. if child page wasn't split, then itup contains
* additional data for adjustment of the current key
* After this call: 1. if child page was split, then itup
* contains keys for each page 2. if child page wasn't split,
* then itup contains additional data for adjustment of the current key
*/
ret = gistlayerinsert(r, nblkno, itup, len, res, giststate);
/* nothing inserted in child */
if ( ! (ret & INSERTED) ) {
if (!(ret & INSERTED))
{
ReleaseBuffer(buffer);
return 0x00;
}
/* child was not split */
if ( ! (ret & SPLITED) ) {
if (!(ret & SPLITED))
{
IndexTuple newtup = gistgetadjusted(r, oldtup, (*itup)[0], giststate);
if ( ! newtup ) {
if (!newtup)
{
/* no need to update key */
ReleaseBuffer(buffer);
return 0x00;
@ -492,7 +505,8 @@ gistlayerinsert( Relation r, BlockNumber blkno,
ret = INSERTED;
if ( gistnospace(page, (*itup), *len) ) {
if (gistnospace(page, (*itup), *len))
{
/* no space for insertion */
IndexTuple *itvec;
int tlen;
@ -502,13 +516,17 @@ gistlayerinsert( Relation r, BlockNumber blkno,
itvec = gistjoinvector(itvec, &tlen, (*itup), *len);
pfree((*itup));
(*itup) = gistSplit(r, buffer, itvec, &tlen, giststate,
(opaque->flags & F_LEAF) ? res : NULL ); /*res only for inserting in leaf*/
(opaque->flags & F_LEAF) ? res : NULL); /* res only for
* inserting in leaf */
ReleaseBuffer(buffer);
pfree(itvec);
*len = tlen; /* now tlen >= 2 */
} else {
}
else
{
/* enough space */
OffsetNumber off, l;
OffsetNumber off,
l;
off = (PageIsEmpty(page)) ?
FirstOffsetNumber
@ -517,16 +535,22 @@ gistlayerinsert( Relation r, BlockNumber blkno,
l = gistwritebuffer(r, page, (*itup), *len, off, giststate);
WriteBuffer(buffer);
/* set res if inserting into a leaf page; in
this case, len = 1 always */
/*
* set res if inserting into a leaf page; in this case, len = 1 always
*/
if (res && (opaque->flags & F_LEAF))
ItemPointerSet(&((*res)->pointerData), blkno, l);
if ( *len > 1 ) { /* previos insert ret & SPLITED != 0 */
if (*len > 1)
{ /* previous insert ret & SPLITED != 0 */
int i;
/* child was split, so we must form union
* for insertion in parent */
/*
* child was split, so we must form union for insertion in
* parent
*/
IndexTuple newtup = gistunion(r, (*itup), *len, giststate);
for (i = 0; i < *len; i++)
pfree((*itup)[i]);
(*itup)[0] = newtup;
@ -542,13 +566,15 @@ gistlayerinsert( Relation r, BlockNumber blkno,
*/
static OffsetNumber
gistwritebuffer(Relation r, Page page, IndexTuple *itup,
int len, OffsetNumber off, GISTSTATE *giststate) {
int len, OffsetNumber off, GISTSTATE *giststate)
{
OffsetNumber l = InvalidOffsetNumber;
int i;
GISTENTRY tmpdentry;
IndexTuple newtup;
for(i=0; i<len; i++) {
for (i = 0; i < len; i++)
{
l = gistPageAddItem(giststate, r, page,
(Item) itup[i], IndexTupleSize(itup[i]),
off, LP_USED, &tmpdentry, &newtup);
@ -565,9 +591,11 @@ gistwritebuffer( Relation r, Page page, IndexTuple *itup,
* Check space for itup vector on page
*/
static int
gistnospace( Page page, IndexTuple *itvec, int len ) {
gistnospace(Page page, IndexTuple *itvec, int len)
{
int size = 0;
int i;
for (i = 0; i < len; i++)
size += IndexTupleSize(itvec[i]) + 4; /* ??? */
@ -578,8 +606,10 @@ gistnospace( Page page, IndexTuple *itvec, int len ) {
* Read buffer into itup vector
*/
static IndexTuple *
gistreadbuffer( Relation r, Buffer buffer, int *len /*out*/) {
OffsetNumber i, maxoff;
gistreadbuffer(Relation r, Buffer buffer, int *len /* out */ )
{
OffsetNumber i,
maxoff;
IndexTuple *itvec;
Page p = (Page) BufferGetPage(buffer);
@ -596,7 +626,8 @@ gistreadbuffer( Relation r, Buffer buffer, int *len /*out*/) {
* join two vectors into one
*/
static IndexTuple *
gistjoinvector( IndexTuple *itvec, int *len, IndexTuple *additvec, int addlen ) {
gistjoinvector(IndexTuple *itvec, int *len, IndexTuple *additvec, int addlen)
{
itvec = (IndexTuple *) repalloc((void *) itvec, sizeof(IndexTuple) * ((*len) + addlen));
memmove(&itvec[*len], additvec, sizeof(IndexTuple) * addlen);
*len += addlen;
@ -607,10 +638,12 @@ gistjoinvector( IndexTuple *itvec, int *len, IndexTuple *additvec, int addlen )
* return union of itup vector
*/
static IndexTuple
gistunion( Relation r, IndexTuple *itvec, int len, GISTSTATE *giststate ) {
gistunion(Relation r, IndexTuple *itvec, int len, GISTSTATE *giststate)
{
bytea *evec;
char *datum;
int datumsize, i;
int datumsize,
i;
GISTENTRY centry;
char isnull;
IndexTuple newtup;
@ -653,13 +686,16 @@ gistunion( Relation r, IndexTuple *itvec, int len, GISTSTATE *giststate ) {
* Forms union of oldtup and addtup, if union == oldtup then return NULL
*/
static IndexTuple
gistgetadjusted( Relation r, IndexTuple oldtup, IndexTuple addtup, GISTSTATE *giststate ) {
gistgetadjusted(Relation r, IndexTuple oldtup, IndexTuple addtup, GISTSTATE *giststate)
{
bytea *evec;
char *datum;
int datumsize;
bool result;
char isnull;
GISTENTRY centry, *ev0p, *ev1p;
GISTENTRY centry,
*ev0p,
*ev1p;
IndexTuple newtup = NULL;
evec = (bytea *) palloc(2 * sizeof(GISTENTRY) + VARHDRSZ);
@ -682,19 +718,23 @@ gistgetadjusted( Relation r, IndexTuple oldtup, IndexTuple addtup, GISTSTATE *gi
PointerGetDatum(evec),
PointerGetDatum(&datumsize)));
if ( ! ( ev0p->pred && ev1p->pred ) ) {
if (!(ev0p->pred && ev1p->pred))
result = (ev0p->pred == NULL && ev1p->pred == NULL);
} else {
else
{
FunctionCall3(&giststate->equalFn,
PointerGetDatum(ev0p->pred),
PointerGetDatum(datum),
PointerGetDatum(&result));
}
if ( result ) {
if (result)
{
/* no need to update key */
pfree(datum);
} else {
}
else
{
gistcentryinit(giststate, &centry, datum, ev0p->rel, ev0p->page,
ev0p->offset, datumsize, FALSE);
@ -728,19 +768,27 @@ gistSplit(Relation r,
InsertIndexResult *res)
{
Page p;
Buffer leftbuf, rightbuf;
Page left, right;
OffsetNumber *spl_left, *spl_right;
IndexTuple *lvectup, *rvectup, *newtup;
int leftoff, rightoff;
BlockNumber lbknum, rbknum;
Buffer leftbuf,
rightbuf;
Page left,
right;
OffsetNumber *spl_left,
*spl_right;
IndexTuple *lvectup,
*rvectup,
*newtup;
int leftoff,
rightoff;
BlockNumber lbknum,
rbknum;
GISTPageOpaque opaque;
char isnull;
GIST_SPLITVEC v;
bytea *entryvec;
bool *decompvec;
GISTENTRY tmpentry;
int i, nlen;
int i,
nlen;
p = (Page) BufferGetPage(buffer);
opaque = (GISTPageOpaque) PageGetSpecialPointer(p);
@ -801,29 +849,37 @@ gistSplit(Relation r,
pfree(entryvec);
pfree(decompvec);
spl_left = v.spl_left; spl_right = v.spl_right;
spl_left = v.spl_left;
spl_right = v.spl_right;
/* form left and right vector */
lvectup = (IndexTuple *) palloc(sizeof(IndexTuple) * v.spl_nleft);
rvectup = (IndexTuple *) palloc(sizeof(IndexTuple) * v.spl_nright);
leftoff = rightoff = 0;
for( i=1; i <= *len; i++ ) {
if (i == *(spl_left) || ( i==*len && *(spl_left) != FirstOffsetNumber ) ) {
for (i = 1; i <= *len; i++)
{
if (i == *(spl_left) || (i == *len && *(spl_left) != FirstOffsetNumber))
{
lvectup[leftoff++] = itup[i - 1];
spl_left++;
} else {
}
else
{
rvectup[rightoff++] = itup[i - 1];
spl_right++;
}
}
/* write on disk (may be need another split) */
if ( gistnospace(right, rvectup, v.spl_nright) ) {
if (gistnospace(right, rvectup, v.spl_nright))
{
nlen = v.spl_nright;
newtup = gistSplit(r, rightbuf, rvectup, &nlen, giststate,
(res && rvectup[nlen - 1] == itup[*len - 1]) ? res : NULL);
ReleaseBuffer(rightbuf);
} else {
}
else
{
OffsetNumber l;
l = gistwritebuffer(r, right, rvectup, v.spl_nright, FirstOffsetNumber, giststate);
@ -845,7 +901,8 @@ gistSplit(Relation r,
ItemPointerSet(&(newtup[0]->t_tid), rbknum, 1);
}
if ( gistnospace(left, lvectup, v.spl_nleft) ) {
if (gistnospace(left, lvectup, v.spl_nleft))
{
int llen = v.spl_nleft;
IndexTuple *lntup;
@ -855,7 +912,9 @@ gistSplit(Relation r,
newtup = gistjoinvector(newtup, &nlen, lntup, llen);
pfree(lntup);
} else {
}
else
{
OffsetNumber l;
l = gistwritebuffer(r, left, lvectup, v.spl_nleft, FirstOffsetNumber, giststate);
@ -1182,7 +1241,8 @@ gist_dumptree(Relation r, int level, BlockNumber blk, OffsetNumber coff)
GISTPageOpaque opaque;
IndexTuple which;
ItemId iid;
OffsetNumber i,maxoff;
OffsetNumber i,
maxoff;
BlockNumber cblk;
char *pred;
@ -1198,7 +1258,8 @@ gist_dumptree(Relation r, int level, BlockNumber blk, OffsetNumber coff)
elog(NOTICE, "%sPage: %d %s blk: %d maxoff: %d free: %d", pred, coff, (opaque->flags & F_LEAF) ? "LEAF" : "INTE", (int) blk, (int) maxoff, PageGetFreeSpace(page));
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i)) {
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
{
iid = PageGetItemId(page, i);
which = (IndexTuple) PageGetItem(page, iid);
cblk = ItemPointerGetBlockNumber(&(which->t_tid));
@ -1206,13 +1267,13 @@ gist_dumptree(Relation r, int level, BlockNumber blk, OffsetNumber coff)
elog(NOTICE, "%s Tuple. blk: %d size: %d", pred, (int) cblk, IndexTupleSize(which));
#endif
if ( ! ( opaque->flags & F_LEAF ) ) {
if (!(opaque->flags & F_LEAF))
gist_dumptree(r, level + 1, cblk, i);
}
}
ReleaseBuffer(buffer);
pfree(pred);
}
#endif /* defined GISTDEBUG */
void
@ -1231,4 +1292,3 @@ void
gist_desc(char *buf, uint8 xl_info, char *rec)
{
}


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.49 2001/02/22 21:48:49 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.50 2001/03/22 03:59:12 momjian Exp $
*
* NOTES
* This file contains only the public interface routines.
@ -45,8 +45,10 @@ hashbuild(PG_FUNCTION_ARGS)
Relation index = (Relation) PG_GETARG_POINTER(1);
IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
Node *oldPred = (Node *) PG_GETARG_POINTER(3);
#ifdef NOT_USED
IndexStrategy istrat = (IndexStrategy) PG_GETARG_POINTER(4);
#endif
HeapScanDesc hscan;
HeapTuple htup;
@ -59,9 +61,11 @@ hashbuild(PG_FUNCTION_ARGS)
nitups;
HashItem hitem;
Node *pred = indexInfo->ii_Predicate;
#ifndef OMIT_PARTIAL_INDEX
TupleTable tupleTable;
TupleTableSlot *slot;
#endif
ExprContext *econtext;
InsertIndexResult res = NULL;
@ -117,6 +121,7 @@ hashbuild(PG_FUNCTION_ARGS)
nhtups++;
#ifndef OMIT_PARTIAL_INDEX
/*
* If oldPred != NULL, this is an EXTEND INDEX command, so skip
* this tuple if it was already in the existing partial index
@ -191,9 +196,7 @@ hashbuild(PG_FUNCTION_ARGS)
#ifndef OMIT_PARTIAL_INDEX
if (pred != NULL || oldPred != NULL)
{
ExecDropTupleTable(tupleTable, true);
}
#endif /* OMIT_PARTIAL_INDEX */
FreeExprContext(econtext);
@ -245,8 +248,10 @@ hashinsert(PG_FUNCTION_ARGS)
Datum *datum = (Datum *) PG_GETARG_POINTER(1);
char *nulls = (char *) PG_GETARG_POINTER(2);
ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
#ifdef NOT_USED
Relation heapRel = (Relation) PG_GETARG_POINTER(4);
#endif
InsertIndexResult res;
HashItem hitem;
@ -327,8 +332,10 @@ Datum
hashrescan(PG_FUNCTION_ARGS)
{
IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
#ifdef NOT_USED /* XXX surely it's wrong to ignore this? */
bool fromEnd = PG_GETARG_BOOL(1);
#endif
ScanKey scankey = (ScanKey) PG_GETARG_POINTER(2);
ItemPointer iptr;

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.29 2001/01/24 19:42:47 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.30 2001/03/22 03:59:13 momjian Exp $
*
* NOTES
* These functions are stored in pg_amproc. For each operator class

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.110 2001/01/24 19:42:47 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.111 2001/03/22 03:59:13 momjian Exp $
*
*
* INTERFACE ROUTINES
@ -564,6 +564,7 @@ fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc,
)
);
}
#endif /* defined(DISABLE_COMPLEX_MACRO) */
@ -791,8 +792,8 @@ heap_beginscan(Relation relation,
scan->rs_nkeys = (short) nkeys;
/*
* we do this here instead of in initscan() because heap_rescan
* also calls initscan() and we don't want to allocate memory again
* we do this here instead of in initscan() because heap_rescan also
* calls initscan() and we don't want to allocate memory again
*/
if (nkeys)
scan->rs_key = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys);
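(Illustrative aside, not part of the commit: the comment above describes an allocate-once pattern — the key array is allocated in heap_beginscan only, while initscan, which heap_rescan also reaches, merely refreshes its contents. A rough standalone illustration of the same shape, using plain malloc and simplified stand-in types rather than the backend's ScanKeyData:)

#include <stdlib.h>
#include <string.h>

typedef struct { int attno; int value; } ScanKeyData;   /* simplified stand-in */
typedef struct { int nkeys; ScanKeyData *keys; } ScanDesc;

/* Shared by begin_scan and rescan: only copies keys, never allocates. */
static void
init_scan(ScanDesc *scan, const ScanKeyData *keys)
{
	if (scan->nkeys > 0)
		memcpy(scan->keys, keys, scan->nkeys * sizeof(ScanKeyData));
}

static ScanDesc *
begin_scan(int nkeys, const ScanKeyData *keys)
{
	ScanDesc   *scan = malloc(sizeof(ScanDesc));

	scan->nkeys = nkeys;
	/* allocate here, not in init_scan, so a later rescan can reuse the array */
	scan->keys = nkeys ? malloc(nkeys * sizeof(ScanKeyData)) : NULL;
	init_scan(scan, keys);
	return scan;
}

static void
rescan(ScanDesc *scan, const ScanKeyData *keys)
{
	init_scan(scan, keys);      /* no second allocation */
}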
@ -1411,10 +1412,10 @@ heap_insert(Relation relation, HeapTuple tup)
WriteBuffer(buffer);
/*
* If tuple is cachable, mark it for rollback from the caches
* in case we abort. Note it is OK to do this after WriteBuffer
* releases the buffer, because the "tup" data structure is all
* in local memory, not in the shared buffer.
* If tuple is cachable, mark it for rollback from the caches in case
* we abort. Note it is OK to do this after WriteBuffer releases the
* buffer, because the "tup" data structure is all in local memory,
* not in the shared buffer.
*/
RelationMark4RollbackHeapTuple(relation, tup);
@ -1551,9 +1552,10 @@ l1:
#endif
/*
* Mark tuple for invalidation from system caches at next command boundary.
* We have to do this before WriteBuffer because we need to look at the
* contents of the tuple, so we need to hold our refcount on the buffer.
* Mark tuple for invalidation from system caches at next command
* boundary. We have to do this before WriteBuffer because we need to
* look at the contents of the tuple, so we need to hold our refcount
* on the buffer.
*/
RelationInvalidateHeapTuple(relation, &tp);
@ -1636,6 +1638,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
oldtup.t_data = (HeapTupleHeader) PageGetItem(dp, lp);
oldtup.t_len = ItemIdGetLength(lp);
oldtup.t_self = *otid;
/*
* Note: beyond this point, use oldtup not otid to refer to old tuple.
* otid may very well point at newtup->t_self, which we will overwrite
@ -1701,18 +1704,19 @@ l2:
/*
* If the toaster needs to be activated, OR if the new tuple will not
* fit on the same page as the old, then we need to release the context
* lock (but not the pin!) on the old tuple's buffer while we are off
* doing TOAST and/or table-file-extension work. We must mark the old
* tuple to show that it's already being updated, else other processes
* may try to update it themselves. To avoid second XLOG log record,
* we use xact mgr hook to unlock old tuple without reading log if xact
* will abort before update is logged. In the event of crash prio logging,
* TQUAL routines will see HEAP_XMAX_UNLOGGED flag...
* fit on the same page as the old, then we need to release the
* context lock (but not the pin!) on the old tuple's buffer while we
* are off doing TOAST and/or table-file-extension work. We must mark
* the old tuple to show that it's already being updated, else other
* processes may try to update it themselves. To avoid second XLOG log
* record, we use xact mgr hook to unlock old tuple without reading
* log if xact will abort before update is logged. In the event of
* crash prio logging, TQUAL routines will see HEAP_XMAX_UNLOGGED
* flag...
*
* NOTE: this trick is useless currently but saved for future
* when we'll implement UNDO and will re-use transaction IDs
* after postmaster startup.
* NOTE: this trick is useless currently but saved for future when we'll
* implement UNDO and will re-use transaction IDs after postmaster
* startup.
*
* We need to invoke the toaster if there are already any toasted values
* present, or if the new tuple is over-threshold.
@ -1814,10 +1818,10 @@ l2:
WriteBuffer(buffer);
/*
* If new tuple is cachable, mark it for rollback from the caches
* in case we abort. Note it is OK to do this after WriteBuffer
* releases the buffer, because the "newtup" data structure is all
* in local memory, not in the shared buffer.
* If new tuple is cachable, mark it for rollback from the caches in
* case we abort. Note it is OK to do this after WriteBuffer releases
* the buffer, because the "newtup" data structure is all in local
* memory, not in the shared buffer.
*/
RelationMark4RollbackHeapTuple(relation, newtup);
@ -2320,7 +2324,8 @@ heap_xlog_delete(bool redo, XLogRecPtr lsn, XLogRecord *record)
return;
}
}
else if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied ?! */
else if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied
* ?! */
elog(STOP, "heap_delete_undo: bad page LSN");
offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
@ -2420,7 +2425,8 @@ heap_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
}
/* undo insert */
if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied ?! */
if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied
* ?! */
elog(STOP, "heap_insert_undo: bad page LSN");
elog(STOP, "heap_insert_undo: unimplemented");
@ -2470,7 +2476,8 @@ heap_xlog_update(bool redo, XLogRecPtr lsn, XLogRecord *record, bool move)
goto newt;
}
}
else if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied ?! */
else if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied
* ?! */
elog(STOP, "heap_update_undo: bad old tuple page LSN");
offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
@ -2596,7 +2603,8 @@ newsame:;
}
/* undo */
if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied ?! */
if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied
* ?! */
elog(STOP, "heap_update_undo: bad new tuple page LSN");
elog(STOP, "heap_update_undo: unimplemented");
@ -2645,7 +2653,8 @@ _heap_unlock_tuple(void *data)
return;
}
void heap_redo(XLogRecPtr lsn, XLogRecord *record)
void
heap_redo(XLogRecPtr lsn, XLogRecord *record)
{
uint8 info = record->xl_info & ~XLR_INFO_MASK;
@ -2664,7 +2673,8 @@ void heap_redo(XLogRecPtr lsn, XLogRecord *record)
elog(STOP, "heap_redo: unknown op code %u", info);
}
void heap_undo(XLogRecPtr lsn, XLogRecord *record)
void
heap_undo(XLogRecPtr lsn, XLogRecord *record)
{
uint8 info = record->xl_info & ~XLR_INFO_MASK;
@ -2701,18 +2711,21 @@ heap_desc(char *buf, uint8 xl_info, char* rec)
if (info == XLOG_HEAP_INSERT)
{
xl_heap_insert *xlrec = (xl_heap_insert *) rec;
strcat(buf, "insert: ");
out_target(buf, &(xlrec->target));
}
else if (info == XLOG_HEAP_DELETE)
{
xl_heap_delete *xlrec = (xl_heap_delete *) rec;
strcat(buf, "delete: ");
out_target(buf, &(xlrec->target));
}
else if (info == XLOG_HEAP_UPDATE || info == XLOG_HEAP_MOVE)
{
xl_heap_update *xlrec = (xl_heap_update *) rec;
if (info == XLOG_HEAP_UPDATE)
strcat(buf, "update: ");
else
@ -2725,6 +2738,7 @@ heap_desc(char *buf, uint8 xl_info, char* rec)
else if (info == XLOG_HEAP_CLEAN)
{
xl_heap_clean *xlrec = (xl_heap_clean *) rec;
sprintf(buf + strlen(buf), "clean: node %u/%u; blk %u",
xlrec->node.tblNode, xlrec->node.relNode, xlrec->block);
}

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Id: hio.c,v 1.35 2001/01/24 19:42:48 momjian Exp $
* $Id: hio.c,v 1.36 2001/03/22 03:59:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/heap/tuptoaster.c,v 1.17 2001/02/15 20:57:01 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/heap/tuptoaster.c,v 1.18 2001/03/22 03:59:13 momjian Exp $
*
*
* INTERFACE ROUTINES
@ -448,7 +448,11 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
}
else
{
/* incompressible data, ignore on subsequent compression passes */
/*
* incompressible data, ignore on subsequent compression
* passes
*/
toast_action[i] = 'x';
}
}
@ -565,7 +569,11 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
}
else
{
/* incompressible data, ignore on subsequent compression passes */
/*
* incompressible data, ignore on subsequent compression
* passes
*/
toast_action[i] = 'x';
}
}
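(Illustrative aside, not part of the commit: the 'x' marker above belongs to a per-attribute action array — the surrounding routine, not all shown here, repeatedly picks a large remaining compressible value, tries to compress it, and permanently marks values that did not shrink so later passes skip them. A loose standalone sketch of that bookkeeping; the compress() stand-in and the size fields are hypothetical, not the real toaster interface:)

#include <stddef.h>

#define NATTRS 4

/* Hypothetical per-attribute state; the real toaster tracks much more. */
static size_t attr_size[NATTRS] = {900, 120, 2500, 40};
static char   toast_action[NATTRS] = {' ', ' ', ' ', ' '};

/* Stand-in for compression: pretend most columns shrink by half and
 * column 2 is incompressible. */
static size_t
compress(size_t insize, int attno)
{
	return (attno != 2) ? insize / 2 : insize;
}

static void
shrink_tuple(size_t target)
{
	size_t		total = 0;

	for (int i = 0; i < NATTRS; i++)
		total += attr_size[i];

	while (total > target)
	{
		int			biggest = -1;

		/* pick the largest attribute not yet marked incompressible */
		for (int i = 0; i < NATTRS; i++)
			if (toast_action[i] == ' ' &&
				(biggest < 0 || attr_size[i] > attr_size[biggest]))
				biggest = i;
		if (biggest < 0)
			break;                          /* nothing left to try */

		size_t		newsize = compress(attr_size[biggest], biggest);

		if (newsize < attr_size[biggest])
		{
			total -= attr_size[biggest] - newsize;
			attr_size[biggest] = newsize;
			toast_action[biggest] = 'c';    /* compressed in place */
		}
		else
			toast_action[biggest] = 'x';    /* incompressible: skip on later passes */
	}
}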

View File

@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/index/Attic/istrat.c,v 1.48 2001/01/24 19:42:48 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/index/Attic/istrat.c,v 1.49 2001/03/22 03:59:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -263,6 +263,7 @@ StrategyTermEvaluate(StrategyTerm term,
return result;
}
#endif
/* ----------------
@ -465,6 +466,7 @@ RelationInvokeStrategy(Relation relation,
}
#endif
/* ----------------
@ -597,9 +599,7 @@ IndexSupportInitialize(IndexStrategy indexStrategy,
}
if (cachesearch)
{
ReleaseSysCache(tuple);
}
else
{
heap_endscan(scan);

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtcompare.c,v 1.40 2001/01/24 19:42:48 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtcompare.c,v 1.41 2001/03/22 03:59:14 momjian Exp $
*
* NOTES
*
@ -236,9 +236,10 @@ bttextcmp(PG_FUNCTION_ARGS)
if (res == 0 && VARSIZE(a) != VARSIZE(b))
{
/*
* The two strings are the same in the first len bytes,
* and they are of different lengths.
* The two strings are the same in the first len bytes, and they
* are of different lengths.
*/
if (VARSIZE(a) < VARSIZE(b))
res = -1;
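(Illustrative aside, not part of the commit: the tie-break above is the standard way to compare length-counted strings — compare the common prefix with memcmp, and only if that is equal let the shorter value sort first. A small self-contained version over a plain length/pointer pair instead of the real varlena/VARSIZE machinery:)

#include <string.h>

typedef struct
{
	size_t		len;
	const char *data;
} Text;                                     /* simplified varlena stand-in */

static int
text_cmp(const Text *a, const Text *b)
{
	size_t		minlen = (a->len < b->len) ? a->len : b->len;
	int			res = memcmp(a->data, b->data, minlen);

	if (res != 0)
		return res;
	/* same in the first minlen bytes: the shorter string sorts first */
	if (a->len < b->len)
		return -1;
	if (a->len > b->len)
		return 1;
	return 0;
}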

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.81 2001/02/07 23:35:33 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.82 2001/03/22 03:59:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -114,8 +114,8 @@ top:
buf = _bt_moveright(rel, buf, natts, itup_scankey, BT_WRITE);
/*
* If we're not allowing duplicates, make sure the key isn't
* already in the index. XXX this belongs somewhere else, likely
* If we're not allowing duplicates, make sure the key isn't already
* in the index. XXX this belongs somewhere else, likely
*/
if (index_is_unique)
{
@ -171,8 +171,8 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
maxoff = PageGetMaxOffsetNumber(page);
/*
* Find first item >= proposed new item. Note we could also get
* a pointer to end-of-page here.
* Find first item >= proposed new item. Note we could also get a
* pointer to end-of-page here.
*/
offset = _bt_binsrch(rel, buf, natts, itup_scankey);
@ -187,13 +187,13 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
BlockNumber nblkno;
/*
* _bt_compare returns 0 for (1,NULL) and (1,NULL) - this's
* how we handling NULLs - and so we must not use _bt_compare
* in real comparison, but only for ordering/finding items on
* pages. - vadim 03/24/97
* _bt_compare returns 0 for (1,NULL) and (1,NULL) - this's how we
* handling NULLs - and so we must not use _bt_compare in real
* comparison, but only for ordering/finding items on pages. -
* vadim 03/24/97
*
* make sure the offset points to an actual key
* before trying to compare it...
* make sure the offset points to an actual key before trying to
* compare it...
*/
if (offset <= maxoff)
{
@ -201,10 +201,10 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
break; /* we're past all the equal tuples */
/*
* Have to check is inserted heap tuple deleted one (i.e.
* just moved to another place by vacuum)! We only need to
* do this once, but don't want to do it at all unless
* we see equal tuples, so as not to slow down unequal case.
* Have to check is inserted heap tuple deleted one (i.e. just
* moved to another place by vacuum)! We only need to do this
* once, but don't want to do it at all unless we see equal
* tuples, so as not to slow down unequal case.
*/
if (chtup)
{
@ -238,6 +238,7 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
/* Tell _bt_doinsert to wait... */
return xwait;
}
/*
* Otherwise we have a definite conflict.
*/
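(Illustrative aside, not part of the commit: this branch is the heart of the uniqueness check — walk the equal keys, and for each one inspect the heap tuple's state; a tuple moved by vacuum is ignored, an insert by an open transaction means the caller must wait on that XID and retry, and a live committed duplicate is a hard error. A compact sketch of that decision with hypothetical tuple-state and XID types standing in for the real visibility code:)

#include <stdint.h>
#include <stdio.h>

typedef uint32_t Xid;
#define INVALID_XID 0

typedef enum { TUPLE_DEAD, TUPLE_LIVE, TUPLE_INSERT_IN_PROGRESS } TupleState;

typedef struct
{
	TupleState	state;
	Xid			xmin;                       /* inserting transaction */
} HeapTupleInfo;

/* Returns INVALID_XID if the key can be inserted, or the XID the caller
 * must wait on before retrying the check. Reports a definite conflict. */
static Xid
check_unique(const HeapTupleInfo *equal_tuples, int n)
{
	for (int i = 0; i < n; i++)
	{
		switch (equal_tuples[i].state)
		{
			case TUPLE_DEAD:
				break;                      /* e.g. moved by vacuum; ignore */
			case TUPLE_INSERT_IN_PROGRESS:
				return equal_tuples[i].xmin;        /* tell caller to wait, then retry */
			case TUPLE_LIVE:
				fprintf(stderr, "duplicate key violates unique constraint\n");
				return INVALID_XID;         /* real code raises an ERROR here */
		}
	}
	return INVALID_XID;                     /* no conflict found */
}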
@ -365,9 +366,7 @@ _bt_insertonpg(Relation rel,
* Determine exactly where new item will go.
*/
if (afteritem > 0)
{
newitemoff = afteritem + 1;
}
else
{
/*----------
@ -404,10 +403,11 @@ _bt_insertonpg(Relation rel,
lpageop = (BTPageOpaque) PageGetSpecialPointer(page);
movedright = true;
}
/*
* Now we are on the right page, so find the insert position.
* If we moved right at all, we know we should insert at the
* start of the page, else must find the position by searching.
* Now we are on the right page, so find the insert position. If
* we moved right at all, we know we should insert at the start of
* the page, else must find the position by searching.
*/
if (movedright)
newitemoff = P_FIRSTDATAKEY(lpageop);
@ -418,9 +418,9 @@ _bt_insertonpg(Relation rel,
/*
* Do we need to split the page to fit the item on it?
*
* Note: PageGetFreeSpace() subtracts sizeof(ItemIdData) from its
* result, so this comparison is correct even though we appear to
* be accounting only for the item and not for its line pointer.
* Note: PageGetFreeSpace() subtracts sizeof(ItemIdData) from its result,
* so this comparison is correct even though we appear to be
* accounting only for the item and not for its line pointer.
*/
if (PageGetFreeSpace(page) < itemsz)
{
@ -489,10 +489,11 @@ _bt_insertonpg(Relation rel,
if (stack == (BTStack) NULL)
{
elog(DEBUG, "btree: concurrent ROOT page split");
/*
* If root page splitter failed to create new root page
* then old root' btpo_parent still points to metapage.
* We have to fix root page in this case.
* then old root' btpo_parent still points to metapage. We
* have to fix root page in this case.
*/
if (BTreeInvalidParent(lpageop))
{
@ -531,9 +532,9 @@ _bt_insertonpg(Relation rel,
* item! We want to find parent pointing to where we are,
* right ? - vadim 05/27/97
*
* Interestingly, this means we didn't *really* need to stack
* the parent key at all; all we really care about is the
* saved block and offset as a starting point for our search...
* Interestingly, this means we didn't *really* need to stack the
* parent key at all; all we really care about is the saved
* block and offset as a starting point for our search...
*/
ItemPointerSet(&(stack->bts_btitem.bti_itup.t_tid),
bknum, P_HIKEY);
@ -598,6 +599,7 @@ _bt_insertuple(Relation rel, Buffer buf,
XLogRecPtr recptr;
XLogRecData rdata[2];
BTItemData truncitem;
xlrec.target.node = rel->rd_node;
ItemPointerSet(&(xlrec.target.tid), BufferGetBlockNumber(buf), newitemoff);
rdata[0].buffer = InvalidBuffer;
@ -700,8 +702,8 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
/*
* If the page we're splitting is not the rightmost page at its level
* in the tree, then the first entry on the page is the high key
* for the page. We need to copy that to the right half. Otherwise
* in the tree, then the first entry on the page is the high key for
* the page. We need to copy that to the right half. Otherwise
* (meaning the rightmost page case), all the items on the right half
* will be user data.
*/
@ -812,11 +814,11 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
}
/*
* We have to grab the right sibling (if any) and fix the prev
* pointer there. We are guaranteed that this is deadlock-free
* since no other writer will be holding a lock on that page
* and trying to move left, and all readers release locks on a page
* before trying to fetch its neighbors.
* We have to grab the right sibling (if any) and fix the prev pointer
* there. We are guaranteed that this is deadlock-free since no other
* writer will be holding a lock on that page and trying to move left,
* and all readers release locks on a page before trying to fetch its
* neighbors.
*/
if (!P_RIGHTMOST(ropaque))
@ -856,6 +858,7 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
BlockIdSet(&(xlrec.parentblk), lopaque->btpo_parent);
BlockIdSet(&(xlrec.leftblk), lopaque->btpo_prev);
BlockIdSet(&(xlrec.rightblk), ropaque->btpo_next);
/*
* Dirrect access to page is not good but faster - we should
* implement some new func in page API.
@ -881,6 +884,7 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
if (!P_RIGHTMOST(ropaque))
{
BTPageOpaque sopaque = (BTPageOpaque) PageGetSpecialPointer(spage);
sopaque->btpo_prev = BufferGetBlockNumber(rbuf);
rdata[2].next = &(rdata[3]);
@ -978,13 +982,13 @@ _bt_findsplitloc(Relation rel,
+sizeof(ItemIdData);
/*
* Finding the best possible split would require checking all the possible
* split points, because of the high-key and left-key special cases.
* That's probably more work than it's worth; instead, stop as soon as
* we find a "good-enough" split, where good-enough is defined as an
* imbalance in free space of no more than pagesize/16 (arbitrary...)
* This should let us stop near the middle on most pages, instead of
* plowing to the end.
* Finding the best possible split would require checking all the
* possible split points, because of the high-key and left-key special
* cases. That's probably more work than it's worth; instead, stop as
* soon as we find a "good-enough" split, where good-enough is defined
* as an imbalance in free space of no more than pagesize/16
* (arbitrary...) This should let us stop near the middle on most
* pages, instead of plowing to the end.
*/
goodenough = leftspace / 16;
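(Illustrative aside, not part of the commit: the goodenough = leftspace / 16 rule above turns split-point selection into a first-acceptable search — walk the candidate split points in order, track free space on each side including the incoming item, and stop at the first point whose free-space imbalance is within a sixteenth of the page. A self-contained approximation, ignoring the high-key and line-pointer details the real code accounts for and assuming the new item lands on the left half:)

#include <stdlib.h>

/* Find a split such that items [0, split) go left and [split, nitems) go
 * right, stopping at the first "good enough" candidate. */
static int
find_split(const size_t *itemsz, int nitems, size_t pagesize, size_t newitemsz)
{
	size_t		total = newitemsz;
	size_t		left = 0;
	long		goodenough = (long) (pagesize / 16);
	int			best = -1;
	long		bestdelta = 0;

	for (int i = 0; i < nitems; i++)
		total += itemsz[i];

	for (int split = 1; split < nitems; split++)
	{
		left += itemsz[split - 1];

		long		leftfree = (long) pagesize - (long) (left + newitemsz);
		long		rightfree = (long) pagesize - (long) (total - newitemsz - left);
		long		delta = labs(leftfree - rightfree);

		if (leftfree < 0 || rightfree < 0)
			continue;                       /* this split doesn't even fit */
		if (best < 0 || delta < bestdelta)
		{
			best = split;
			bestdelta = delta;
		}
		if (delta <= goodenough)
			break;                          /* near the middle: stop searching */
	}
	return best;                            /* -1 means no feasible split */
}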
@ -1024,6 +1028,7 @@ _bt_findsplitloc(Relation rel,
*/
leftfree = leftspace - dataitemstoleft - (int) itemsz;
rightfree = rightspace - (dataitemtotal - dataitemstoleft);
/*
* Will the new item go to left or right of split?
*/
@ -1051,8 +1056,8 @@ _bt_findsplitloc(Relation rel,
}
/*
* I believe it is not possible to fail to find a feasible split,
* but just in case ...
* I believe it is not possible to fail to find a feasible split, but
* just in case ...
*/
if (!state.have_split)
elog(FATAL, "_bt_findsplitloc: can't find a feasible split point for %s",
@ -1071,6 +1076,7 @@ _bt_checksplitloc(FindSplitData *state, OffsetNumber firstright,
int leftfree, int rightfree,
bool newitemonleft, Size firstrightitemsz)
{
/*
* Account for the new item on whichever side it is to be put.
*/
@ -1078,13 +1084,15 @@ _bt_checksplitloc(FindSplitData *state, OffsetNumber firstright,
leftfree -= (int) state->newitemsz;
else
rightfree -= (int) state->newitemsz;
/*
* If we are not on the leaf level, we will be able to discard the
* key data from the first item that winds up on the right page.
* If we are not on the leaf level, we will be able to discard the key
* data from the first item that winds up on the right page.
*/
if (state->non_leaf)
rightfree += (int) firstrightitemsz -
(int) (MAXALIGN(sizeof(BTItemData)) + sizeof(ItemIdData));
/*
* If feasible split point, remember best delta.
*/
@ -1134,10 +1142,11 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access)
maxoff = PageGetMaxOffsetNumber(page);
start = stack->bts_offset;
/*
* _bt_insertonpg set bts_offset to InvalidOffsetNumber in the
* case of concurrent ROOT page split. Also, watch out for
* possibility that page has a high key now when it didn't before.
* _bt_insertonpg set bts_offset to InvalidOffsetNumber in the case of
* concurrent ROOT page split. Also, watch out for possibility that
* page has a high key now when it didn't before.
*/
if (start < P_FIRSTDATAKEY(opaque))
start = P_FIRSTDATAKEY(opaque);
@ -1159,7 +1168,11 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access)
return buf;
}
}
/* by here, the item we're looking for moved right at least one page */
/*
* by here, the item we're looking for moved right at least one
* page
*/
if (P_RIGHTMOST(opaque))
{
_bt_relbuf(rel, buf, access);
@ -1236,9 +1249,9 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
rpage = BufferGetPage(rbuf);
/*
* Make sure pages in old root level have valid parent links --- we will
* need this in _bt_insertonpg() if a concurrent root split happens (see
* README).
* Make sure pages in old root level have valid parent links --- we
* will need this in _bt_insertonpg() if a concurrent root split
* happens (see README).
*/
((BTPageOpaque) PageGetSpecialPointer(lpage))->btpo_parent =
((BTPageOpaque) PageGetSpecialPointer(rpage))->btpo_parent =
@ -1264,8 +1277,8 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
pfree(new_item);
/*
* Create downlink item for right page. The key for it is obtained from
* the "high key" position in the left page.
* Create downlink item for right page. The key for it is obtained
* from the "high key" position in the left page.
*/
itemid = PageGetItemId(lpage, P_HIKEY);
itemsz = ItemIdGetLength(itemid);
@ -1346,11 +1359,18 @@ _bt_fixroot(Relation rel, Buffer oldrootbuf, bool release)
Page oldrootpage = BufferGetPage(oldrootbuf);
BTPageOpaque oldrootopaque = (BTPageOpaque)
PageGetSpecialPointer(oldrootpage);
Buffer buf, leftbuf, rightbuf;
Page page, leftpage, rightpage;
BTPageOpaque opaque, leftopaque, rightopaque;
Buffer buf,
leftbuf,
rightbuf;
Page page,
leftpage,
rightpage;
BTPageOpaque opaque,
leftopaque,
rightopaque;
OffsetNumber newitemoff;
BTItem btitem, ritem;
BTItem btitem,
ritem;
Size itemsz;
if (!P_LEFTMOST(oldrootopaque) || P_RIGHTMOST(oldrootopaque))
@ -1377,13 +1397,13 @@ _bt_fixroot(Relation rel, Buffer oldrootbuf, bool release)
*
* If concurrent process will split one of pages on this level then it
* will see either btpo_parent == metablock or btpo_parent == rootblk.
* In first case it will give up its locks and walk to the leftmost page
* (oldrootbuf) in _bt_fixup() - ie it will wait for us and let us
* continue. In second case it will try to lock rootbuf keeping its locks
* on buffers we already passed, also waiting for us. If we'll have to
* unlock rootbuf (split it) and that process will have to split page
* of new level we created (level of rootbuf) then it will wait while
* we create upper level. Etc.
* In first case it will give up its locks and walk to the leftmost
* page (oldrootbuf) in _bt_fixup() - ie it will wait for us and let
* us continue. In second case it will try to lock rootbuf keeping its
* locks on buffers we already passed, also waiting for us. If we'll
* have to unlock rootbuf (split it) and that process will have to
* split page of new level we created (level of rootbuf) then it will
* wait while we create upper level. Etc.
*/
while (!P_RIGHTMOST(leftopaque))
{
@ -1392,11 +1412,11 @@ _bt_fixroot(Relation rel, Buffer oldrootbuf, bool release)
rightopaque = (BTPageOpaque) PageGetSpecialPointer(rightpage);
/*
* Update LSN & StartUpID of child page buffer to ensure that
* it will be written on disk after flushing log record for new
* root creation. Unfortunately, for the moment (?) we do not
* log this operation and so possibly break our rule to log entire
* page content on first after checkpoint modification.
* Update LSN & StartUpID of child page buffer to ensure that it
* will be written on disk after flushing log record for new root
* creation. Unfortunately, for the moment (?) we do not log this
* operation and so possibly break our rule to log entire page
* content on first after checkpoint modification.
*/
HOLD_INTERRUPTS();
rightopaque->btpo_parent = rootblk;
@ -1450,10 +1470,10 @@ _bt_fixroot(Relation rel, Buffer oldrootbuf, bool release)
/*
* Here we hold locks on old root buffer, new root buffer we've
* created with _bt_newroot() - rootbuf, - and buf we've used
* for last insert ops - buf. If rootbuf != buf then we have to
* create at least one more level. And if "release" is TRUE
* then we give up oldrootbuf.
* created with _bt_newroot() - rootbuf, - and buf we've used for last
* insert ops - buf. If rootbuf != buf then we have to create at least
* one more level. And if "release" is TRUE then we give up
* oldrootbuf.
*/
if (release)
_bt_wrtbuf(rel, oldrootbuf);
@ -1543,7 +1563,8 @@ _bt_fixlevel(Relation rel, Buffer buf, BlockNumber limit)
Page cpage[3];
BTPageOpaque copaque[3];
BTItem btitem;
int cidx, i;
int cidx,
i;
bool goodbye = false;
char tbuf[BLCKSZ];
@ -1572,9 +1593,10 @@ _bt_fixlevel(Relation rel, Buffer buf, BlockNumber limit)
for (;;)
{
/*
* Read up to 2 more child pages and look for pointers
* to them in *saved* parent page
* Read up to 2 more child pages and look for pointers to them in
* *saved* parent page
*/
coff[1] = coff[2] = InvalidOffsetNumber;
for (cidx = 0; cidx < 2;)
@ -1766,7 +1788,8 @@ _bt_fixbranch(Relation rel, BlockNumber lblkno,
BlockNumber blkno = true_stack->bts_blkno;
BTStackData stack;
BTPageOpaque opaque;
Buffer buf, rbuf;
Buffer buf,
rbuf;
Page page;
OffsetNumber offnum;
@ -1779,8 +1802,8 @@ _bt_fixbranch(Relation rel, BlockNumber lblkno,
_bt_fixlevel(rel, buf, rblkno);
/*
* Here parent level should have pointers for both
* lblkno and rblkno and we have to find them.
* Here parent level should have pointers for both lblkno and
* rblkno and we have to find them.
*/
stack.bts_parent = NULL;
stack.bts_blkno = blkno;
@ -1829,10 +1852,10 @@ _bt_fixbranch(Relation rel, BlockNumber lblkno,
}
/*
* Well, we are on the level that was root or unexistent when
* we started traversing tree down. If btpo_parent is updated
* then we'll use it to continue, else we'll fix/restore upper
* levels entirely.
* Well, we are on the level that was root or unexistent when we
* started traversing tree down. If btpo_parent is updated then
* we'll use it to continue, else we'll fix/restore upper levels
* entirely.
*/
if (!BTreeInvalidParent(opaque))
{
@ -1882,10 +1905,10 @@ _bt_fixup(Relation rel, Buffer buf)
{
page = BufferGetPage(buf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
/*
* If someone else already created parent pages
* then it's time for _bt_fixtree() to check upper
* levels and fix them, if required.
* If someone else already created parent pages then it's time for
* _bt_fixtree() to check upper levels and fix them, if required.
*/
if (!BTreeInvalidParent(opaque))
{
@ -1904,9 +1927,8 @@ _bt_fixup(Relation rel, Buffer buf)
}
/*
* Ok, we are on the leftmost page, it's write locked
* by us and its btpo_parent points to meta page - time
* for _bt_fixroot().
* Ok, we are on the leftmost page, it's write locked by us and its
* btpo_parent points to meta page - time for _bt_fixroot().
*/
elog(NOTICE, "bt_fixup[%s]: fixing root page", RelationGetRelationName(rel));
buf = _bt_fixroot(rel, buf, true);

View File

@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.50 2001/02/07 23:35:33 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.51 2001/03/22 03:59:14 momjian Exp $
*
* NOTES
* Postgres btree pages look like ordinary relation pages. The opaque
@ -212,6 +212,7 @@ _bt_getroot(Relation rel, int access)
}
else
{
/*
* Metadata initialized by someone else. In order to
* guarantee no deadlocks, we have to release the metadata
@ -239,10 +240,11 @@ _bt_getroot(Relation rel, int access)
if (!P_ISROOT(rootopaque))
{
/*
* It happened, but if root page splitter failed to create
* new root page then we'll go in loop trying to call
* _bt_getroot again and again.
* It happened, but if root page splitter failed to create new
* root page then we'll go in loop trying to call _bt_getroot
* again and again.
*/
if (FixBTree)
{
@ -270,16 +272,18 @@ check_parent:;
/* rootbuf is read locked */
goto check_parent;
}
else /* someone else already fixed root */
else
/* someone else already fixed root */
{
LockBuffer(rootbuf, BUFFER_LOCK_UNLOCK);
LockBuffer(rootbuf, BT_READ);
}
}
/*
* Ok, here we have old root page with btpo_parent pointing
* to upper level - check parent page because of there is
* good chance that parent is root page.
* Ok, here we have old root page with btpo_parent pointing to
* upper level - check parent page because of there is good
* chance that parent is root page.
*/
newrootbuf = _bt_getbuf(rel, rootopaque->btpo_parent, BT_READ);
_bt_relbuf(rel, rootbuf, BT_READ);

View File

@ -12,7 +12,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.78 2001/02/07 23:35:33 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.79 2001/03/22 03:59:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -30,6 +30,7 @@
bool BuildingBtree = false; /* see comment in btbuild() */
bool FastBuild = true; /* use sort/build instead */
/* of insertion build */
@ -56,8 +57,10 @@ btbuild(PG_FUNCTION_ARGS)
Relation index = (Relation) PG_GETARG_POINTER(1);
IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
Node *oldPred = (Node *) PG_GETARG_POINTER(3);
#ifdef NOT_USED
IndexStrategy istrat = (IndexStrategy) PG_GETARG_POINTER(4);
#endif
HeapScanDesc hscan;
HeapTuple htup;
@ -69,9 +72,11 @@ btbuild(PG_FUNCTION_ARGS)
int nhtups,
nitups;
Node *pred = indexInfo->ii_Predicate;
#ifndef OMIT_PARTIAL_INDEX
TupleTable tupleTable;
TupleTableSlot *slot;
#endif
ExprContext *econtext;
InsertIndexResult res = NULL;
@ -80,10 +85,11 @@ btbuild(PG_FUNCTION_ARGS)
bool usefast;
Snapshot snapshot;
TransactionId XmaxRecent;
/*
* spool2 is needed only when the index is an unique index.
* Dead tuples are put into spool2 instead of spool in
* order to avoid uniqueness check.
* spool2 is needed only when the index is an unique index. Dead
* tuples are put into spool2 instead of spool in order to avoid
* uniqueness check.
*/
BTSpool *spool2 = NULL;
bool tupleIsAlive;
@ -155,9 +161,9 @@ btbuild(PG_FUNCTION_ARGS)
if (usefast)
{
spool = _bt_spoolinit(index, indexInfo->ii_Unique);
/*
* Different from spool,the uniqueness isn't checked
* for spool2.
* Different from spool,the uniqueness isn't checked for spool2.
*/
if (indexInfo->ii_Unique)
spool2 = _bt_spoolinit(index, false);
@ -193,6 +199,7 @@ btbuild(PG_FUNCTION_ARGS)
nhtups++;
#ifndef OMIT_PARTIAL_INDEX
/*
* If oldPred != NULL, this is an EXTEND INDEX command, so skip
* this tuple if it was already in the existing partial index
@ -253,8 +260,7 @@ btbuild(PG_FUNCTION_ARGS)
* btree pages - NULLs greater NOT_NULLs and NULL = NULL is TRUE.
* Sure, it's just rule for placing/finding items and no more -
* keytest'll return FALSE for a = 5 for items having 'a' isNULL.
* Look at _bt_compare for how it works.
* - vadim 03/23/97
* Look at _bt_compare for how it works. - vadim 03/23/97
*
* if (itup->t_info & INDEX_NULL_MASK) { pfree(itup); continue; }
*/
@ -271,7 +277,8 @@ btbuild(PG_FUNCTION_ARGS)
{
if (tupleIsAlive || !spool2)
_bt_spool(btitem, spool);
else /* dead tuples are put into spool2 */
else
/* dead tuples are put into spool2 */
{
dead_count++;
_bt_spool(btitem, spool2);
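(Illustrative aside, not part of the commit: the spool2 trick above keeps the uniqueness check honest during a unique-index build — live tuples go through the spool whose sort performs duplicate detection, dead ones are diverted to a second spool and merged back afterwards. A toy sketch of that routing; the two "spools" here are just key arrays, not the real tuplesort state:)

#include <stdbool.h>
#include <stddef.h>

typedef struct { int key; bool alive; } ItemInfo;   /* hypothetical index item */

typedef struct
{
	size_t		count;
	int			keys[1024];                 /* toy spool: just collects keys */
} Spool;

/* Route items the way btbuild does: spool2 == NULL means the index is not
 * unique and everything goes into the primary spool. */
static void
spool_items(const ItemInfo *items, size_t n, Spool *spool, Spool *spool2)
{
	for (size_t i = 0; i < n; i++)
	{
		if (items[i].alive || spool2 == NULL)
			spool->keys[spool->count++] = items[i].key;     /* checked for duplicates */
		else
			spool2->keys[spool2->count++] = items[i].key;   /* dead: bypass the check */
	}
}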
@ -296,9 +303,7 @@ btbuild(PG_FUNCTION_ARGS)
#ifndef OMIT_PARTIAL_INDEX
if (pred != NULL || oldPred != NULL)
{
ExecDropTupleTable(tupleTable, true);
}
#endif /* OMIT_PARTIAL_INDEX */
FreeExprContext(econtext);
@ -408,10 +413,11 @@ btgettuple(PG_FUNCTION_ARGS)
if (ItemPointerIsValid(&(scan->currentItemData)))
{
/*
* Restore scan position using heap TID returned by previous call
* to btgettuple(). _bt_restscan() re-grabs the read lock on
* the buffer, too.
* to btgettuple(). _bt_restscan() re-grabs the read lock on the
* buffer, too.
*/
_bt_restscan(scan);
res = _bt_next(scan, dir);
@ -421,8 +427,8 @@ btgettuple(PG_FUNCTION_ARGS)
/*
* Save heap TID to use it in _bt_restscan. Then release the read
* lock on the buffer so that we aren't blocking other backends.
* NOTE: we do keep the pin on the buffer!
* lock on the buffer so that we aren't blocking other backends. NOTE:
* we do keep the pin on the buffer!
*/
if (res)
{
@ -462,8 +468,10 @@ Datum
btrescan(PG_FUNCTION_ARGS)
{
IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
#ifdef NOT_USED /* XXX surely it's wrong to ignore this? */
bool fromEnd = PG_GETARG_BOOL(1);
#endif
ScanKey scankey = (ScanKey) PG_GETARG_POINTER(2);
ItemPointer iptr;
@ -671,8 +679,8 @@ _bt_restscan(IndexScanDesc scan)
BlockNumber blkno;
/*
* Get back the read lock we were holding on the buffer.
* (We still have a reference-count pin on it, though.)
* Get back the read lock we were holding on the buffer. (We still
* have a reference-count pin on it, though.)
*/
LockBuffer(buf, BT_READ);
@ -694,8 +702,8 @@ _bt_restscan(IndexScanDesc scan)
}
/*
* The item we were on may have moved right due to insertions.
* Find it again.
* The item we were on may have moved right due to insertions. Find it
* again.
*/
for (;;)
{
@ -717,7 +725,8 @@ _bt_restscan(IndexScanDesc scan)
}
/*
* By here, the item we're looking for moved right at least one page
* By here, the item we're looking for moved right at least one
* page
*/
if (P_RIGHTMOST(opaque))
elog(FATAL, "_bt_restscan: my bits moved right off the end of the world!"
@ -898,7 +907,8 @@ btree_xlog_split(bool redo, bool onleft, XLogRecPtr lsn, XLogRecord *record)
PageSetSUI(page, ThisStartUpID);
UnlockAndWriteBuffer(buffer);
}
else /* undo */
else
/* undo */
{
if (XLByteLT(PageGetLSN(page), lsn))
elog(STOP, "btree_split_undo: bad left sibling LSN");
@ -936,7 +946,8 @@ btree_xlog_split(bool redo, bool onleft, XLogRecPtr lsn, XLogRecord *record)
PageSetSUI(page, ThisStartUpID);
UnlockAndWriteBuffer(buffer);
}
else /* undo */
else
/* undo */
{
if (XLByteLT(PageGetLSN(page), lsn))
elog(STOP, "btree_split_undo: bad right sibling LSN");
@ -1092,18 +1103,21 @@ btree_desc(char *buf, uint8 xl_info, char* rec)
if (info == XLOG_BTREE_INSERT)
{
xl_btree_insert *xlrec = (xl_btree_insert *) rec;
strcat(buf, "insert: ");
out_target(buf, &(xlrec->target));
}
else if (info == XLOG_BTREE_DELETE)
{
xl_btree_delete *xlrec = (xl_btree_delete *) rec;
strcat(buf, "delete: ");
out_target(buf, &(xlrec->target));
}
else if (info == XLOG_BTREE_SPLIT || info == XLOG_BTREE_SPLEFT)
{
xl_btree_split *xlrec = (xl_btree_split *) rec;
sprintf(buf + strlen(buf), "split(%s): ",
(info == XLOG_BTREE_SPLIT) ? "right" : "left");
out_target(buf, &(xlrec->target));
@ -1114,6 +1128,7 @@ btree_desc(char *buf, uint8 xl_info, char* rec)
else if (info == XLOG_BTREE_NEWROOT)
{
xl_btree_newroot *xlrec = (xl_btree_newroot *) rec;
sprintf(buf + strlen(buf), "root: node %u/%u; blk %u",
xlrec->node.tblNode, xlrec->node.relNode,
BlockIdGetBlockNumber(&xlrec->rootblk));

View File

@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.63 2001/01/24 19:42:49 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.64 2001/03/22 03:59:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -79,13 +79,13 @@ _bt_search(Relation rel, int keysz, ScanKey scankey,
par_blkno = BufferGetBlockNumber(*bufP);
/*
* We need to save the bit image of the index entry we chose in the
* parent page on a stack. In case we split the tree, we'll use this
* bit image to figure out what our real parent page is, in case the
* parent splits while we're working lower in the tree. See the paper
* by Lehman and Yao for how this is detected and handled. (We use the
* child link to disambiguate duplicate keys in the index -- Lehman
* and Yao disallow duplicate keys.)
* We need to save the bit image of the index entry we chose in
* the parent page on a stack. In case we split the tree, we'll
* use this bit image to figure out what our real parent page is,
* in case the parent splits while we're working lower in the
* tree. See the paper by Lehman and Yao for how this is detected
* and handled. (We use the child link to disambiguate duplicate
* keys in the index -- Lehman and Yao disallow duplicate keys.)
*/
new_stack = (BTStack) palloc(sizeof(BTStackData));
new_stack->bts_blkno = par_blkno;
@ -98,9 +98,9 @@ _bt_search(Relation rel, int keysz, ScanKey scankey,
*bufP = _bt_getbuf(rel, blkno, BT_READ);
/*
* Race -- the page we just grabbed may have split since we read its
* pointer in the parent. If it has, we may need to move right to its
* new sibling. Do that.
* Race -- the page we just grabbed may have split since we read
* its pointer in the parent. If it has, we may need to move
* right to its new sibling. Do that.
*/
*bufP = _bt_moveright(rel, *bufP, keysz, scankey, BT_READ);
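(Illustrative aside, not part of the commit: the descent above records, for every level, which parent entry it followed — the real code stacks a copy of the parent item itself — and re-checks for concurrent splits by moving right whenever the page no longer covers the key. A stripped-down in-memory model of that walk, with hypothetical node/stack structs instead of buffers and the Lehman-Yao high-key test reduced to a simple bound:)

#include <stdlib.h>
#include <stdbool.h>

typedef struct Node
{
	bool		leaf;
	int			high_key;                   /* upper bound; effectively infinite on the rightmost page */
	struct Node *right;                     /* right sibling (split target) */
	int			nkeys;
	int			keys[8];                    /* keys[0] acts as minus infinity */
	struct Node *childs[8];
} Node;

typedef struct Stack
{
	Node	   *parent;
	int			offset;                     /* which downlink we followed */
	struct Stack *next;
} Stack;

/* Descend to the leaf that should hold 'key', pushing one stack entry per
 * level so a later split can find its parent again. */
static Node *
search(Node *root, int key, Stack **stack_out)
{
	Node	   *page = root;
	Stack	   *stack = NULL;

	while (!page->leaf)
	{
		/* move right past concurrent splits: the key may exceed our bound */
		while (key > page->high_key && page->right)
			page = page->right;

		int			off = 0;

		while (off + 1 < page->nkeys && key >= page->keys[off + 1])
			off++;                          /* last downlink whose separator <= search key */

		Stack	   *s = malloc(sizeof(Stack));

		s->parent = page;
		s->offset = off;
		s->next = stack;
		stack = s;

		page = page->childs[off];
	}
	*stack_out = stack;
	return page;
}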
@ -458,8 +458,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
_bt_orderkeys(rel, so);
/*
* Quit now if _bt_orderkeys() discovered that the scan keys can
* never be satisfied (eg, x == 1 AND x > 2).
* Quit now if _bt_orderkeys() discovered that the scan keys can never
* be satisfied (eg, x == 1 AND x > 2).
*/
if (!so->qual_ok)
return (RetrieveIndexResult) NULL;
@ -484,17 +484,16 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
break;
strat = _bt_getstrat(rel, attno,
so->keyData[i].sk_procedure);
/*
* Can we use this key as a starting boundary for this attr?
*
* We can use multiple keys if they look like, say, = >= =
* but we have to stop after accepting a > or < boundary.
* We can use multiple keys if they look like, say, = >= = but we
* have to stop after accepting a > or < boundary.
*/
if (strat == strat_total ||
strat == BTEqualStrategyNumber)
{
nKeyIs[keysCount++] = i;
}
else if (ScanDirectionIsBackward(dir) &&
(strat == BTLessStrategyNumber ||
strat == BTLessEqualStrategyNumber))
@ -536,7 +535,11 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
for (i = 0; i < keysCount; i++)
{
j = nKeyIs[i];
/* _bt_orderkeys disallows it, but it's place to add some code later */
/*
* _bt_orderkeys disallows it, but it's place to add some code
* later
*/
if (so->keyData[j].sk_flags & SK_ISNULL)
{
pfree(nKeyIs);
@ -601,6 +604,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
switch (strat_total)
{
case BTLessStrategyNumber:
/*
* Back up one to arrive at last item < scankey
*/
@ -612,6 +616,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
break;
case BTLessEqualStrategyNumber:
/*
* We need to find the last item <= scankey, so step forward
* till we find one > scankey, then step back one.
@ -645,9 +650,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
break;
case BTEqualStrategyNumber:
/*
* Make sure we are on the first equal item; might have to step
* forward if currently at end of page.
* Make sure we are on the first equal item; might have to
* step forward if currently at end of page.
*/
if (offnum > PageGetMaxOffsetNumber(page))
{
@ -662,6 +668,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
result = _bt_compare(rel, keysCount, scankeys, page, offnum);
if (result != 0)
goto nomatches; /* no equal items! */
/*
* If a backward scan was specified, need to start with last
* equal item not first one.
@ -685,6 +692,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
break;
case BTGreaterEqualStrategyNumber:
/*
* We want the first item >= scankey, which is where we are...
* unless we're not anywhere at all...
@ -700,9 +708,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
break;
case BTGreaterStrategyNumber:
/*
* We want the first item > scankey, so make sure we are on
* an item and then step over any equal items.
* We want the first item > scankey, so make sure we are on an
* item and then step over any equal items.
*/
if (offnum > PageGetMaxOffsetNumber(page))
{
@ -850,11 +859,12 @@ _bt_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
*bufP = _bt_getbuf(rel, blkno, BT_READ);
page = BufferGetPage(*bufP);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
/*
* If the adjacent page just split, then we have to walk
* right to find the block that's now adjacent to where
* we were. Because pages only split right, we don't have
* to worry about this failing to terminate.
* right to find the block that's now adjacent to where we
* were. Because pages only split right, we don't have to
* worry about this failing to terminate.
*/
while (opaque->btpo_next != obknum)
{
@ -981,7 +991,8 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir)
Assert(P_RIGHTMOST(opaque));
start = PageGetMaxOffsetNumber(page);
if (start < P_FIRSTDATAKEY(opaque)) /* watch out for empty page */
if (start < P_FIRSTDATAKEY(opaque)) /* watch out for empty
* page */
start = P_FIRSTDATAKEY(opaque);
}
else
@ -995,8 +1006,8 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir)
so->btso_curbuf = buf;
/*
* Left/rightmost page could be empty due to deletions,
* if so step till we find a nonempty page.
* Left/rightmost page could be empty due to deletions, if so step
* till we find a nonempty page.
*/
if (start > maxoff)
{

View File

@ -35,7 +35,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsort.c,v 1.59 2001/01/24 19:42:49 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsort.c,v 1.60 2001/03/22 03:59:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -73,10 +73,12 @@ typedef struct BTPageState
{
Buffer btps_buf; /* current buffer & page */
Page btps_page;
BTItem btps_minkey; /* copy of minimum key (first item) on page */
BTItem btps_minkey; /* copy of minimum key (first item) on
* page */
OffsetNumber btps_lastoff; /* last item offset loaded */
int btps_level; /* tree level (0 = leaf) */
Size btps_full; /* "full" if less than this much free space */
Size btps_full; /* "full" if less than this much free
* space */
struct BTPageState *btps_next; /* link to parent level, if any */
} BTPageState;
@ -352,6 +354,7 @@ _bt_buildadd(Relation index, BTPageState *state, BTItem bti)
if (pgspc < btisz || pgspc < state->btps_full)
{
/*
* Item won't fit on this page, or we feel the page is full enough
* already. Finish off the page and write it out.
@ -388,9 +391,9 @@ _bt_buildadd(Relation index, BTPageState *state, BTItem bti)
((PageHeader) opage)->pd_lower -= sizeof(ItemIdData);
/*
* Link the old buffer into its parent, using its minimum key.
* If we don't have a parent, we have to create one;
* this adds a new btree level.
* Link the old buffer into its parent, using its minimum key. If
* we don't have a parent, we have to create one; this adds a new
* btree level.
*/
if (state->btps_next == (BTPageState *) NULL)
{
@ -405,8 +408,8 @@ _bt_buildadd(Relation index, BTPageState *state, BTItem bti)
/*
* Save a copy of the minimum key for the new page. We have to
* copy it off the old page, not the new one, in case we are
* not at leaf level.
* copy it off the old page, not the new one, in case we are not
* at leaf level.
*/
state->btps_minkey = _bt_formitem(&(obti->bti_itup));
@ -414,13 +417,13 @@ _bt_buildadd(Relation index, BTPageState *state, BTItem bti)
* Set the sibling links for both pages, and parent links too.
*
* It's not necessary to set the parent link at all, because it's
* only used for handling concurrent root splits, but we may as well
* do it as a debugging aid. Note we set new page's link as well
* as old's, because if the new page turns out to be the last of
* the level, _bt_uppershutdown won't change it. The links may be
* out of date by the time the build finishes, but that's OK; they
* need only point to a left-sibling of the true parent. See the
* README file for more info.
* only used for handling concurrent root splits, but we may as
* well do it as a debugging aid. Note we set new page's link as
* well as old's, because if the new page turns out to be the last
* of the level, _bt_uppershutdown won't change it. The links may
* be out of date by the time the build finishes, but that's OK;
* they need only point to a left-sibling of the true parent. See
* the README file for more info.
*/
{
BTPageOpaque oopaque = (BTPageOpaque) PageGetSpecialPointer(opage);
@ -449,8 +452,8 @@ _bt_buildadd(Relation index, BTPageState *state, BTItem bti)
/*
* If the new item is the first for its page, stash a copy for later.
* Note this will only happen for the first item on a level; on later
* pages, the first item for a page is copied from the prior page
* in the code above.
* pages, the first item for a page is copied from the prior page in
* the code above.
*/
if (last_off == P_HIKEY)
{
@ -493,8 +496,8 @@ _bt_uppershutdown(Relation index, BTPageState *state)
*
* If we're at the top, it's the root, so attach it to the metapage.
* Otherwise, add an entry for it to its parent using its minimum
* key. This may cause the last page of the parent level to split,
* but that's not a problem -- we haven't gotten to it yet.
* key. This may cause the last page of the parent level to
* split, but that's not a problem -- we haven't gotten to it yet.
*/
if (s->btps_next == (BTPageState *) NULL)
{
@ -529,21 +532,28 @@ _bt_load(Relation index, BTSpool *btspool, BTSpool *btspool2)
{
BTPageState *state = NULL;
bool merge = (btspool2 != NULL);
BTItem bti, bti2 = NULL;
bool should_free, should_free2, load1;
BTItem bti,
bti2 = NULL;
bool should_free,
should_free2,
load1;
TupleDesc tupdes = RelationGetDescr(index);
int i, keysz = RelationGetNumberOfAttributes(index);
int i,
keysz = RelationGetNumberOfAttributes(index);
ScanKey indexScanKey = NULL;
if (merge)
{
/*
* Another BTSpool for dead tuples exists.
* Now we have to merge btspool and btspool2.
* Another BTSpool for dead tuples exists. Now we have to merge
* btspool and btspool2.
*/
ScanKey entry;
Datum attrDatum1, attrDatum2;
bool isFirstNull, isSecondNull;
Datum attrDatum1,
attrDatum2;
bool isFirstNull,
isSecondNull;
int32 compare;
/* the preparation of merge */
@ -613,7 +623,8 @@ _bt_load(Relation index, BTSpool *btspool, BTSpool *btspool2)
}
_bt_freeskey(indexScanKey);
}
else /* merge is unnecessary */
else
/* merge is unnecessary */
{
while (bti = (BTItem) tuplesort_getindextuple(btspool->sortstate, true, &should_free), bti != (BTItem) NULL)
{

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtutils.c,v 1.42 2001/01/24 19:42:49 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtutils.c,v 1.43 2001/03/22 03:59:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -240,8 +240,8 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
/*
* Initialize for processing of keys for attr 1.
*
* xform[i] holds a copy of the current scan key of strategy type i+1,
* if any; init[i] is TRUE if we have found such a key for this attr.
* xform[i] holds a copy of the current scan key of strategy type i+1, if
* any; init[i] is TRUE if we have found such a key for this attr.
*/
attno = 1;
map = IndexStrategyGetStrategyMap(RelationGetIndexStrategy(relation),
@ -263,7 +263,9 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
if (cur->sk_flags & SK_ISNULL)
{
so->qual_ok = false;
/* Quit processing so we don't try to invoke comparison
/*
* Quit processing so we don't try to invoke comparison
* routines on NULLs.
*/
return;
@ -271,8 +273,8 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
}
/*
* If we are at the end of the keys for a particular attr,
* finish up processing and emit the cleaned-up keys.
* If we are at the end of the keys for a particular attr, finish
* up processing and emit the cleaned-up keys.
*/
if (i == numberOfKeys || cur->sk_attno != attno)
{
@ -313,6 +315,7 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
}
else
{
/*
* No "=" for this key, so we're done with required keys
*/
@ -355,8 +358,8 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
* Emit the cleaned-up keys back into the key[] array in the
* correct order. Note we are overwriting our input here!
* It's OK because (a) xform[] is a physical copy of the keys
* we want, (b) we cannot emit more keys than we input, so
* we won't overwrite as-yet-unprocessed keys.
* we want, (b) we cannot emit more keys than we input, so we
* won't overwrite as-yet-unprocessed keys.
*/
for (j = BTMaxStrategyNumber; --j >= 0;)
{
@ -409,7 +412,8 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
if (DatumGetBool(test))
xform[j].sk_argument = cur->sk_argument;
else if (j == (BTEqualStrategyNumber - 1))
so->qual_ok = false; /* key == a && key == b, but a != b */
so->qual_ok = false; /* key == a && key == b, but a !=
* b */
}
else
{
@ -473,16 +477,18 @@ _bt_checkkeys(IndexScanDesc scan, IndexTuple tuple,
if (isNull)
{
/*
* Since NULLs are sorted after non-NULLs, we know we have
* reached the upper limit of the range of values for this
* index attr. On a forward scan, we can stop if this qual
* is one of the "must match" subset. On a backward scan,
* index attr. On a forward scan, we can stop if this qual is
* one of the "must match" subset. On a backward scan,
* however, we should keep going.
*/
if (keysok < so->numberOfRequiredKeys &&
ScanDirectionIsForward(dir))
*continuescan = false;
/*
* In any case, this indextuple doesn't match the qual.
*/
@ -498,9 +504,10 @@ _bt_checkkeys(IndexScanDesc scan, IndexTuple tuple,
if (DatumGetBool(test) == !!(key->sk_flags & SK_NEGATE))
{
/*
* Tuple fails this qual. If it's a required qual, then
* we can conclude no further tuples will pass, either.
* Tuple fails this qual. If it's a required qual, then we
* can conclude no further tuples will pass, either.
*/
if (keysok < so->numberOfRequiredKeys)
*continuescan = false;
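(Illustrative aside, not part of the commit: the two rules described above — NULLs sort after all non-NULL values, so a forward scan can stop entirely when a required key column turns up NULL, and a failed required qual likewise means no later tuple can pass — reduce to a tiny predicate. A sketch over a hypothetical single nullable column, assuming an ascending forward scan with an upper-bound qual:)

#include <stdbool.h>

typedef struct
{
	bool		isnull;
	int			value;
} Datum1;                                   /* hypothetical single-column tuple */

/* Returns true if the tuple matches; sets *continuescan to false when the
 * whole scan can stop early. */
static bool
check_keys(const Datum1 *tup, int upper_bound, bool forward,
		   bool key_required, bool *continuescan)
{
	if (tup->isnull)
	{
		if (key_required && forward)
			*continuescan = false;          /* everything after this is NULL too */
		return false;                       /* NULL never matches the qual */
	}
	if (tup->value > upper_bound)
	{
		if (key_required && forward)
			*continuescan = false;          /* ascending scan past the bound: done */
		return false;
	}
	return true;
}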

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtget.c,v 1.24 2001/01/24 19:42:49 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtget.c,v 1.25 2001/03/22 03:59:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/

View File

@ -15,7 +15,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtproc.c,v 1.31 2001/01/24 19:42:49 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtproc.c,v 1.32 2001/03/22 03:59:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -70,6 +70,7 @@ Datum
rt_box_size(PG_FUNCTION_ARGS)
{
BOX *a = PG_GETARG_BOX_P(0);
/* NB: size is an output argument */
float *size = (float *) PG_GETARG_POINTER(1);
@ -155,13 +156,15 @@ Datum
rt_poly_size(PG_FUNCTION_ARGS)
{
Pointer aptr = PG_GETARG_POINTER(0);
/* NB: size is an output argument */
float *size = (float *) PG_GETARG_POINTER(1);
POLYGON *a;
double xdim,
ydim;
/* Can't just use GETARG because of possibility that input is NULL;
/*
* Can't just use GETARG because of possibility that input is NULL;
* since POLYGON is toastable, GETARG will try to inspect its value
*/
if (aptr == NULL)

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.60 2001/03/07 21:20:26 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.61 2001/03/22 03:59:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -88,8 +88,10 @@ rtbuild(PG_FUNCTION_ARGS)
Relation index = (Relation) PG_GETARG_POINTER(1);
IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
Node *oldPred = (Node *) PG_GETARG_POINTER(3);
#ifdef NOT_USED
IndexStrategy istrat = (IndexStrategy) PG_GETARG_POINTER(4);
#endif
HeapScanDesc hscan;
HeapTuple htup;
@ -101,9 +103,11 @@ rtbuild(PG_FUNCTION_ARGS)
int nhtups,
nitups;
Node *pred = indexInfo->ii_Predicate;
#ifndef OMIT_PARTIAL_INDEX
TupleTable tupleTable;
TupleTableSlot *slot;
#endif
ExprContext *econtext;
InsertIndexResult res = NULL;
@ -171,6 +175,7 @@ rtbuild(PG_FUNCTION_ARGS)
nhtups++;
#ifndef OMIT_PARTIAL_INDEX
/*
* If oldPred != NULL, this is an EXTEND INDEX command, so skip
* this tuple if it was already in the existing partial index
@ -232,9 +237,7 @@ rtbuild(PG_FUNCTION_ARGS)
#ifndef OMIT_PARTIAL_INDEX
if (pred != NULL || oldPred != NULL)
{
ExecDropTupleTable(tupleTable, true);
}
#endif /* OMIT_PARTIAL_INDEX */
FreeExprContext(econtext);
@ -282,8 +285,10 @@ rtinsert(PG_FUNCTION_ARGS)
Datum *datum = (Datum *) PG_GETARG_POINTER(1);
char *nulls = (char *) PG_GETARG_POINTER(2);
ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
#ifdef NOT_USED
Relation heapRel = (Relation) PG_GETARG_POINTER(4);
#endif
InsertIndexResult res;
IndexTuple itup;
@ -665,10 +670,10 @@ rtintinsert(Relation r,
old = (IndexTuple) PageGetItem(p, PageGetItemId(p, stk->rts_child));
/*
* This is a hack. Right now, we force rtree internal keys to be constant
* size. To fix this, need delete the old key and add both left and
* right for the two new pages. The insertion of left may force a
* split if the new left key is bigger than the old key.
* This is a hack. Right now, we force rtree internal keys to be
* constant size. To fix this, need delete the old key and add both
* left and right for the two new pages. The insertion of left may
* force a split if the new left key is bigger than the old key.
*/
if (IndexTupleSize(old) != IndexTupleSize(ltup))
@ -794,9 +799,10 @@ rtpicksplit(Relation r,
right_avail_space;
/*
* First, make sure the new item is not so large that we can't possibly
* fit it on a page, even by itself. (It's sufficient to make this test
* here, since any oversize tuple must lead to a page split attempt.)
* First, make sure the new item is not so large that we can't
* possibly fit it on a page, even by itself. (It's sufficient to
* make this test here, since any oversize tuple must lead to a page
* split attempt.)
*/
newitemsz = IndexTupleTotalSize(itup);
if (newitemsz > RTPageAvailSpace)
@ -804,7 +810,8 @@ rtpicksplit(Relation r,
(unsigned long) newitemsz, (unsigned long) RTPageAvailSpace);
maxoff = PageGetMaxOffsetNumber(page);
newitemoff = OffsetNumberNext(maxoff); /* phony index for new item */
newitemoff = OffsetNumberNext(maxoff); /* phony index for new
* item */
/* Make arrays big enough for worst case, including sentinel */
nbytes = (maxoff + 2) * sizeof(OffsetNumber);
@ -827,8 +834,8 @@ rtpicksplit(Relation r,
item_2_sz = IndexTupleTotalSize(item_2);
/*
* Ignore seed pairs that don't leave room for the new item
* on either split page.
* Ignore seed pairs that don't leave room for the new item on
* either split page.
*/
if (newitemsz + item_1_sz > RTPageAvailSpace &&
newitemsz + item_2_sz > RTPageAvailSpace)
@ -841,8 +848,10 @@ rtpicksplit(Relation r,
PointerGetDatum(&size_union));
inter_d = FunctionCall2(&rtstate->interFn,
datum_alpha, datum_beta);
/* The interFn may return a NULL pointer (not an SQL null!)
* to indicate no intersection. sizeFn must cope with this.
/*
* The interFn may return a NULL pointer (not an SQL null!) to
* indicate no intersection. sizeFn must cope with this.
*/
FunctionCall2(&rtstate->sizeFn, inter_d,
PointerGetDatum(&size_inter));
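(Illustrative aside, not part of the commit: the seed loop above is rtpicksplit's Guttman-style quadratic split — for every pair of existing entries it measures how wasteful grouping them would be, here taken as union area minus intersection area, with a non-overlapping pair counting an intersection of zero, which is what the NULL-returning interFn comment is about; the most wasteful pair seeds the two new pages. A plain-rectangle sketch of that computation, independent of the rtree function-call machinery:)

typedef struct { double xlo, ylo, xhi, yhi; } Rect;

static double
area(Rect r)
{
	return (r.xhi - r.xlo) * (r.yhi - r.ylo);
}

static Rect
rect_union(Rect a, Rect b)
{
	Rect		u;

	u.xlo = a.xlo < b.xlo ? a.xlo : b.xlo;
	u.ylo = a.ylo < b.ylo ? a.ylo : b.ylo;
	u.xhi = a.xhi > b.xhi ? a.xhi : b.xhi;
	u.yhi = a.yhi > b.yhi ? a.yhi : b.yhi;
	return u;
}

/* Area of the overlap, or 0 when the rectangles do not intersect. */
static double
inter_area(Rect a, Rect b)
{
	double		xlo = a.xlo > b.xlo ? a.xlo : b.xlo;
	double		ylo = a.ylo > b.ylo ? a.ylo : b.ylo;
	double		xhi = a.xhi < b.xhi ? a.xhi : b.xhi;
	double		yhi = a.yhi < b.yhi ? a.yhi : b.yhi;

	if (xhi <= xlo || yhi <= ylo)
		return 0.0;
	return (xhi - xlo) * (yhi - ylo);
}

/* Pick the pair of entries that would waste the most space if grouped
 * together; they become the seeds of the two post-split pages. */
static void
pick_seeds(const Rect *item, int n, int *seed1, int *seed2)
{
	double		worst = -1.0;

	*seed1 = 0;
	*seed2 = (n > 1) ? 1 : 0;
	for (int i = 0; i < n - 1; i++)
		for (int j = i + 1; j < n; j++)
		{
			double		waste = area(rect_union(item[i], item[j]))
								- inter_area(item[i], item[j]);

			if (waste > worst)
			{
				worst = waste;
				*seed1 = i;
				*seed2 = j;
			}
		}
}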
@ -869,6 +878,7 @@ rtpicksplit(Relation r,
if (firsttime)
{
/*
* There is no possible split except to put the new item on its
* own page. Since we still have to compute the union rectangles,
@ -922,8 +932,8 @@ rtpicksplit(Relation r,
/*
* If we've already decided where to place this item, just put it
* on the correct list. Otherwise, we need to figure out which page
* needs the least enlargement in order to store the item.
* on the correct list. Otherwise, we need to figure out which
* page needs the least enlargement in order to store the item.
*/
if (i == seed_1)
@ -961,12 +971,13 @@ rtpicksplit(Relation r,
PointerGetDatum(&size_beta));
/*
* We prefer the page that shows smaller enlargement of its union area
* (Guttman's algorithm), but we must take care that at least one page
* will still have room for the new item after this one is added.
* We prefer the page that shows smaller enlargement of its union
* area (Guttman's algorithm), but we must take care that at least
* one page will still have room for the new item after this one
* is added.
*
* (We know that all the old items together can fit on one page,
* so we need not worry about any other problem than failing to fit
* (We know that all the old items together can fit on one page, so
* we need not worry about any other problem than failing to fit
* the new item.)
*/
left_feasible = (left_avail_space >= item_1_sz &&

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.35 2001/01/24 19:42:50 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.36 2001/03/22 03:59:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/transam/transam.c,v 1.41 2001/03/18 20:18:59 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/transam.c,v 1.42 2001/03/22 03:59:17 momjian Exp $
*
* NOTES
* This file contains the high level access-method interface to the

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/transam/Attic/transsup.c,v 1.28 2001/01/24 19:42:51 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/Attic/transsup.c,v 1.29 2001/03/22 03:59:17 momjian Exp $
*
* NOTES
* This file contains support functions for the high
View File
@ -6,7 +6,7 @@
* Copyright (c) 2000, PostgreSQL Global Development Group
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/transam/varsup.c,v 1.37 2001/03/18 20:18:59 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/varsup.c,v 1.38 2001/03/22 03:59:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -32,9 +32,10 @@ VariableCache ShmemVariableCache = NULL;
void
GetNewTransactionId(TransactionId *xid)
{
/*
* During bootstrap initialization, we return the special
* bootstrap transaction id.
* During bootstrap initialization, we return the special bootstrap
* transaction id.
*/
if (AMI_OVERRIDE)
{
@ -60,9 +61,10 @@ GetNewTransactionId(TransactionId *xid)
void
ReadNewTransactionId(TransactionId *xid)
{
/*
* During bootstrap initialization, we return the special
* bootstrap transaction id.
* During bootstrap initialization, we return the special bootstrap
* transaction id.
*/
if (AMI_OVERRIDE)
{
@ -130,10 +132,9 @@ CheckMaxObjectId(Oid assigned_oid)
}
/*
* We have exceeded the logged oid range.
* We should lock the database and kill all other backends
* but we are loading oid's that we can not guarantee are unique
* anyway, so we must rely on the user.
* We have exceeded the logged oid range. We should lock the database
* and kill all other backends but we are loading oid's that we can
* not guarantee are unique anyway, so we must rely on the user.
*/
XLogPutNextOid(assigned_oid + VAR_OID_PREFETCH);
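The comment above is about recovering the OID counter after loading user-supplied values: a new ceiling is logged well ahead of anything handed out, so a restart can resume safely beyond whatever may already have been used. A rough sketch of that prefetch idea, with the log call reduced to a printf; the VAR_OID_PREFETCH value is illustrative and log_next_oid merely stands in for XLogPutNextOid:

#include <stdio.h>

#define VAR_OID_PREFETCH 8192          /* how far ahead to log */

static unsigned int next_oid = 100;    /* next value to hand out */
static unsigned int logged_oid = 100;  /* highest value covered by the "log" */

static void log_next_oid(unsigned int ceiling)
{
    /* stand-in for XLogPutNextOid(): record the new ceiling durably */
    printf("log: oids below %u may have been used\n", ceiling);
    logged_oid = ceiling;
}

static unsigned int get_new_oid(void)
{
    if (next_oid >= logged_oid)
        log_next_oid(next_oid + VAR_OID_PREFETCH);
    return next_oid++;
}

int main(void)
{
    for (int i = 0; i < 3; i++)
        printf("assigned %u\n", get_new_oid());
    return 0;
}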
View File
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.99 2001/03/13 01:17:05 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.100 2001/03/22 03:59:18 momjian Exp $
*
* NOTES
* Transaction aborts can now occur two ways:
@ -222,7 +222,8 @@ int DefaultXactIsoLevel = XACT_READ_COMMITTED;
int XactIsoLevel;
int CommitDelay = 0; /* precommit delay in microseconds */
int CommitSiblings = 5; /* number of concurrent xacts needed to sleep */
int CommitSiblings = 5; /* number of concurrent xacts needed to
* sleep */
static void (*_RollbackFunc) (void *) = NULL;
static void *_RollbackData = NULL;
@ -679,20 +680,21 @@ RecordTransactionCommit()
rdata.next = NULL;
START_CRIT_SECTION();
/*
* SHOULD SAVE ARRAY OF RELFILENODE-s TO DROP
*/
recptr = XLogInsert(RM_XACT_ID, XLOG_XACT_COMMIT, &rdata);
/*
* Sleep before commit! So we can flush more than one
* commit records per single fsync. (The idea is some other
* backend may do the XLogFlush while we're sleeping. This
* needs work still, because on most Unixen, the minimum
* select() delay is 10msec or more, which is way too long.)
* Sleep before commit! So we can flush more than one commit
* records per single fsync. (The idea is some other backend may
* do the XLogFlush while we're sleeping. This needs work still,
* because on most Unixen, the minimum select() delay is 10msec or
* more, which is way too long.)
*
* We do not sleep if enableFsync is not turned on, nor if there
* are fewer than CommitSiblings other backends with active
* We do not sleep if enableFsync is not turned on, nor if there are
* fewer than CommitSiblings other backends with active
* transactions.
*/
if (CommitDelay > 0 && enableFsync &&
@ -896,10 +898,8 @@ AtAbort_Memory(void)
MemoryContextResetAndDeleteChildren(TransactionCommandContext);
}
else
{
MemoryContextSwitchTo(TopMemoryContext);
}
}
/* ----------------------------------------------------------------
@ -1021,6 +1021,7 @@ CurrentXactInProgress(void)
{
return CurrentTransactionState->state == TRANS_INPROGRESS;
}
#endif
/* --------------------------------
@ -1143,15 +1144,16 @@ AbortTransaction(void)
/*
* Release any spinlocks or buffer context locks we might be holding
* as quickly as possible. (Real locks, however, must be held till
* we finish aborting.) Releasing spinlocks is critical since we
* might try to grab them again while cleaning up!
* as quickly as possible. (Real locks, however, must be held till we
* finish aborting.) Releasing spinlocks is critical since we might
* try to grab them again while cleaning up!
*/
ProcReleaseSpins(NULL);
UnlockBuffers();
/*
* Also clean up any open wait for lock, since the lock manager
* will choke if we try to wait for another lock before doing this.
* Also clean up any open wait for lock, since the lock manager will
* choke if we try to wait for another lock before doing this.
*/
LockWaitCancel();
@ -1327,8 +1329,8 @@ StartTransactionCommand(void)
}
/*
* We must switch to TransactionCommandContext before returning.
* This is already done if we called StartTransaction, otherwise not.
* We must switch to TransactionCommandContext before returning. This
* is already done if we called StartTransaction, otherwise not.
*/
Assert(TransactionCommandContext != NULL);
MemoryContextSwitchTo(TransactionCommandContext);
@ -1765,9 +1767,7 @@ xact_redo(XLogRecPtr lsn, XLogRecord *record)
/* SHOULD REMOVE FILES OF ALL DROPPED RELATIONS */
}
else if (info == XLOG_XACT_ABORT)
{
TransactionIdAbort(record->xl_xid);
}
else
elog(STOP, "xact_redo: unknown op code %u", info);
}
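The rewrapped comment in RecordTransactionCommit describes the group-commit trick: after inserting the commit record, sleep briefly when fsync is enabled and enough sibling transactions are active, in the hope that another backend's flush covers this commit too. A toy sketch of just that decision; CountActiveSiblings and the surrounding structure are invented, only the GUC names mirror the real ones:

#include <stdio.h>
#include <unistd.h>

static int CommitDelay = 100;      /* precommit delay in microseconds */
static int CommitSiblings = 5;     /* other active xacts needed to bother sleeping */
static int enableFsync = 1;

/* stand-in for counting other backends with open transactions */
static int CountActiveSiblings(void) { return 7; }

static void record_commit(void)
{
    /* ... insert the commit record into the log buffers here ... */

    if (CommitDelay > 0 && enableFsync &&
        CountActiveSiblings() >= CommitSiblings)
        usleep(CommitDelay);       /* give someone else a chance to fsync for us */

    /* ... then flush (fsync) up to the commit record if still needed ... */
    printf("commit flushed\n");
}

int main(void) { record_commit(); return 0; }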
View File
@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $Id: xid.c,v 1.29 2001/01/24 19:42:51 momjian Exp $
* $Id: xid.c,v 1.30 2001/03/22 03:59:18 momjian Exp $
*
* OLD COMMENTS
* XXX WARNING
@ -49,6 +49,7 @@ Datum
xidout(PG_FUNCTION_ARGS)
{
TransactionId transactionId = PG_GETARG_TRANSACTIONID(0);
/* maximum 32 bit unsigned integer representation takes 10 chars */
char *representation = palloc(11);
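The new comment in xidout notes why 11 bytes suffice: a 32-bit unsigned value prints as at most 10 decimal digits, plus the terminating NUL. A quick standalone check of that arithmetic, with snprintf in place of palloc:

#include <stdio.h>

int main(void)
{
    char representation[11];               /* 10 digits + '\0' covers 32-bit unsigned */
    unsigned int xid = 4294967295u;        /* UINT32_MAX: exactly 10 digits */

    int n = snprintf(representation, sizeof(representation), "%u", xid);
    printf("%s (%d digits)\n", representation, n);
    return 0;
}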
View File
@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.62 2001/03/18 20:18:59 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.63 2001/03/22 03:59:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -50,7 +50,8 @@
*/
#define SYNC_METHOD_FSYNC 0
#define SYNC_METHOD_FDATASYNC 1
#define SYNC_METHOD_OPEN 2 /* used for both O_SYNC and O_DSYNC */
#define SYNC_METHOD_OPEN 2 /* used for both O_SYNC and
* O_DSYNC */
#if defined(O_SYNC)
#define OPEN_SYNC_FLAG O_SYNC
@ -91,11 +92,13 @@
/* User-settable parameters */
int CheckPointSegments = 3;
int XLOGbuffers = 8;
int XLOGfiles = 0; /* how many files to pre-allocate during ckpt */
int XLOGfiles = 0; /* how many files to pre-allocate during
* ckpt */
int XLOG_DEBUG = 0;
char *XLOG_sync_method = NULL;
const char XLOG_sync_method_default[] = DEFAULT_SYNC_METHOD_STR;
char XLOG_archive_dir[MAXPGPATH]; /* null string means delete 'em */
char XLOG_archive_dir[MAXPGPATH]; /* null string means
* delete 'em */
/* these are derived from XLOG_sync_method by assign_xlog_sync_method */
static int sync_method = DEFAULT_SYNC_METHOD;
@ -229,6 +232,7 @@ typedef struct XLogCtlData
XLogwrtResult LogwrtResult;
/* Protected by logwrt_lck: */
XLogCtlWrite Write;
/*
* These values do not change after startup, although the pointed-to
* pages and xlblocks values certainly do. Permission to read/write
@ -384,8 +388,10 @@ static int readFile = -1;
static uint32 readId = 0;
static uint32 readSeg = 0;
static uint32 readOff = 0;
/* Buffer for currently read page (BLCKSZ bytes) */
static char *readBuf = NULL;
/* State information for XLOG reading */
static XLogRecPtr ReadRecPtr;
static XLogRecPtr EndRecPtr;
@ -463,8 +469,8 @@ XLogInsert(RmgrId rmid, uint8 info, XLogRecData *rdata)
}
/*
* In bootstrap mode, we don't actually log anything but XLOG resources;
* return a phony record pointer.
* In bootstrap mode, we don't actually log anything but XLOG
* resources; return a phony record pointer.
*/
if (IsBootstrapProcessingMode() && rmid != RM_XLOG_ID)
{
@ -479,16 +485,17 @@ XLogInsert(RmgrId rmid, uint8 info, XLogRecData *rdata)
* header isn't added into the CRC yet since we don't know the final
* length or info bits quite yet.
*
* We may have to loop back to here if a race condition is detected below.
* We could prevent the race by doing all this work while holding the
* insert spinlock, but it seems better to avoid doing CRC calculations
* while holding the lock. This means we have to be careful about
* modifying the rdata list until we know we aren't going to loop back
* again. The only change we allow ourselves to make earlier is to set
* rdt->data = NULL in list items we have decided we will have to back
* up the whole buffer for. This is OK because we will certainly decide
* the same thing again for those items if we do it over; doing it here
* saves an extra pass over the list later.
* We may have to loop back to here if a race condition is detected
* below. We could prevent the race by doing all this work while
* holding the insert spinlock, but it seems better to avoid doing CRC
* calculations while holding the lock. This means we have to be
* careful about modifying the rdata list until we know we aren't
* going to loop back again. The only change we allow ourselves to
* make earlier is to set rdt->data = NULL in list items we have
* decided we will have to back up the whole buffer for. This is OK
* because we will certainly decide the same thing again for those
* items if we do it over; doing it here saves an extra pass over the
* list later.
*/
begin:;
for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
@ -528,6 +535,7 @@ begin:;
{
/* OK, put it in this slot */
dtbuf[i] = rdt->buffer;
/*
* XXX We assume page LSN is first data on page
*/
@ -596,9 +604,9 @@ begin:;
S_UNLOCK(&(XLogCtl->info_lck));
/*
* If cache is half filled then try to acquire logwrt lock
* and do LOGWRT work, but only once per XLogInsert call.
* Ignore any fractional blocks in performing this check.
* If cache is half filled then try to acquire logwrt lock and
* do LOGWRT work, but only once per XLogInsert call. Ignore
* any fractional blocks in performing this check.
*/
LogwrtRqst.Write.xrecoff -= LogwrtRqst.Write.xrecoff % BLCKSZ;
if (do_logwrt &&
@ -625,8 +633,9 @@ begin:;
/*
* Check to see if my RedoRecPtr is out of date. If so, may have to
* go back and recompute everything. This can only happen just after a
* checkpoint, so it's better to be slow in this case and fast otherwise.
* go back and recompute everything. This can only happen just after
* a checkpoint, so it's better to be slow in this case and fast
* otherwise.
*/
if (!XLByteEQ(RedoRecPtr, Insert->RedoRecPtr))
{
@ -640,9 +649,10 @@ begin:;
if (dtbuf_bkp[i] == false &&
XLByteLE(dtbuf_lsn[i], RedoRecPtr))
{
/*
* Oops, this buffer now needs to be backed up, but we didn't
* think so above. Start over.
* Oops, this buffer now needs to be backed up, but we
* didn't think so above. Start over.
*/
S_UNLOCK(&(XLogCtl->insert_lck));
END_CRIT_SECTION();
@ -658,8 +668,9 @@ begin:;
* this loop, write_len includes the backup block data.
*
* Also set the appropriate info bits to show which buffers were backed
* up. The i'th XLR_SET_BKP_BLOCK bit corresponds to the i'th distinct
* buffer value (ignoring InvalidBuffer) appearing in the rdata list.
* up. The i'th XLR_SET_BKP_BLOCK bit corresponds to the i'th
* distinct buffer value (ignoring InvalidBuffer) appearing in the
* rdata list.
*/
write_len = len;
for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
@ -795,14 +806,15 @@ begin:;
freespace = INSERT_FREESPACE(Insert);
/*
* The recptr I return is the beginning of the *next* record.
* This will be stored as LSN for changed data pages...
* The recptr I return is the beginning of the *next* record. This
* will be stored as LSN for changed data pages...
*/
INSERT_RECPTR(RecPtr, Insert, curridx);
/* Need to update shared LogwrtRqst if some block was filled up */
if (freespace < SizeOfXLogRecord)
updrqst = true; /* curridx is filled and available for writing out */
updrqst = true; /* curridx is filled and available for
* writing out */
else
curridx = PrevBufIdx(curridx);
WriteRqst = XLogCtl->xlblocks[curridx];
@ -850,9 +862,9 @@ AdvanceXLInsertBuffer(void)
LogwrtResult = Insert->LogwrtResult;
/*
* Get ending-offset of the buffer page we need to replace (this may be
* zero if the buffer hasn't been used yet). Fall through if it's already
* written out.
* Get ending-offset of the buffer page we need to replace (this may
* be zero if the buffer hasn't been used yet). Fall through if it's
* already written out.
*/
OldPageRqstPtr = XLogCtl->xlblocks[nextidx];
if (!XLByteLE(OldPageRqstPtr, LogwrtResult.Write))
@ -883,8 +895,8 @@ AdvanceXLInsertBuffer(void)
}
/*
* LogwrtResult lock is busy or we know the page is still dirty.
* Try to acquire logwrt lock and write full blocks.
* LogwrtResult lock is busy or we know the page is still
* dirty. Try to acquire logwrt lock and write full blocks.
*/
if (!TAS(&(XLogCtl->logwrt_lck)))
{
@ -896,9 +908,10 @@ AdvanceXLInsertBuffer(void)
Insert->LogwrtResult = LogwrtResult;
break;
}
/*
* Have to write buffers while holding insert lock.
* This is not good, so only write as much as we absolutely
* Have to write buffers while holding insert lock. This
* is not good, so only write as much as we absolutely
* must.
*/
WriteRqst.Write = OldPageRqstPtr;
@ -934,9 +947,10 @@ AdvanceXLInsertBuffer(void)
Insert->curridx = nextidx;
Insert->currpage = (XLogPageHeader) (XLogCtl->pages + nextidx * BLCKSZ);
Insert->currpos = ((char *) Insert->currpage) + SizeOfXLogPHD;
/*
* Be sure to re-zero the buffer so that bytes beyond what we've written
* will look like zeroes and not valid XLOG records...
* Be sure to re-zero the buffer so that bytes beyond what we've
* written will look like zeroes and not valid XLOG records...
*/
MemSet((char *) Insert->currpage, 0, BLCKSZ);
Insert->currpage->xlp_magic = XLOG_PAGE_MAGIC;
@ -959,11 +973,15 @@ XLogWrite(XLogwrtRqst WriteRqst)
bool ispartialpage;
bool use_existent;
/* Update local LogwrtResult (caller probably did this already, but...) */
/*
* Update local LogwrtResult (caller probably did this already,
* but...)
*/
LogwrtResult = Write->LogwrtResult;
while (XLByteLT(LogwrtResult.Write, WriteRqst.Write))
{
/*
* Make sure we're not ahead of the insert process. This could
* happen if we're passed a bogus WriteRqst.Write that is past the
@ -979,6 +997,7 @@ XLogWrite(XLogwrtRqst WriteRqst)
if (!XLByteInPrevSeg(LogwrtResult.Write, openLogId, openLogSeg))
{
/*
* Switch to new logfile segment.
*/
@ -1011,11 +1030,12 @@ XLogWrite(XLogwrtRqst WriteRqst)
ControlFile->logSeg = openLogSeg + 1;
ControlFile->time = time(NULL);
UpdateControlFile();
/*
* Signal postmaster to start a checkpoint if it's been too
* long since the last one. (We look at local copy of
* RedoRecPtr which might be a little out of date, but should
* be close enough for this purpose.)
* Signal postmaster to start a checkpoint if it's been
* too long since the last one. (We look at local copy of
* RedoRecPtr which might be a little out of date, but
* should be close enough for this purpose.)
*/
if (IsUnderPostmaster &&
(openLogId != RedoRecPtr.xlogid ||
@ -1056,9 +1076,9 @@ XLogWrite(XLogwrtRqst WriteRqst)
/*
* If we just wrote the whole last page of a logfile segment,
* fsync the segment immediately. This avoids having to go back
* and re-open prior segments when an fsync request comes along later.
* Doing it here ensures that one and only one backend will perform
* this fsync.
* and re-open prior segments when an fsync request comes along
* later. Doing it here ensures that one and only one backend will
* perform this fsync.
*/
if (openLogOff >= XLogSegSize && !ispartialpage)
{
@ -1081,10 +1101,11 @@ XLogWrite(XLogwrtRqst WriteRqst)
if (XLByteLT(LogwrtResult.Flush, WriteRqst.Flush) &&
XLByteLT(LogwrtResult.Flush, LogwrtResult.Write))
{
/*
* Could get here without iterating above loop, in which case
* we might have no open file or the wrong one. However, we do
* not need to fsync more than one file.
* Could get here without iterating above loop, in which case we
* might have no open file or the wrong one. However, we do not
* need to fsync more than one file.
*/
if (sync_method != SYNC_METHOD_OPEN)
{
@ -1110,8 +1131,8 @@ XLogWrite(XLogwrtRqst WriteRqst)
/*
* Update shared-memory status
*
* We make sure that the shared 'request' values do not fall behind
* the 'result' values. This is not absolutely essential, but it saves
* We make sure that the shared 'request' values do not fall behind the
* 'result' values. This is not absolutely essential, but it saves
* some code in a couple of places.
*/
S_LOCK(&(XLogCtl->info_lck));
@ -1163,8 +1184,9 @@ XLogFlush(XLogRecPtr record)
* Since fsync is usually a horribly expensive operation, we try to
* piggyback as much data as we can on each fsync: if we see any more
* data entered into the xlog buffer, we'll write and fsync that too,
* so that the final value of LogwrtResult.Flush is as large as possible.
* This gives us some chance of avoiding another fsync immediately after.
* so that the final value of LogwrtResult.Flush is as large as
* possible. This gives us some chance of avoiding another fsync
* immediately after.
*/
/* initialize to given target; may increase below */
@ -1192,9 +1214,7 @@ XLogFlush(XLogRecPtr record)
uint32 freespace = INSERT_FREESPACE(Insert);
if (freespace < SizeOfXLogRecord) /* buffer is full */
{
WriteRqstPtr = XLogCtl->xlblocks[Insert->curridx];
}
else
{
WriteRqstPtr = XLogCtl->xlblocks[Insert->curridx];
@ -1257,7 +1277,8 @@ XLogFileInit(uint32 log, uint32 seg,
XLogFileName(path, log, seg);
/*
* Try to use existent file (checkpoint maker may have created it already)
* Try to use existent file (checkpoint maker may have created it
* already)
*/
if (*use_existent)
{
@ -1274,10 +1295,10 @@ XLogFileInit(uint32 log, uint32 seg,
}
/*
* Initialize an empty (all zeroes) segment. NOTE: it is possible that
* another process is doing the same thing. If so, we will end up
* pre-creating an extra log segment. That seems OK, and better than
* holding the spinlock throughout this lengthy process.
* Initialize an empty (all zeroes) segment. NOTE: it is possible
* that another process is doing the same thing. If so, we will end
* up pre-creating an extra log segment. That seems OK, and better
* than holding the spinlock throughout this lengthy process.
*/
snprintf(tmppath, MAXPGPATH, "%s%cxlogtemp.%d",
XLogDir, SEP_CHAR, (int) getpid());
@ -1306,7 +1327,10 @@ XLogFileInit(uint32 log, uint32 seg,
{
int save_errno = errno;
/* If we fail to make the file, delete it to release disk space */
/*
* If we fail to make the file, delete it to release disk
* space
*/
unlink(tmppath);
errno = save_errno;
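The comments in these XLogFileInit hunks describe how a segment is created: zero-fill a file under a temporary name, unlink it if the write fails (for instance on a full disk), and only then install it under its permanent name, so a half-built segment is never visible. A simplified userland sketch of the same pattern; the file names, the tiny segment size, and the use of rename() for the final install are all stand-ins for what the backend actually does:

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

#define SEG_SIZE (64 * 1024)        /* toy segment size, not XLogSegSize */

int main(void)
{
    char tmppath[64], path[] = "segment.final";
    char zbuffer[8192];
    int fd;

    snprintf(tmppath, sizeof(tmppath), "segtemp.%d", (int) getpid());
    memset(zbuffer, 0, sizeof(zbuffer));

    fd = open(tmppath, O_RDWR | O_CREAT | O_EXCL, 0600);
    if (fd < 0) { perror("open"); return 1; }

    /* zero-fill the whole segment; on failure, remove the temp file so the
     * partially written file does not keep holding disk space */
    for (size_t done = 0; done < SEG_SIZE; done += sizeof(zbuffer))
    {
        if (write(fd, zbuffer, sizeof(zbuffer)) != (ssize_t) sizeof(zbuffer))
        {
            int save_errno = errno ? errno : ENOSPC;
            unlink(tmppath);
            errno = save_errno;
            perror("write");
            return 1;
        }
    }
    if (fsync(fd) != 0 || close(fd) != 0) { perror("fsync/close"); return 1; }

    /* only now does the segment appear under its permanent name */
    if (rename(tmppath, path) != 0) { perror("rename"); return 1; }
    printf("installed %s\n", path);
    return 0;
}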
@ -1337,9 +1361,7 @@ XLogFileInit(uint32 log, uint32 seg,
strcpy(targpath, path);
if (!*use_existent)
{
unlink(targpath);
}
else
{
while ((fd = BasicOpenFile(targpath, O_RDWR | PG_BINARY,
@ -1569,7 +1591,8 @@ RecordIsValid(XLogRecord *record, XLogRecPtr recptr, int emode)
COMP_CRC64(crc, blk + sizeof(crc64),
sizeof(BkpBlock) - sizeof(crc64));
FIN_CRC64(crc);
memcpy((char*)&cbuf, blk, sizeof(crc64)); /* don't assume alignment */
memcpy((char *) &cbuf, blk, sizeof(crc64)); /* don't assume
* alignment */
if (!EQ_CRC64(cbuf, crc))
{
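The "don't assume alignment" comment in RecordIsValid is about reading a stored 64-bit CRC that may sit at an odd offset inside the record: memcpy it into an aligned local before comparing, rather than dereferencing the raw pointer. A self-contained illustration of the technique, using a 64-bit FNV-1a hash purely as a stand-in for the backend's CRC-64:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* FNV-1a 64-bit hash, standing in for the real CRC-64 */
static uint64_t fnv1a64(const void *data, size_t len)
{
    const unsigned char *p = data;
    uint64_t h = 14695981039346656037ULL;
    for (size_t i = 0; i < len; i++)
    {
        h ^= p[i];
        h *= 1099511628211ULL;
    }
    return h;
}

int main(void)
{
    /* a "record": 3 payload bytes followed by the stored checksum,
     * deliberately at an odd (unaligned) offset */
    unsigned char record[3 + sizeof(uint64_t)] = { 'a', 'b', 'c' };
    uint64_t crc = fnv1a64(record, 3);
    memcpy(record + 3, &crc, sizeof(crc));       /* store it unaligned */

    /* verification: never dereference record+3 as a uint64_t* directly */
    uint64_t stored;
    memcpy(&stored, record + 3, sizeof(stored)); /* don't assume alignment */
    uint64_t fresh = fnv1a64(record, 3);

    printf("checksum %s\n", stored == fresh ? "ok" : "BAD");
    return 0;
}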
@ -1609,13 +1632,14 @@ ReadRecord(XLogRecPtr *RecPtr, int emode, char *buffer)
if (readBuf == NULL)
{
/*
* First time through, permanently allocate readBuf. We do it
* this way, rather than just making a static array, for two
* reasons: (1) no need to waste the storage in most instantiations
* of the backend; (2) a static char array isn't guaranteed to
* have any particular alignment, whereas malloc() will provide
* MAXALIGN'd storage.
* reasons: (1) no need to waste the storage in most
* instantiations of the backend; (2) a static char array isn't
* guaranteed to have any particular alignment, whereas malloc()
* will provide MAXALIGN'd storage.
*/
readBuf = (char *) malloc(BLCKSZ);
Assert(readBuf != NULL);
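The comment explains why readBuf is malloc'd once rather than declared as a static char array: besides not reserving the space in every backend, malloc guarantees storage aligned for any object type, which a bare char array does not. A small C11 illustration of that property; the buffer size is just a BLCKSZ-like constant:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>

#define BLCKSZ 8192

static char static_buf[BLCKSZ];        /* alignment only guaranteed for char */

int main(void)
{
    char *heap_buf = malloc(BLCKSZ);   /* aligned for any fundamental type */
    if (heap_buf == NULL)
        return 1;

    printf("static buf %% %zu = %zu\n", _Alignof(max_align_t),
           (size_t) ((uintptr_t) static_buf % _Alignof(max_align_t)));
    printf("heap buf   %% %zu = %zu\n", _Alignof(max_align_t),
           (size_t) ((uintptr_t) heap_buf % _Alignof(max_align_t)));

    free(heap_buf);
    return 0;
}

In practice a compiler may happen to align the static array generously; the comment is about the guarantee, which only malloc provides.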
@ -1688,9 +1712,10 @@ ReadRecord(XLogRecPtr *RecPtr, int emode, char *buffer)
record = (XLogRecord *) ((char *) readBuf + RecPtr->xrecoff % BLCKSZ);
got_record:;
/*
* Currently, xl_len == 0 must be bad data, but that might not be
* true forever. See note in XLogInsert.
* Currently, xl_len == 0 must be bad data, but that might not be true
* forever. See note in XLogInsert.
*/
if (record->xl_len == 0)
{
@ -1698,8 +1723,10 @@ got_record:;
RecPtr->xlogid, RecPtr->xrecoff);
goto next_record_is_invalid;
}
/*
* Compute total length of record including any appended backup blocks.
* Compute total length of record including any appended backup
* blocks.
*/
total_len = SizeOfXLogRecord + record->xl_len;
for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
@ -1708,6 +1735,7 @@ got_record:;
continue;
total_len += sizeof(BkpBlock) + BLCKSZ;
}
/*
* Make sure it will fit in buffer (currently, it is mechanically
* impossible for this test to fail, but it seems like a good idea
@ -1839,14 +1867,16 @@ ValidXLOGHeader(XLogPageHeader hdr, int emode, bool checkSUI)
hdr->xlp_info, readId, readSeg, readOff);
return false;
}
/*
* We disbelieve a SUI less than the previous page's SUI, or more
* than a few counts greater. In theory as many as 512 shutdown
* checkpoint records could appear on a 32K-sized xlog page, so
* that's the most differential there could legitimately be.
* We disbelieve a SUI less than the previous page's SUI, or more than
* a few counts greater. In theory as many as 512 shutdown checkpoint
* records could appear on a 32K-sized xlog page, so that's the most
* differential there could legitimately be.
*
* Note this check can only be applied when we are reading the next page
* in sequence, so ReadRecord passes a flag indicating whether to check.
* in sequence, so ReadRecord passes a flag indicating whether to
* check.
*/
if (checkSUI)
{
@ -1891,8 +1921,10 @@ WriteControlFile(void)
{
int fd;
char buffer[BLCKSZ]; /* need not be aligned */
#ifdef USE_LOCALE
char *localeptr;
#endif
/*
@ -1911,10 +1943,11 @@ WriteControlFile(void)
if (!localeptr)
elog(STOP, "Invalid LC_CTYPE setting");
StrNCpy(ControlFile->lc_ctype, localeptr, LOCALE_NAME_BUFLEN);
/*
* Issue warning notice if initdb'ing in a locale that will not permit
* LIKE index optimization. This is not a clean place to do it, but
* I don't see a better place either...
* LIKE index optimization. This is not a clean place to do it, but I
* don't see a better place either...
*/
if (!locale_is_like_safe())
elog(NOTICE, "Initializing database with %s collation order."
@ -1936,11 +1969,11 @@ WriteControlFile(void)
FIN_CRC64(ControlFile->crc);
/*
* We write out BLCKSZ bytes into pg_control, zero-padding the
* excess over sizeof(ControlFileData). This reduces the odds
* of premature-EOF errors when reading pg_control. We'll still
* fail when we check the contents of the file, but hopefully with
* a more specific error than "couldn't read pg_control".
* We write out BLCKSZ bytes into pg_control, zero-padding the excess
* over sizeof(ControlFileData). This reduces the odds of
* premature-EOF errors when reading pg_control. We'll still fail
* when we check the contents of the file, but hopefully with a more
* specific error than "couldn't read pg_control".
*/
if (sizeof(ControlFileData) > BLCKSZ)
elog(STOP, "sizeof(ControlFileData) is too large ... fix xlog.c");
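The rewrapped comment explains why pg_control is written as a full BLCKSZ block with zero padding: a truncated file then fails the later content checks instead of producing a bare premature-EOF error. A sketch of the write side under that scheme; ControlData and its fields are placeholders, not the real ControlFileData layout:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define BLCKSZ 8192

typedef struct                 /* placeholder for ControlFileData */
{
    uint32_t version;
    uint64_t checkpoint_loc;
    uint64_t crc;
} ControlData;

int main(void)
{
    ControlData cf = { 1, 12345, 0 };
    char buffer[BLCKSZ];

    /* refuse to continue if the struct ever outgrows one block */
    if (sizeof(cf) > BLCKSZ)
    {
        fprintf(stderr, "control data too large\n");
        return 1;
    }

    memset(buffer, 0, BLCKSZ);         /* zero-pad the excess */
    memcpy(buffer, &cf, sizeof(cf));

    FILE *fp = fopen("pg_control.demo", "wb");
    if (!fp || fwrite(buffer, 1, BLCKSZ, fp) != BLCKSZ || fclose(fp) != 0)
    {
        perror("write control file");
        return 1;
    }
    printf("wrote %d-byte control block\n", BLCKSZ);
    return 0;
}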
@ -2002,10 +2035,11 @@ ReadControlFile(void)
elog(STOP, "Invalid CRC in control file");
/*
* Do compatibility checking immediately. We do this here for 2 reasons:
* Do compatibility checking immediately. We do this here for 2
* reasons:
*
* (1) if the database isn't compatible with the backend executable,
* we want to abort before we can possibly do any damage;
* (1) if the database isn't compatible with the backend executable, we
* want to abort before we can possibly do any damage;
*
* (2) this code is executed in the postmaster, so the setlocale() will
* propagate to forked backends, which aren't going to read this file
@ -2096,6 +2130,7 @@ XLOGShmemInit(void)
Assert(!found);
memset(XLogCtl, 0, sizeof(XLogCtlData));
/*
* Since XLogCtlData contains XLogRecPtr fields, its sizeof should be
* a multiple of the alignment for same, so no extra alignment padding
@ -2104,9 +2139,10 @@ XLOGShmemInit(void)
XLogCtl->xlblocks = (XLogRecPtr *)
(((char *) XLogCtl) + sizeof(XLogCtlData));
memset(XLogCtl->xlblocks, 0, sizeof(XLogRecPtr) * XLOGbuffers);
/*
* Here, on the other hand, we must MAXALIGN to ensure the page buffers
* have worst-case alignment.
* Here, on the other hand, we must MAXALIGN to ensure the page
* buffers have worst-case alignment.
*/
XLogCtl->pages =
((char *) XLogCtl) + MAXALIGN(sizeof(XLogCtlData) +
@ -2114,8 +2150,8 @@ XLOGShmemInit(void)
memset(XLogCtl->pages, 0, BLCKSZ * XLOGbuffers);
/*
* Do basic initialization of XLogCtl shared data.
* (StartupXLOG will fill in additional info.)
* Do basic initialization of XLogCtl shared data. (StartupXLOG will
* fill in additional info.)
*/
XLogCtl->XLogCacheByte = BLCKSZ * XLOGbuffers;
XLogCtl->XLogCacheBlck = XLOGbuffers - 1;
@ -2246,8 +2282,8 @@ StartupXLOG(void)
/*
* Read control file and check XLOG status looks valid.
*
* Note: in most control paths, *ControlFile is already valid and we
* need not do ReadControlFile() here, but might as well do it to be sure.
* Note: in most control paths, *ControlFile is already valid and we need
* not do ReadControlFile() here, but might as well do it to be sure.
*/
ReadControlFile();
@ -2297,10 +2333,8 @@ StartupXLOG(void)
InRecovery = true; /* force recovery even if SHUTDOWNED */
}
else
{
elog(STOP, "Unable to locate a valid CheckPoint record");
}
}
LastRec = RecPtr = checkPointLoc;
memcpy(&checkPoint, XLogRecGetData(record), sizeof(CheckPoint));
wasShutdown = (record->xl_info == XLOG_CHECKPOINT_SHUTDOWN);
@ -2336,9 +2370,7 @@ StartupXLOG(void)
InRecovery = true;
}
else if (ControlFile->state != DB_SHUTDOWNED)
{
InRecovery = true;
}
/* REDO */
if (InRecovery)
@ -2355,7 +2387,8 @@ StartupXLOG(void)
/* Is REDO required ? */
if (XLByteLT(checkPoint.redo, RecPtr))
record = ReadRecord(&(checkPoint.redo), STOP, buffer);
else /* read past CheckPoint record */
else
/* read past CheckPoint record */
record = ReadRecord(NULL, LOG, buffer);
if (record != NULL)
@ -2411,8 +2444,11 @@ StartupXLOG(void)
XLogCtl->xlblocks[0].xrecoff =
((EndOfLog.xrecoff - 1) / BLCKSZ + 1) * BLCKSZ;
Insert = &XLogCtl->Insert;
/* Tricky point here: readBuf contains the *last* block that the LastRec
* record spans, not the one it starts in, which is what we want.
/*
* Tricky point here: readBuf contains the *last* block that the
* LastRec record spans, not the one it starts in, which is what we
* want.
*/
Assert(readOff == (XLogCtl->xlblocks[0].xrecoff - BLCKSZ) % XLogSegSize);
memcpy((char *) Insert->currpage, readBuf, BLCKSZ);
@ -2458,6 +2494,7 @@ StartupXLOG(void)
if (InRecovery)
{
/*
* In case we had to use the secondary checkpoint, make sure that
* it will still be shown as the secondary checkpoint after this
@ -2639,17 +2676,17 @@ CreateCheckPoint(bool shutdown)
/*
* If this isn't a shutdown, and we have not inserted any XLOG records
* since the start of the last checkpoint, skip the checkpoint. The
* idea here is to avoid inserting duplicate checkpoints when the system
* is idle. That wastes log space, and more importantly it exposes us to
* possible loss of both current and previous checkpoint records if the
* machine crashes just as we're writing the update. (Perhaps it'd make
* even more sense to checkpoint only when the previous checkpoint record
* is in a different xlog page?)
* idea here is to avoid inserting duplicate checkpoints when the
* system is idle. That wastes log space, and more importantly it
* exposes us to possible loss of both current and previous checkpoint
* records if the machine crashes just as we're writing the update.
* (Perhaps it'd make even more sense to checkpoint only when the
* previous checkpoint record is in a different xlog page?)
*
* We have to make two tests to determine that nothing has happened since
* the start of the last checkpoint: current insertion point must match
* the end of the last checkpoint record, and its redo pointer must point
* to itself.
* the start of the last checkpoint: current insertion point must
* match the end of the last checkpoint record, and its redo pointer
* must point to itself.
*/
if (!shutdown)
{
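The expanded comment spells out when a non-shutdown checkpoint may be skipped: nothing has been logged since the previous checkpoint started, detected by two tests -- the current insert position equals the end of the last checkpoint record, and that record's redo pointer points at itself. A schematic version with flat 64-bit positions; the struct and field names are invented:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

typedef uint64_t LogPos;          /* flattened stand-in for XLogRecPtr */

struct last_ckpt
{
    LogPos start;                 /* where the checkpoint record begins */
    LogPos end;                   /* first byte after it */
    LogPos redo;                  /* redo pointer stored inside it */
};

static bool checkpoint_needed(LogPos current_insert, struct last_ckpt *ckpt,
                              bool shutdown)
{
    if (shutdown)
        return true;              /* shutdown checkpoints are never skipped */

    /* skip only if nothing was logged since the last checkpoint started */
    if (current_insert == ckpt->end && ckpt->redo == ckpt->start)
        return false;
    return true;
}

int main(void)
{
    struct last_ckpt ckpt = { 1000, 1064, 1000 };
    printf("idle system: %s\n",
           checkpoint_needed(1064, &ckpt, false) ? "checkpoint" : "skip");
    printf("after activity: %s\n",
           checkpoint_needed(2048, &ckpt, false) ? "checkpoint" : "skip");
    return 0;
}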
@ -2687,16 +2724,18 @@ CreateCheckPoint(bool shutdown)
freespace = BLCKSZ - SizeOfXLogPHD;
}
INSERT_RECPTR(checkPoint.redo, Insert, Insert->curridx);
/*
* Here we update the shared RedoRecPtr for future XLogInsert calls;
* this must be done while holding the insert lock.
*/
RedoRecPtr = XLogCtl->Insert.RedoRecPtr = checkPoint.redo;
/*
* Get UNDO record ptr - this is oldest of PROC->logRec values.
* We do this while holding insert lock to ensure that we won't miss
* any about-to-commit transactions (UNDO must include all xacts that
* have commits after REDO point).
* Get UNDO record ptr - this is oldest of PROC->logRec values. We do
* this while holding insert lock to ensure that we won't miss any
* about-to-commit transactions (UNDO must include all xacts that have
* commits after REDO point).
*/
checkPoint.undo = GetUndoRecPtr();
@ -2720,8 +2759,8 @@ CreateCheckPoint(bool shutdown)
SpinRelease(OidGenLockId);
/*
* Having constructed the checkpoint record, ensure all shmem disk buffers
* are flushed to disk.
* Having constructed the checkpoint record, ensure all shmem disk
* buffers are flushed to disk.
*/
FlushBufferPool();
@ -2748,9 +2787,9 @@ CreateCheckPoint(bool shutdown)
elog(STOP, "XLog concurrent activity while data base is shutting down");
/*
* Remember location of prior checkpoint's earliest info.
* Oldest item is redo or undo, whichever is older; but watch out
* for case that undo = 0.
* Remember location of prior checkpoint's earliest info. Oldest item
* is redo or undo, whichever is older; but watch out for case that
* undo = 0.
*/
if (ControlFile->checkPointCopy.undo.xrecoff != 0 &&
XLByteLT(ControlFile->checkPointCopy.undo,
@ -2846,9 +2885,7 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
memcpy(&checkPoint, XLogRecGetData(record), sizeof(CheckPoint));
/* In an ONLINE checkpoint, treat the counters like NEXTOID */
if (ShmemVariableCache->nextXid < checkPoint.nextXid)
{
ShmemVariableCache->nextXid = checkPoint.nextXid;
}
if (ShmemVariableCache->nextOid < checkPoint.nextOid)
{
ShmemVariableCache->nextOid = checkPoint.nextOid;
@ -2871,6 +2908,7 @@ xlog_desc(char *buf, uint8 xl_info, char* rec)
info == XLOG_CHECKPOINT_ONLINE)
{
CheckPoint *checkpoint = (CheckPoint *) rec;
sprintf(buf + strlen(buf), "checkpoint: redo %u/%u; undo %u/%u; "
"sui %u; xid %u; oid %u; %s",
checkpoint->redo.xlogid, checkpoint->redo.xrecoff,
@ -2923,15 +2961,19 @@ xlog_outrec(char *buf, XLogRecord *record)
bool
check_xlog_sync_method(const char *method)
{
if (strcasecmp(method, "fsync") == 0) return true;
if (strcasecmp(method, "fsync") == 0)
return true;
#ifdef HAVE_FDATASYNC
if (strcasecmp(method, "fdatasync") == 0) return true;
if (strcasecmp(method, "fdatasync") == 0)
return true;
#endif
#ifdef OPEN_SYNC_FLAG
if (strcasecmp(method, "open_sync") == 0) return true;
if (strcasecmp(method, "open_sync") == 0)
return true;
#endif
#ifdef OPEN_DATASYNC_FLAG
if (strcasecmp(method, "open_datasync") == 0) return true;
if (strcasecmp(method, "open_datasync") == 0)
return true;
#endif
return false;
}
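check_xlog_sync_method, only re-indented here, accepts a sync-method name when the platform provides the corresponding facility. A cut-down standalone equivalent of the same strcasecmp dispatch; defining the two feature macros up front simply simulates a platform that has them:

#include <stdio.h>
#include <stdbool.h>
#include <strings.h>              /* strcasecmp */

/* pretend both optional facilities exist on this platform */
#define HAVE_FDATASYNC
#define OPEN_SYNC_FLAG

static bool check_sync_method(const char *method)
{
    if (strcasecmp(method, "fsync") == 0)
        return true;
#ifdef HAVE_FDATASYNC
    if (strcasecmp(method, "fdatasync") == 0)
        return true;
#endif
#ifdef OPEN_SYNC_FLAG
    if (strcasecmp(method, "open_sync") == 0)
        return true;
#endif
    return false;
}

int main(void)
{
    printf("%d %d\n", check_sync_method("FSYNC"), check_sync_method("bogus"));
    return 0;
}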
@ -2978,11 +3020,12 @@ assign_xlog_sync_method(const char *method)
if (sync_method != new_sync_method || open_sync_bit != new_sync_bit)
{
/*
* To ensure that no blocks escape unsynced, force an fsync on
* the currently open log segment (if any). Also, if the open
* flag is changing, close the log file so it will be reopened
* (with new flag bit) at next use.
* To ensure that no blocks escape unsynced, force an fsync on the
* currently open log segment (if any). Also, if the open flag is
* changing, close the log file so it will be reopened (with new
* flag bit) at next use.
*/
if (openLogFile >= 0)
{
View File
@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $Header: /cvsroot/pgsql/src/backend/access/transam/xlogutils.c,v 1.14 2001/03/13 01:17:05 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/xlogutils.c,v 1.15 2001/03/22 03:59:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -239,6 +239,7 @@ static XLogRelDesc *_xlrelarr = NULL;
static Form_pg_class _xlpgcarr = NULL;
static int _xlast = 0;
static int _xlcnt = 0;
#define _XLOG_RELCACHESIZE 512
static void
View File
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/aclchk.c,v 1.46 2001/01/24 19:42:51 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/aclchk.c,v 1.47 2001/03/22 03:59:18 momjian Exp $
*
* NOTES
* See acl.h.
@ -250,8 +250,8 @@ aclcheck(char *relname, Acl *acl, AclId id, AclIdType idtype, AclMode mode)
num;
/*
* If ACL is null, default to "OK" --- this should not happen,
* since caller should have inserted appropriate default
* If ACL is null, default to "OK" --- this should not happen, since
* caller should have inserted appropriate default
*/
if (!acl)
{
@ -265,8 +265,8 @@ aclcheck(char *relname, Acl *acl, AclId id, AclIdType idtype, AclMode mode)
/*
* We'll treat the empty ACL like that, too, although this is more
* like an error (i.e., you manually blew away your ACL array) -- the
* system never creates an empty ACL, since there must always be
* a "world" entry in the first slot.
* system never creates an empty ACL, since there must always be a
* "world" entry in the first slot.
*/
if (num < 1)
{
View File
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/catalog.c,v 1.39 2001/01/24 19:42:51 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/catalog.c,v 1.40 2001/03/22 03:59:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
View File
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/heap.c,v 1.160 2001/02/14 21:34:59 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/heap.c,v 1.161 2001/03/22 03:59:19 momjian Exp $
*
*
* INTERFACE ROUTINES
@ -270,7 +270,11 @@ heap_create(char *relname,
if (istemp)
{
/* replace relname of caller with a unique name for a temp relation */
/*
* replace relname of caller with a unique name for a temp
* relation
*/
snprintf(relname, NAMEDATALEN, "pg_temp.%d.%u",
(int) MyProcPid, uniqueId++);
}
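The reflowed comment in heap_create covers replacing the caller's relname with a unique name for a temporary relation, built from the backend PID plus a per-backend counter. The naming scheme on its own, with getpid() in place of MyProcPid and NAMEDATALEN set to the era's 32-byte default purely for illustration:

#include <stdio.h>
#include <unistd.h>

#define NAMEDATALEN 32            /* illustrative default of the period */

static unsigned int uniqueId = 0; /* per-backend counter */

static void make_temp_relname(char *relname)
{
    /* PID keeps names unique across backends, the counter within one */
    snprintf(relname, NAMEDATALEN, "pg_temp.%d.%u",
             (int) getpid(), uniqueId++);
}

int main(void)
{
    char name[NAMEDATALEN];
    make_temp_relname(name);
    printf("%s\n", name);
    make_temp_relname(name);
    printf("%s\n", name);
    return 0;
}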
@ -738,6 +742,7 @@ AddNewRelationTuple(Relation pg_class_desc,
static void
AddNewRelationType(char *typeName, Oid new_rel_oid, Oid new_type_oid)
{
/*
* The sizes are set to oid size because it makes implementing sets
* MUCH easier, and no one (we hope) uses these fields to figure out
@ -1025,9 +1030,7 @@ RelationRemoveInheritance(Relation relation)
&entry);
while (HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
{
simple_heap_delete(catalogRelation, &tuple->t_self);
}
heap_endscan(scan);
heap_close(catalogRelation, RowExclusiveLock);
@ -1152,8 +1155,8 @@ RelationTruncateIndexes(Oid heapId)
/*
* We have to re-open the heap rel each time through this loop
* because index_build will close it again. We need grab no lock,
* however, because we assume heap_truncate is holding an exclusive
* lock on the heap rel.
* however, because we assume heap_truncate is holding an
* exclusive lock on the heap rel.
*/
heapRelation = heap_open(heapId, NoLock);
@ -1164,8 +1167,8 @@ RelationTruncateIndexes(Oid heapId)
LockRelation(currentIndex, AccessExclusiveLock);
/*
* Drop any buffers associated with this index. If they're
* dirty, they're just dropped without bothering to flush to disk.
* Drop any buffers associated with this index. If they're dirty,
* they're just dropped without bothering to flush to disk.
*/
DropRelationBuffers(currentIndex);
@ -1177,6 +1180,7 @@ RelationTruncateIndexes(Oid heapId)
InitIndexStrategy(indexInfo->ii_NumIndexAttrs,
currentIndex, accessMethodId);
index_build(heapRelation, currentIndex, indexInfo, NULL);
/*
* index_build will close both the heap and index relations (but
* not give up the locks we hold on them).
@ -1981,9 +1985,7 @@ RemoveAttrDefault(Relation rel)
adscan = heap_beginscan(adrel, 0, SnapshotNow, 1, &key);
while (HeapTupleIsValid(tup = heap_getnext(adscan, 0)))
{
simple_heap_delete(adrel, &tup->t_self);
}
heap_endscan(adscan);
heap_close(adrel, RowExclusiveLock);
@ -2005,9 +2007,7 @@ RemoveRelCheck(Relation rel)
rcscan = heap_beginscan(rcrel, 0, SnapshotNow, 1, &key);
while (HeapTupleIsValid(tup = heap_getnext(rcscan, 0)))
{
simple_heap_delete(rcrel, &tup->t_self);
}
heap_endscan(rcscan);
heap_close(rcrel, RowExclusiveLock);
@ -2044,9 +2044,7 @@ RemoveStatistics(Relation rel)
scan = heap_beginscan(pgstatistic, false, SnapshotNow, 1, &key);
while (HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
{
simple_heap_delete(pgstatistic, &tuple->t_self);
}
heap_endscan(scan);
heap_close(pgstatistic, RowExclusiveLock);

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.142 2001/02/23 09:31:52 inoue Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.143 2001/03/22 03:59:19 momjian Exp $
*
*
* INTERFACE ROUTINES
@ -301,7 +301,8 @@ ConstructTupleDescriptor(Relation heapRelation,
memcpy(to, from, ATTRIBUTE_TUPLE_SIZE);
/*
* Fix the stuff that should not be the same as the underlying attr
* Fix the stuff that should not be the same as the underlying
* attr
*/
to->attnum = i + 1;
@ -311,9 +312,9 @@ ConstructTupleDescriptor(Relation heapRelation,
to->attcacheoff = -1;
/*
* We do not yet have the correct relation OID for the index,
* so just set it invalid for now. InitializeAttributeOids()
* will fix it later.
* We do not yet have the correct relation OID for the index, so
* just set it invalid for now. InitializeAttributeOids() will
* fix it later.
*/
to->attrelid = InvalidOid;
}
@ -1008,10 +1009,8 @@ index_create(char *heapRelationName,
/* XXX shouldn't we close the heap and index rels here? */
}
else
{
index_build(heapRelation, indexRelation, indexInfo, NULL);
}
}
/* ----------------------------------------------------------------
*
@ -1081,12 +1080,12 @@ index_drop(Oid indexId)
heap_freetuple(tuple);
/*
* Update the pg_class tuple for the owning relation. We are presently
* too lazy to attempt to compute the new correct value of relhasindex
* (the next VACUUM will fix it if necessary). But we must send out a
* shared-cache-inval notice on the owning relation to ensure other
* backends update their relcache lists of indexes. So, unconditionally
* do setRelhasindex(true).
* Update the pg_class tuple for the owning relation. We are
* presently too lazy to attempt to compute the new correct value of
* relhasindex (the next VACUUM will fix it if necessary). But we
* must send out a shared-cache-inval notice on the owning relation to
* ensure other backends update their relcache lists of indexes. So,
* unconditionally do setRelhasindex(true).
*/
setRelhasindex(heapId, true);
@ -1326,8 +1325,8 @@ LockClassinfoForUpdate(Oid relid, HeapTuple rtup,
Relation relationRelation;
/*
* NOTE: get and hold RowExclusiveLock on pg_class, because caller will
* probably modify the rel's pg_class tuple later on.
* NOTE: get and hold RowExclusiveLock on pg_class, because caller
* will probably modify the rel's pg_class tuple later on.
*/
relationRelation = heap_openr(RelationRelationName, RowExclusiveLock);
classTuple = SearchSysCache(RELOID, PointerGetDatum(relid),
@ -1513,7 +1512,8 @@ setRelhasindex(Oid relid, bool hasindex)
void
setNewRelfilenode(Relation relation)
{
Relation pg_class, idescs[Num_pg_class_indices];
Relation pg_class,
idescs[Num_pg_class_indices];
Oid newrelfilenode;
bool in_place_update = false;
HeapTupleData lockTupleData;
@ -1577,6 +1577,7 @@ setNewRelfilenode(Relation relation)
/* Make sure the relfilenode change */
CommandCounterIncrement();
}
#endif /* OLD_FILE_NAMING */
/* ----------------
@ -1713,6 +1714,7 @@ UpdateStats(Oid relid, long reltuples)
*/
if (in_place_upd)
{
/*
* At bootstrap time, we don't need to worry about concurrency or
* visibility of changes, so we cheat. Also cheat if REINDEX.
@ -1787,9 +1789,11 @@ DefaultBuild(Relation heapRelation,
long reltuples,
indtuples;
Node *predicate = indexInfo->ii_Predicate;
#ifndef OMIT_PARTIAL_INDEX
TupleTable tupleTable;
TupleTableSlot *slot;
#endif
ExprContext *econtext;
InsertIndexResult insertResult;
@ -1855,6 +1859,7 @@ DefaultBuild(Relation heapRelation,
reltuples++;
#ifndef OMIT_PARTIAL_INDEX
/*
* If oldPred != NULL, this is an EXTEND INDEX command, so skip
* this tuple if it was already in the existing partial index
@ -1906,9 +1911,7 @@ DefaultBuild(Relation heapRelation,
#ifndef OMIT_PARTIAL_INDEX
if (predicate != NULL || oldPred != NULL)
{
ExecDropTupleTable(tupleTable, true);
}
#endif /* OMIT_PARTIAL_INDEX */
FreeExprContext(econtext);
@ -2098,9 +2101,10 @@ reindex_index(Oid indexId, bool force, bool inplace)
if (inplace)
{
/*
* Release any buffers associated with this index. If they're dirty,
* they're just dropped without bothering to flush to disk.
* Release any buffers associated with this index. If they're
* dirty, they're just dropped without bothering to flush to disk.
*/
DropRelationBuffers(iRel);
@ -2164,18 +2168,24 @@ reindex_relation(Oid relid, bool force)
bool old,
reindexed;
bool deactivate_needed, overwrite, upd_pg_class_inplace;
bool deactivate_needed,
overwrite,
upd_pg_class_inplace;
#ifdef OLD_FILE_NAMING
overwrite = upd_pg_class_inplace = deactivate_needed = true;
#else
Relation rel;
overwrite = upd_pg_class_inplace = deactivate_needed = false;
/*
* avoid heap_update() pg_class tuples while processing
* reindex for pg_class.
* avoid heap_update() pg_class tuples while processing reindex for
* pg_class.
*/
if (IsIgnoringSystemIndexes())
upd_pg_class_inplace = true;
/*
* ignore the indexes of the target system relation while processing
* reindex.
@ -2184,10 +2194,10 @@ reindex_relation(Oid relid, bool force)
if (!IsIgnoringSystemIndexes() && IsSystemRelationName(NameStr(rel->rd_rel->relname)))
deactivate_needed = true;
#ifndef ENABLE_REINDEX_NAILED_RELATIONS
/*
* nailed relations are never updated.
* We couldn't keep the consistency between the relation
* descriptors and pg_class tuples.
* nailed relations are never updated. We couldn't keep the
* consistency between the relation descriptors and pg_class tuples.
*/
if (rel->rd_isnailed)
{
@ -2200,9 +2210,10 @@ reindex_relation(Oid relid, bool force)
elog(ERROR, "the target relation %u is nailed", relid);
}
#endif /* ENABLE_REINDEX_NAILED_RELATIONS */
/*
* Shared system indexes must be overwritten because it's
* impossible to update pg_class tuples of all databases.
* Shared system indexes must be overwritten because it's impossible
* to update pg_class tuples of all databases.
*/
if (IsSharedSystemRelationName(NameStr(rel->rd_rel->relname)))
{
@ -2252,24 +2263,27 @@ reindex_relation(Oid relid, bool force)
heap_endscan(scan);
heap_close(indexRelation, AccessShareLock);
if (reindexed)
/*
* Ok,we could use the reindexed indexes of the target
* system relation now.
* Ok,we could use the reindexed indexes of the target system
* relation now.
*/
{
if (deactivate_needed)
{
if (!overwrite && relid == RelOid_pg_class)
{
/*
* For pg_class, relhasindex should be set
* to true here in place.
* For pg_class, relhasindex should be set to true here in
* place.
*/
setRelhasindex(relid, true);
CommandCounterIncrement();
/*
* However the following setRelhasindex()
* is needed to keep consistency with WAL.
* However the following setRelhasindex() is needed to
* keep consistency with WAL.
*/
}
setRelhasindex(relid, true);
View File
@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/indexing.c,v 1.76 2001/01/24 19:42:51 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/indexing.c,v 1.77 2001/03/22 03:59:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
View File
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_aggregate.c,v 1.37 2001/01/24 19:42:52 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_aggregate.c,v 1.38 2001/03/22 03:59:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -79,8 +79,8 @@ AggregateCreate(char *aggName,
/*
* Handle the aggregate's base type (input data type). This can be
* specified as 'ANY' for a data-independent transition function,
* such as COUNT(*).
* specified as 'ANY' for a data-independent transition function, such
* as COUNT(*).
*/
basetype = GetSysCacheOid(TYPENAME,
PointerGetDatum(aggbasetypeName),
@ -118,9 +118,7 @@ AggregateCreate(char *aggName,
nargs = 2;
}
else
{
nargs = 1;
}
tup = SearchSysCache(PROCNAME,
PointerGetDatum(aggtransfnName),
Int32GetDatum(nargs),
@ -134,11 +132,12 @@ AggregateCreate(char *aggName,
if (proc->prorettype != transtype)
elog(ERROR, "AggregateCreate: return type of '%s' is not '%s'",
aggtransfnName, aggtranstypeName);
/*
* If the transfn is strict and the initval is NULL, make sure
* input type and transtype are the same (or at least binary-
* compatible), so that it's OK to use the first input value
* as the initial transValue.
* If the transfn is strict and the initval is NULL, make sure input
* type and transtype are the same (or at least binary- compatible),
* so that it's OK to use the first input value as the initial
* transValue.
*/
if (proc->proisstrict && agginitval == NULL)
{
@ -168,6 +167,7 @@ AggregateCreate(char *aggName,
}
else
{
/*
* If no finalfn, aggregate result type is type of the state value
*/
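The rewrapped comment earlier in this file records the rule for a strict transition function with a NULL initval: the first input value is used directly as the initial state, so input type and state type must be (binary-)compatible. The same idea as a tiny fold over ints, where that compatibility is trivially met; all names here are made up:

#include <stdio.h>

static int add_trans(int state, int input) { return state + input; }

/*
 * Fold with an optional initial state. With no initval, the first input
 * seeds the state -- legal only because input and state share a type,
 * mirroring the compatibility rule noted in AggregateCreate.
 * (Assumes n >= 1 when initval is NULL.)
 */
static int aggregate(const int *vals, int n, const int *initval)
{
    int state;
    int i = 0;

    if (initval != NULL)
        state = *initval;
    else
        state = vals[i++];            /* first input becomes the state */

    for (; i < n; i++)
        state = add_trans(state, vals[i]);
    return state;
}

int main(void)
{
    int v[] = { 3, 4, 5 };
    int zero = 0;
    printf("with initval: %d\n", aggregate(v, 3, &zero));
    printf("without:      %d\n", aggregate(v, 3, NULL));
    return 0;
}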
View File
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_largeobject.c,v 1.7 2001/01/24 19:42:52 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_largeobject.c,v 1.8 2001/03/22 03:59:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
View File
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_operator.c,v 1.55 2001/01/24 19:42:52 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_operator.c,v 1.56 2001/03/22 03:59:20 momjian Exp $
*
* NOTES
* these routines moved here from commands/define.c and somewhat cleaned up.
View File
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_proc.c,v 1.53 2001/01/24 19:42:52 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_proc.c,v 1.54 2001/03/22 03:59:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -247,8 +247,8 @@ ProcedureCreate(char *procedureName,
* symbol. Also check for a valid function information record.
*
* We used to perform these checks only when the function was first
* called, but it seems friendlier to verify the library's validity
* at CREATE FUNCTION time.
* called, but it seems friendlier to verify the library's validity at
* CREATE FUNCTION time.
*/
if (languageObjectId == ClanguageId)
@ -355,7 +355,8 @@ checkretval(Oid rettype, List *queryTreeList)
tlist = parse->targetList;
/*
* The last query must be a SELECT if and only if there is a return type.
* The last query must be a SELECT if and only if there is a return
* type.
*/
if (rettype == InvalidOid)
{
@ -375,8 +376,8 @@ checkretval(Oid rettype, List *queryTreeList)
tlistlen = ExecCleanTargetListLength(tlist);
/*
* For base-type returns, the target list should have exactly one entry,
* and its type should agree with what the user declared.
* For base-type returns, the target list should have exactly one
* entry, and its type should agree with what the user declared.
*/
typerelid = typeidTypeRelid(rettype);
if (typerelid == InvalidOid)
@ -397,8 +398,8 @@ checkretval(Oid rettype, List *queryTreeList)
* If the target list is of length 1, and the type of the varnode in
* the target list is the same as the declared return type, this is
* okay. This can happen, for example, where the body of the function
* is 'SELECT (x = func2())', where func2 has the same return type
* as the function that's calling it.
* is 'SELECT (x = func2())', where func2 has the same return type as
* the function that's calling it.
*/
if (tlistlen == 1)
{
@ -408,10 +409,10 @@ checkretval(Oid rettype, List *queryTreeList)
}
/*
* By here, the procedure returns a tuple or set of tuples. This part of
* the typechecking is a hack. We look up the relation that is the
* declared return type, and be sure that attributes 1 .. n in the target
* list match the declared types.
* By here, the procedure returns a tuple or set of tuples. This part
* of the typechecking is a hack. We look up the relation that is the
* declared return type, and be sure that attributes 1 .. n in the
* target list match the declared types.
*/
reln = heap_open(typerelid, AccessShareLock);
relid = reln->rd_id;
View File
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_type.c,v 1.59 2001/02/12 20:07:21 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_type.c,v 1.60 2001/03/22 03:59:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
View File
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/analyze.c,v 1.14 2001/02/16 03:16:58 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/analyze.c,v 1.15 2001/03/22 03:59:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -86,9 +86,10 @@ analyze_rel(Oid relid, List *anal_cols2, int MESSAGE_LEVEL)
CommitTransactionCommand();
return;
}
/*
* We can VACUUM ANALYZE any table except pg_statistic.
* see update_relstats
* We can VACUUM ANALYZE any table except pg_statistic. see
* update_relstats
*/
if (strcmp(NameStr(((Form_pg_class) GETSTRUCT(tuple))->relname),
StatisticRelationName) == 0)
@ -104,9 +105,11 @@ analyze_rel(Oid relid, List *anal_cols2, int MESSAGE_LEVEL)
if (!pg_ownercheck(GetUserId(), RelationGetRelationName(onerel),
RELNAME))
{
/* we already did an elog during vacuum
elog(NOTICE, "Skipping \"%s\" --- only table owner can VACUUM it",
RelationGetRelationName(onerel));
/*
* we already did an elog during vacuum elog(NOTICE, "Skipping
* \"%s\" --- only table owner can VACUUM it",
* RelationGetRelationName(onerel));
*/
heap_close(onerel, NoLock);
CommitTransactionCommand();
@ -295,8 +298,9 @@ attr_stats(Relation onerel, int attr_cnt, VacAttrStats *vacattrstats, HeapTuple
stats->nonnull_cnt++;
/*
* If the value is toasted, detoast it to avoid repeated detoastings
* and resultant memory leakage inside the comparison routines.
* If the value is toasted, detoast it to avoid repeated
* detoastings and resultant memory leakage inside the comparison
* routines.
*/
if (!stats->attr->attbyval && stats->attr->attlen == -1)
value = PointerGetDatum(PG_DETOAST_DATUM(origvalue));
@ -489,22 +493,21 @@ update_attstats(Oid relid, int natts, VacAttrStats *vacattrstats)
{
/*
* empty relation, so put a dummy value in
* attdispersion
* empty relation, so put a dummy value in attdispersion
*/
selratio = 0;
}
else if (stats->null_cnt <= 1 && stats->best_cnt == 1)
{
/*
* looks like we have a unique-key attribute --- flag
* this with special -1.0 flag value.
* looks like we have a unique-key attribute --- flag this
* with special -1.0 flag value.
*
* The correct dispersion is 1.0/numberOfRows, but since
* the relation row count can get updated without
* recomputing dispersion, we want to store a
* "symbolic" value and figure 1.0/numberOfRows on the
* fly.
* The correct dispersion is 1.0/numberOfRows, but since the
* relation row count can get updated without recomputing
* dispersion, we want to store a "symbolic" value and
* figure 1.0/numberOfRows on the fly.
*/
selratio = -1;
}
@ -515,8 +518,7 @@ update_attstats(Oid relid, int natts, VacAttrStats *vacattrstats)
{
/*
* exact result when there are just 1 or 2
* values...
* exact result when there are just 1 or 2 values...
*/
double min_cnt_d = stats->min_cnt,
max_cnt_d = stats->max_cnt,
@ -552,12 +554,12 @@ update_attstats(Oid relid, int natts, VacAttrStats *vacattrstats)
/*
* Create pg_statistic tuples for the relation, if we have
* gathered the right data. del_stats() previously
* deleted all the pg_statistic tuples for the rel, so we
* just have to insert new ones here.
* gathered the right data. del_stats() previously deleted
* all the pg_statistic tuples for the rel, so we just have to
* insert new ones here.
*
* Note analyze_rel() has seen to it that we won't come here
* when vacuuming pg_statistic itself.
* Note analyze_rel() has seen to it that we won't come here when
* vacuuming pg_statistic itself.
*/
if (VacAttrStatsLtGtValid(stats) && stats->initialized)
{
@ -682,6 +684,3 @@ del_stats(Oid relid, int attcnt, int *attnums)
*/
heap_close(pgstatistic, NoLock);
}
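The update_attstats comments describe the special attdispersion encodings: 0 for an empty relation, and -1.0 as a symbolic marker for a unique column meaning 1.0/numberOfRows, so the stored value stays valid as the row count changes. A small sketch of how a consumer of the statistic might decode those values; the function is invented:

#include <stdio.h>

/* interpret a stored dispersion value the way the comments describe */
static double effective_dispersion(double stored, double reltuples)
{
    if (stored < 0.0)                      /* -1.0: unique attribute */
        return reltuples > 0.0 ? 1.0 / reltuples : 0.0;
    return stored;                         /* plain value, or 0 for empty rel */
}

int main(void)
{
    printf("unique col, 1e6 rows: %g\n", effective_dispersion(-1.0, 1e6));
    printf("ordinary col:         %g\n", effective_dispersion(0.25, 1e6));
    return 0;
}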
View File
@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.76 2001/01/24 19:42:52 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.77 2001/03/22 03:59:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -161,6 +161,7 @@ Async_Notify(char *relname)
/* no point in making duplicate entries in the list ... */
if (!AsyncExistsPendingNotify(relname))
{
/*
* We allocate list memory from the global malloc pool to ensure
* that it will live until we want to use it. This is probably
@ -349,9 +350,7 @@ Async_UnlistenAll()
sRel = heap_beginscan(lRel, 0, SnapshotNow, 1, key);
while (HeapTupleIsValid(lTuple = heap_getnext(sRel, 0)))
{
simple_heap_delete(lRel, &lTuple->t_self);
}
heap_endscan(sRel);
heap_close(lRel, AccessExclusiveLock);
@ -499,6 +498,7 @@ AtCommit_Notify()
*/
if (kill(listenerPID, SIGUSR2) < 0)
{
/*
* Get rid of pg_listener entry if it refers to a PID
* that no longer exists. Presumably, that backend
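The comment being rewrapped here concerns cleaning out pg_listener entries whose listener PID no longer exists, detected when the SIGUSR2 send fails. A minimal illustration of probing for a vanished process with kill(); using signal 0 is a common liveness check and only an approximation of what async.c does (it sends the real SIGUSR2 and reacts to the failure):

#include <stdio.h>
#include <errno.h>
#include <signal.h>
#include <unistd.h>

/* true if pid refers to a live process we could signal */
static int pid_exists(pid_t pid)
{
    if (kill(pid, 0) == 0)         /* signal 0: liveness/permission check only */
        return 1;
    return errno != ESRCH;         /* EPERM etc. still means "it exists" */
}

int main(void)
{
    printf("self:  %d\n", pid_exists(getpid()));
    printf("pid 1: %d\n", pid_exists(1));
    return 0;
}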
View File
@ -15,7 +15,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.64 2001/01/24 19:42:52 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.65 2001/03/22 03:59:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -75,8 +75,8 @@ cluster(char *oldrelname, char *oldindexname)
StrNCpy(saveoldindexname, oldindexname, NAMEDATALEN);
/*
* We grab exclusive access to the target rel and index for the duration
* of the transaction.
* We grab exclusive access to the target rel and index for the
* duration of the transaction.
*/
OldHeap = heap_openr(saveoldrelname, AccessExclusiveLock);
OIDOldHeap = RelationGetRelid(OldHeap);
@ -154,8 +154,8 @@ copy_heap(Oid OIDOldHeap, char *NewName, bool istemp)
OldHeapDesc = RelationGetDescr(OldHeap);
/*
* Need to make a copy of the tuple descriptor,
* since heap_create_with_catalog modifies it.
* Need to make a copy of the tuple descriptor, since
* heap_create_with_catalog modifies it.
*/
tupdesc = CreateTupleDescCopyConstr(OldHeapDesc);
@ -164,16 +164,15 @@ copy_heap(Oid OIDOldHeap, char *NewName, bool istemp)
allowSystemTableMods);
/*
* Advance command counter so that the newly-created
* relation's catalog tuples will be visible to heap_open.
* Advance command counter so that the newly-created relation's
* catalog tuples will be visible to heap_open.
*/
CommandCounterIncrement();
/*
* If necessary, create a TOAST table for the new relation.
* Note that AlterTableCreateToastTable ends with
* CommandCounterIncrement(), so that the TOAST table will
* be visible for insertion.
* If necessary, create a TOAST table for the new relation. Note that
* AlterTableCreateToastTable ends with CommandCounterIncrement(), so
* that the TOAST table will be visible for insertion.
*/
AlterTableCreateToastTable(NewName, true);
@ -198,12 +197,12 @@ copy_index(Oid OIDOldIndex, Oid OIDNewHeap, char *NewIndexName)
/*
* Create a new index like the old one. To do this I get the info
* from pg_index, and add a new index with a temporary name (that
* will be changed later).
* from pg_index, and add a new index with a temporary name (that will
* be changed later).
*
* NOTE: index_create will cause the new index to be a temp relation
* if its parent table is, so we don't need to do anything special
* for the temp-table case here.
* NOTE: index_create will cause the new index to be a temp relation if
* its parent table is, so we don't need to do anything special for
* the temp-table case here.
*/
Old_pg_index_Tuple = SearchSysCache(INDEXRELID,
ObjectIdGetDatum(OIDOldIndex),
@ -266,13 +265,15 @@ rebuildheap(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex)
LocalHeapTuple.t_datamcxt = NULL;
LocalHeapTuple.t_data = NULL;
heap_fetch(LocalOldHeap, SnapshotNow, &LocalHeapTuple, &LocalBuffer);
if (LocalHeapTuple.t_data != NULL) {
if (LocalHeapTuple.t_data != NULL)
{
/*
* We must copy the tuple because heap_insert() will overwrite
* the commit-status fields of the tuple it's handed, and the
* retrieved tuple will actually be in a disk buffer! Thus,
* the source relation would get trashed, which is bad news
* if we abort later on. (This was a bug in releases thru 7.0)
* the source relation would get trashed, which is bad news if
* we abort later on. (This was a bug in releases thru 7.0)
*/
HeapTuple copiedTuple = heap_copytuple(&LocalHeapTuple);
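The heap_copytuple() call above is a defensive copy: the callee will overwrite status fields in the tuple it is handed, and the original still lives in a shared disk buffer, so the caller copies first and lets the callee clobber only the copy. A minimal stand-alone sketch of that pattern, with a made-up Record type and store_record() callee in place of the backend's heap routines:

#include <stdlib.h>
#include <string.h>

typedef struct Record
{
	int		status;				/* field the callee overwrites */
	char	payload[32];
} Record;

/* Callee that, like heap_insert(), scribbles on status fields in place. */
static void
store_record(Record *rec)
{
	rec->status = 1;
}

/* Caller: "shared" points into a buffer we must not trash. */
static void
copy_then_store(const Record *shared)
{
	Record	   *copy = malloc(sizeof(Record));

	memcpy(copy, shared, sizeof(Record));	/* defensive copy */
	store_record(copy);						/* clobbers the copy only */
	free(copy);
}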
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/command.c,v 1.122 2001/02/27 22:07:34 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/command.c,v 1.123 2001/03/22 03:59:21 momjian Exp $
*
* NOTES
* The PerformAddAttribute() code, like most of the relation
@ -502,8 +502,8 @@ AlterTableAddColumn(const char *relationName,
heap_close(rel, NoLock);
/*
* Automatically create the secondary relation for TOAST
* if it formerly had no such but now has toastable attributes.
* Automatically create the secondary relation for TOAST if it
* formerly had no such but now has toastable attributes.
*/
CommandCounterIncrement();
AlterTableCreateToastTable(relationName, true);
@ -1147,15 +1147,17 @@ AlterTableAddConstraint(char *relationName,
elog(ERROR, "ALTER TABLE: cannot add constraint to a view");
/*
* Scan all of the rows, looking for a false match
* Scan all of the rows, looking for a false
* match
*/
scan = heap_beginscan(rel, false, SnapshotNow, 0, NULL);
AssertState(scan != NULL);
/*
* We need to make a parse state and range table to allow
* us to transformExpr and fix_opids to get a version of
* the expression we can pass to ExecQual
* We need to make a parse state and range
* table to allow us to transformExpr and
* fix_opids to get a version of the
* expression we can pass to ExecQual
*/
pstate = make_parsestate(NULL);
rte = addRangeTableEntry(pstate, relationName, NULL,
@ -1174,14 +1176,16 @@ AlterTableAddConstraint(char *relationName,
name);
/*
* Make sure no outside relations are referred to.
* Make sure no outside relations are referred
* to.
*/
if (length(pstate->p_rtable) != 1)
elog(ERROR, "Only relation '%s' can be referenced in CHECK",
relationName);
/*
* Might as well try to reduce any constant expressions.
* Might as well try to reduce any constant
* expressions.
*/
expr = eval_const_expressions(expr);
@ -1197,8 +1201,8 @@ AlterTableAddConstraint(char *relationName,
econtext = MakeExprContext(slot, CurrentMemoryContext);
/*
* Scan through the rows now, checking the expression
* at each row.
* Scan through the rows now, checking the
* expression at each row.
*/
while (HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
{
@ -1221,10 +1225,12 @@ AlterTableAddConstraint(char *relationName,
heap_close(rel, NoLock);
elog(ERROR, "AlterTableAddConstraint: rejected due to CHECK constraint %s", name);
}
/*
* Call AddRelationRawConstraints to do the real adding --
* It duplicates some of the above, but does not check the
* validity of the constraint against tuples already in
* Call AddRelationRawConstraints to do the
* real adding -- It duplicates some of the
* above, but does not check the validity of
* the constraint against tuples already in
* the table.
*/
AddRelationRawConstraints(rel, NIL, constlist);
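The CHECK-constraint hunks above follow a validate-then-add pattern: every existing row is scanned against the proposed expression, the whole command is rejected if any row fails, and only then is the constraint recorded. A small sketch of that idea, assuming a hypothetical Row type and check_constraint_holds() predicate in place of the executor's ExecQual machinery:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical row type and CHECK predicate. */
typedef struct Row
{
	int		amount;
} Row;

static bool
check_constraint_holds(const Row *row)
{
	return row->amount >= 0;	/* e.g. CHECK (amount >= 0) */
}

/*
 * Scan every existing row before accepting the new constraint;
 * reject the whole ALTER if a single row violates it.
 */
static bool
validate_constraint(const Row *rows, size_t nrows)
{
	for (size_t i = 0; i < nrows; i++)
	{
		if (!check_constraint_holds(&rows[i]))
		{
			fprintf(stderr, "rejected due to CHECK constraint\n");
			return false;
		}
	}
	return true;				/* safe to record the constraint */
}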
@ -1241,7 +1247,8 @@ AlterTableAddConstraint(char *relationName,
case T_FkConstraint:
{
FkConstraint *fkconstraint = (FkConstraint *) newConstraint;
Relation rel, pkrel;
Relation rel,
pkrel;
HeapScanDesc scan;
HeapTuple tuple;
Trigger trig;
@ -1279,7 +1286,10 @@ AlterTableAddConstraint(char *relationName,
elog(ERROR, "referencing table \"%s\" not a relation",
relationName);
/* First we check for limited correctness of the constraint */
/*
* First we check for limited correctness of the
* constraint
*/
rel_attrs = pkrel->rd_att->attrs;
indexoidlist = RelationGetIndexList(pkrel);
@ -1302,23 +1312,29 @@ AlterTableAddConstraint(char *relationName,
{
List *attrl;
/* Make sure this index has the same number of keys -- It obviously
* won't match otherwise. */
/*
* Make sure this index has the same number of
* keys -- It obviously won't match otherwise.
*/
for (i = 0; i < INDEX_MAX_KEYS && indexStruct->indkey[i] != 0; i++);
if (i != length(fkconstraint->pk_attrs))
found = false;
else {
else
{
/* go through the fkconstraint->pk_attrs list */
foreach(attrl, fkconstraint->pk_attrs)
{
Ident *attr = lfirst(attrl);
found = false;
for (i = 0; i < INDEX_MAX_KEYS && indexStruct->indkey[i] != 0; i++)
{
int pkattno = indexStruct->indkey[i];
if (pkattno > 0)
{
char *name = NameStr(rel_attrs[pkattno - 1]->attname);
if (strcmp(name, attr->name) == 0)
{
found = true;
@ -1344,18 +1360,24 @@ AlterTableAddConstraint(char *relationName,
heap_close(pkrel, NoLock);
rel_attrs = rel->rd_att->attrs;
if (fkconstraint->fk_attrs!=NIL) {
if (fkconstraint->fk_attrs != NIL)
{
List *fkattrs;
Ident *fkattr;
found = false;
foreach(fkattrs, fkconstraint->fk_attrs) {
foreach(fkattrs, fkconstraint->fk_attrs)
{
int count;
found = false;
fkattr = lfirst(fkattrs);
for (count = 0; count < rel->rd_att->natts; count++) {
for (count = 0; count < rel->rd_att->natts; count++)
{
char *name = NameStr(rel->rd_att->attrs[count]->attname);
if (strcmp(name, fkattr->name)==0) {
if (strcmp(name, fkattr->name) == 0)
{
found = true;
break;
}
@ -1652,10 +1674,11 @@ AlterTableCreateToastTable(const char *relationName, bool silent)
"chunk_data",
BYTEAOID,
-1, 0, false);
/*
* Ensure that the toast table doesn't itself get toasted,
* or we'll be toast :-(. This is essential for chunk_data because
* type bytea is toastable; hit the other two just to be sure.
* Ensure that the toast table doesn't itself get toasted, or we'll be
* toast :-(. This is essential for chunk_data because type bytea is
* toastable; hit the other two just to be sure.
*/
tupdesc->attrs[0]->attstorage = 'p';
tupdesc->attrs[1]->attstorage = 'p';
@ -7,7 +7,7 @@
* Copyright (c) 1999, PostgreSQL Global Development Group
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/comment.c,v 1.26 2001/01/23 04:32:21 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/comment.c,v 1.27 2001/03/22 03:59:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.134 2001/03/14 21:47:50 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.135 2001/03/22 03:59:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -76,6 +76,7 @@ static StringInfoData attribute_buf;
#ifdef MULTIBYTE
static int client_encoding;
static int server_encoding;
#endif
@ -285,6 +286,7 @@ DoCopy(char *relname, bool binary, bool oids, bool from, bool pipe,
elog(ERROR, "You must have Postgres superuser privilege to do a COPY "
"directly to or from a file. Anyone can COPY to stdout or "
"from stdin. Psql's \\copy command also works for anyone.");
/*
* This restriction is unfortunate, but necessary until the frontend
* COPY protocol is redesigned to be binary-safe...
@ -344,8 +346,8 @@ DoCopy(char *relname, bool binary, bool oids, bool from, bool pipe,
mode_t oumask; /* Pre-existing umask value */
/*
* Prevent write to relative path ... too easy to shoot oneself
* in the foot by overwriting a database file ...
* Prevent write to relative path ... too easy to shoot
* oneself in the foot by overwriting a database file ...
*/
if (filename[0] != '/')
elog(ERROR, "Relative path not allowed for server side"
@ -408,7 +410,10 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp,
attr_count = rel->rd_att->natts;
attr = rel->rd_att->attrs;
/* For binary copy we really only need isvarlena, but compute it all... */
/*
* For binary copy we really only need isvarlena, but compute it
* all...
*/
out_functions = (FmgrInfo *) palloc(attr_count * sizeof(FmgrInfo));
elements = (Oid *) palloc(attr_count * sizeof(Oid));
isvarlena = (bool *) palloc(attr_count * sizeof(bool));
@ -507,10 +512,12 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp,
}
else
{
/*
* If we have a toasted datum, forcibly detoast it to avoid
* memory leakage inside the type's output routine (or
* for binary case, because we must output untoasted value).
* If we have a toasted datum, forcibly detoast it to
* avoid memory leakage inside the type's output routine
* (or for binary case, because we must output untoasted
* value).
*/
if (isvarlena[i])
value = PointerGetDatum(PG_DETOAST_DATUM(origvalue));
@ -552,8 +559,9 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp,
Datum datumBuf;
/*
* We need this horsing around because we don't know
* how shorter data values are aligned within a Datum.
* We need this horsing around because we don't
* know how shorter data values are aligned within
* a Datum.
*/
store_att_byval(&datumBuf, value, fld_size);
CopySendData(&datumBuf,
@ -622,8 +630,8 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp,
/*
* We need a ResultRelInfo so we can use the regular executor's
* index-entry-making machinery. (There used to be a huge amount
* of code here that basically duplicated execUtils.c ...)
* index-entry-making machinery. (There used to be a huge amount of
* code here that basically duplicated execUtils.c ...)
*/
resultRelInfo = makeNode(ResultRelInfo);
resultRelInfo->ri_RangeTableIndex = 1; /* dummy */
@ -833,8 +841,9 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp,
Datum datumBuf;
/*
* We need this horsing around because we don't know
* how shorter data values are aligned within a Datum.
* We need this horsing around because we don't
* know how shorter data values are aligned within
* a Datum.
*/
Assert(fld_size > 0 && fld_size <= sizeof(Datum));
CopyGetData(&datumBuf, fld_size, fp);
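The binary-COPY hunks above stage short by-value fields in a Datum-sized variable (datumBuf) and then read or write only fld_size bytes, because the caller cannot know how a sub-Datum value is aligned within a Datum. A rough sketch of that staging step, using a uintptr_t word and a hypothetical send_short_field() helper; the real code relies on store_att_byval() to get the alignment and byte-order details right, which this sketch glosses over:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uintptr_t Word;			/* Datum-sized scratch, standing in for Datum */

static void
send_short_field(FILE *fp, int32_t value, size_t fld_size)
{
	Word		buf = 0;

	assert(fld_size > 0 && fld_size <= sizeof(value));
	memcpy(&buf, &value, fld_size);		/* stage in an aligned word first;
										 * byte order is glossed over here */
	fwrite(&buf, 1, fld_size, fp);
}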
@ -1163,6 +1172,7 @@ CopyAttributeOut(FILE *fp, char *server_string, char *delim)
char *string_start;
int mblen;
int i;
#endif
#ifdef MULTIBYTE
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/creatinh.c,v 1.72 2001/01/24 19:42:52 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/creatinh.c,v 1.73 2001/03/22 03:59:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -152,8 +152,8 @@ DefineRelation(CreateStmt *stmt, char relkind)
/*
* Open the new relation and acquire exclusive lock on it. This isn't
* really necessary for locking out other backends (since they can't
* see the new rel anyway until we commit), but it keeps the lock manager
* from complaining about deadlock risks.
* see the new rel anyway until we commit), but it keeps the lock
* manager from complaining about deadlock risks.
*/
rel = heap_openr(relname, AccessExclusiveLock);
@ -255,11 +255,11 @@ change_varattnos_walker(Node *node, const AttrNumber *newattno)
if (var->varlevelsup == 0 && var->varno == 1)
{
/*
* ??? the following may be a problem when the
* node is multiply referenced though
* stringToNode() doesn't create such a node
* currently.
* ??? the following may be a problem when the node is
* multiply referenced though stringToNode() doesn't create
* such a node currently.
*/
Assert(newattno[var->varattno - 1] > 0);
var->varattno = newattno[var->varattno - 1];
@ -373,9 +373,12 @@ MergeAttributes(List *schema, List *supers, bool istemp,
AttrNumber attrno;
TupleDesc tupleDesc;
TupleConstr *constr;
AttrNumber *newattno, *partialAttidx;
AttrNumber *newattno,
*partialAttidx;
Node *expr;
int i, attidx, attno_exist;
int i,
attidx,
attno_exist;
relation = heap_openr(name, AccessShareLock);
@ -385,7 +388,8 @@ MergeAttributes(List *schema, List *supers, bool istemp,
if (!istemp && is_temp_rel_name(name))
elog(ERROR, "CREATE TABLE: cannot inherit from temp relation \"%s\"", name);
/* We should have an UNDER permission flag for this, but for now,
/*
* We should have an UNDER permission flag for this, but for now,
* demand that creator of a child table own the parent.
*/
if (!pg_ownercheck(GetUserId(), name, RELNAME))
@ -398,9 +402,10 @@ MergeAttributes(List *schema, List *supers, bool istemp,
newattno = (AttrNumber *) palloc(tupleDesc->natts * sizeof(AttrNumber));
for (i = 0; i < tupleDesc->natts; i++)
newattno[i] = 0;
/*
* searching and storing order are different.
* another table is needed.
* searching and storing order are different. another table is
* needed.
*/
partialAttidx = (AttrNumber *) palloc(tupleDesc->natts * sizeof(AttrNumber));
for (i = 0; i < tupleDesc->natts; i++)
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/dbcommands.c,v 1.73 2001/01/24 19:42:52 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/dbcommands.c,v 1.74 2001/03/22 03:59:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -84,10 +84,10 @@ createdb(const char *dbname, const char *dbpath,
/*
* Check for db name conflict. There is a race condition here, since
* another backend could create the same DB name before we commit.
* However, holding an exclusive lock on pg_database for the whole time
* we are copying the source database doesn't seem like a good idea,
* so accept possibility of race to create. We will check again after
* we grab the exclusive lock.
* However, holding an exclusive lock on pg_database for the whole
* time we are copying the source database doesn't seem like a good
* idea, so accept possibility of race to create. We will check again
* after we grab the exclusive lock.
*/
if (get_db_info(dbname, NULL, NULL, NULL, NULL, NULL, NULL))
elog(ERROR, "CREATE DATABASE: database \"%s\" already exists", dbname);
@ -102,9 +102,10 @@ createdb(const char *dbname, const char *dbpath,
&src_istemplate, &src_lastsysoid, src_dbpath))
elog(ERROR, "CREATE DATABASE: template \"%s\" does not exist",
dbtemplate);
/*
* Permission check: to copy a DB that's not marked datistemplate,
* you must be superuser or the owner thereof.
* Permission check: to copy a DB that's not marked datistemplate, you
* must be superuser or the owner thereof.
*/
if (!src_istemplate)
{
@ -112,6 +113,7 @@ createdb(const char *dbname, const char *dbpath,
elog(ERROR, "CREATE DATABASE: permission to copy \"%s\" denied",
dbtemplate);
}
/*
* Determine physical path of source database
*/
@ -134,13 +136,15 @@ createdb(const char *dbname, const char *dbpath,
encoding = src_encoding;
/*
* Preassign OID for pg_database tuple, so that we can compute db path.
* Preassign OID for pg_database tuple, so that we can compute db
* path.
*/
dboid = newoid();
/*
* Compute nominal location (where we will try to access the database),
* and resolve alternate physical location if one is specified.
* Compute nominal location (where we will try to access the
* database), and resolve alternate physical location if one is
* specified.
*/
nominal_loc = GetDatabasePath(dboid);
alt_loc = resolve_alt_dbpath(dbpath, dboid);
@ -155,8 +159,8 @@ createdb(const char *dbname, const char *dbpath,
/*
* Force dirty buffers out to disk, to ensure source database is
* up-to-date for the copy. (We really only need to flush buffers
* for the source database...)
* up-to-date for the copy. (We really only need to flush buffers for
* the source database...)
*/
BufferSync();
@ -231,7 +235,8 @@ createdb(const char *dbname, const char *dbpath,
tuple = heap_formtuple(pg_database_dsc, new_record, new_record_nulls);
tuple->t_data->t_oid = dboid; /* override heap_insert's OID selection */
tuple->t_data->t_oid = dboid; /* override heap_insert's OID
* selection */
heap_insert(pg_database_rel, tuple);
@ -311,8 +316,8 @@ dropdb(const char *dbname)
elog(ERROR, "DROP DATABASE: permission denied");
/*
* Disallow dropping a DB that is marked istemplate. This is just
* to prevent people from accidentally dropping template0 or template1;
* Disallow dropping a DB that is marked istemplate. This is just to
* prevent people from accidentally dropping template0 or template1;
* they can do so if they're really determined ...
*/
if (db_istemplate)
@ -338,6 +343,7 @@ dropdb(const char *dbname)
tup = heap_getnext(pgdbscan, 0);
if (!HeapTupleIsValid(tup))
{
/*
* This error should never come up since the existence of the
* database is checked earlier
@ -503,6 +509,7 @@ resolve_alt_dbpath(const char * dbpath, Oid dboid)
{
/* must be environment variable */
char *var = getenv(dbpath);
if (!var)
elog(ERROR, "Postmaster environment variable '%s' not set", dbpath);
if (var[0] != '/')
@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.52 2001/02/12 20:07:21 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.53 2001/03/22 03:59:22 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@ -217,21 +217,26 @@ void
CreateFunction(ProcedureStmt *stmt, CommandDest dest)
{
char *probin_str;
/* pathname of executable file that executes this function, if any */
char *prosrc_str;
/* SQL that executes this function, if any */
char *prorettype;
/* Type of return value (or member of set of values) from function */
char languageName[NAMEDATALEN];
/*
* name of language of function, with case adjusted: "C",
* "internal", "sql", etc.
* name of language of function, with case adjusted: "C", "internal",
* "sql", etc.
*/
bool returnsSet;
/* The function returns a set of values, as opposed to a singleton. */
/*
@ -478,8 +483,8 @@ DefineAggregate(char *aggName, List *parameters)
DefElem *defel = (DefElem *) lfirst(pl);
/*
* sfunc1, stype1, and initcond1 are accepted as obsolete spellings
* for sfunc, stype, initcond.
* sfunc1, stype1, and initcond1 are accepted as obsolete
* spellings for sfunc, stype, initcond.
*/
if (strcasecmp(defel->defname, "sfunc") == 0)
transfuncName = defGetString(defel);
@ -547,9 +552,9 @@ DefineType(char *typeName, List *parameters)
char storage = 'p'; /* default storage in TOAST */
/*
* Type names must be one character shorter than other names,
* allowing room to create the corresponding array type name with
* prepended "_".
* Type names must be one character shorter than other names, allowing
* room to create the corresponding array type name with prepended
* "_".
*/
if (strlen(typeName) > (NAMEDATALEN - 2))
{
@ -699,7 +704,9 @@ defGetString(DefElem *def)
return str;
}
case T_Float:
/* T_Float values are kept in string form, so this type cheat
/*
* T_Float values are kept in string form, so this type cheat
* works (and doesn't risk losing precision)
*/
return strVal(def->arg);
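DefineType's length check above exists because every type gets a companion array type whose name is the element type's name with "_" prepended, so the base name must be one character shorter than other names. A small sketch of that rule, with an illustrative NAMEDATALEN value and a hypothetical make_array_type_name() helper:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NAMEDATALEN 64			/* illustrative only; the real value comes
								 * from the catalog headers */

static bool
make_array_type_name(const char *type_name, char *out, size_t outlen)
{
	/* Leave room for the "_" prefix plus the terminating NUL. */
	if (strlen(type_name) > NAMEDATALEN - 2)
	{
		fprintf(stderr, "type name \"%s\" is too long\n", type_name);
		return false;
	}
	snprintf(out, outlen, "_%s", type_name);
	return true;
}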
@ -5,7 +5,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994-5, Regents of the University of California
*
* $Header: /cvsroot/pgsql/src/backend/commands/explain.c,v 1.64 2001/01/27 01:41:19 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/explain.c,v 1.65 2001/03/22 03:59:22 momjian Exp $
*
*/
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.45 2001/02/23 09:26:14 inoue Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.46 2001/03/22 03:59:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -118,9 +118,9 @@ DefineIndex(char *heapRelationName,
accessMethodName);
/*
* XXX Hardwired hacks to check for limitations on supported index types.
* We really ought to be learning this info from entries in the pg_am
* table, instead of having it wired in here!
* XXX Hardwired hacks to check for limitations on supported index
* types. We really ought to be learning this info from entries in the
* pg_am table, instead of having it wired in here!
*/
if (unique && accessMethodId != BTREE_AM_OID)
elog(ERROR, "DefineIndex: unique indices are only available with the btree access method");
@ -161,7 +161,8 @@ DefineIndex(char *heapRelationName,
elog(ERROR, "Existing indexes are inactive. REINDEX first");
/*
* Prepare arguments for index_create, primarily an IndexInfo structure
* Prepare arguments for index_create, primarily an IndexInfo
* structure
*/
indexInfo = makeNode(IndexInfo);
indexInfo->ii_Predicate = (Node *) cnfPred;
@ -515,8 +516,8 @@ GetAttrOpClass(IndexElem *attribute, Oid attrType,
attribute->class);
/*
* Assume the opclass is supported by this index access method
* if we can find at least one relevant entry in pg_amop.
* Assume the opclass is supported by this index access method if we
* can find at least one relevant entry in pg_amop.
*/
ScanKeyEntryInitialize(&entry[0], 0,
Anum_pg_amop_amopid,
@ -540,17 +541,18 @@ GetAttrOpClass(IndexElem *attribute, Oid attrType,
heap_close(relation, AccessShareLock);
/*
* Make sure the operators associated with this opclass actually accept
* the column data type. This prevents possible coredumps caused by
* user errors like applying text_ops to an int4 column. We will accept
* an opclass as OK if the operator's input datatype is binary-compatible
* with the actual column datatype. Note we assume that all the operators
* associated with an opclass accept the same datatypes, so checking the
* first one we happened to find in the table is sufficient.
* Make sure the operators associated with this opclass actually
* accept the column data type. This prevents possible coredumps
* caused by user errors like applying text_ops to an int4 column. We
* will accept an opclass as OK if the operator's input datatype is
* binary-compatible with the actual column datatype. Note we assume
* that all the operators associated with an opclass accept the same
* datatypes, so checking the first one we happened to find in the
* table is sufficient.
*
* If the opclass was the default for the datatype, assume we can skip
* this check --- that saves a few cycles in the most common case.
* If pg_opclass is wrong then we're probably screwed anyway...
* this check --- that saves a few cycles in the most common case. If
* pg_opclass is wrong then we're probably screwed anyway...
*/
if (doTypeCheck)
{
@ -752,18 +754,18 @@ ReindexDatabase(const char *dbname, bool force, bool all)
elog(ERROR, "REINDEX DATABASE: Can be executed only on the currently open database.");
/*
* We cannot run inside a user transaction block; if we were
* inside a transaction, then our commit- and
* start-transaction-command calls would not have the intended effect!
* We cannot run inside a user transaction block; if we were inside a
* transaction, then our commit- and start-transaction-command calls
* would not have the intended effect!
*/
if (IsTransactionBlock())
elog(ERROR, "REINDEX DATABASE cannot run inside a BEGIN/END block");
/*
* Create a memory context that will survive forced transaction commits
* we do below. Since it is a child of QueryContext, it will go away
* eventually even if we suffer an error; there's no need for special
* abort cleanup logic.
* Create a memory context that will survive forced transaction
* commits we do below. Since it is a child of QueryContext, it will
* go away eventually even if we suffer an error; there's no need for
* special abort cleanup logic.
*/
private_context = AllocSetContextCreate(QueryContext,
"ReindexDatabase",
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/remove.c,v 1.59 2001/01/24 19:42:52 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/remove.c,v 1.60 2001/03/22 03:59:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/rename.c,v 1.55 2001/01/24 19:42:52 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/rename.c,v 1.56 2001/03/22 03:59:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -189,15 +189,15 @@ renamerel(const char *oldrelname, const char *newrelname)
newrelname);
/*
* Check for renaming a temp table, which only requires altering
* the temp-table mapping, not the underlying table.
* Check for renaming a temp table, which only requires altering the
* temp-table mapping, not the underlying table.
*/
if (rename_temp_relation(oldrelname, newrelname))
return; /* all done... */
/*
* Instead of using heap_openr(), do it the hard way, so that we
* can rename indexes as well as regular relations.
* Instead of using heap_openr(), do it the hard way, so that we can
* rename indexes as well as regular relations.
*/
targetrelation = RelationNameGetRelation(oldrelname);
@ -219,8 +219,9 @@ renamerel(const char *oldrelname, const char *newrelname)
heap_close(targetrelation, NoLock);
/*
* Flush the relcache entry (easier than trying to change it at exactly
* the right instant). It'll get rebuilt on next access to relation.
* Flush the relcache entry (easier than trying to change it at
* exactly the right instant). It'll get rebuilt on next access to
* relation.
*
* XXX What if relation is myxactonly?
*
@ -244,8 +245,8 @@ renamerel(const char *oldrelname, const char *newrelname)
elog(ERROR, "renamerel: relation \"%s\" exists", newrelname);
/*
* Update pg_class tuple with new relname. (Scribbling on reltup
* is OK because it's a copy...)
* Update pg_class tuple with new relname. (Scribbling on reltup is
* OK because it's a copy...)
*/
StrNCpy(NameStr(((Form_pg_class) GETSTRUCT(reltup))->relname),
newrelname, NAMEDATALEN);

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/sequence.c,v 1.51 2001/03/07 21:20:26 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/sequence.c,v 1.52 2001/03/22 03:59:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -409,7 +409,8 @@ do_setval(char *seqname, int32 next, bool iscalled)
/* save info in local cache */
elm->last = next; /* last returned number */
elm->cached = next; /* last cached number (forget cached values) */
elm->cached = next; /* last cached number (forget cached
* values) */
START_CRIT_SECTION();
{
@ -511,6 +512,7 @@ get_seq_name(text *seqin)
else
{
seqname = rawname;
/*
* It's important that this match the identifier downcasing code
* used by backend/parser/scan.l.
@ -752,7 +754,8 @@ get_param(DefElem *def)
return -1;
}
void seq_redo(XLogRecPtr lsn, XLogRecord *record)
void
seq_redo(XLogRecPtr lsn, XLogRecord *record)
{
uint8 info = record->xl_info & ~XLR_INFO_MASK;
Relation reln;
@ -795,11 +798,13 @@ void seq_redo(XLogRecPtr lsn, XLogRecord *record)
return;
}
void seq_undo(XLogRecPtr lsn, XLogRecord *record)
void
seq_undo(XLogRecPtr lsn, XLogRecord *record)
{
}
void seq_desc(char *buf, uint8 xl_info, char* rec)
void
seq_desc(char *buf, uint8 xl_info, char *rec)
{
uint8 info = xl_info & ~XLR_INFO_MASK;
xl_seq_rec *xlrec = (xl_seq_rec *) rec;
@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/trigger.c,v 1.88 2001/03/14 21:50:32 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/trigger.c,v 1.89 2001/03/22 03:59:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -87,7 +87,9 @@ CreateTrigger(CreateTrigStmt *stmt)
constrrelid = InvalidOid;
else
{
/* NoLock is probably sufficient here, since we're only
/*
* NoLock is probably sufficient here, since we're only
* interested in getting the relation's OID...
*/
rel = heap_openr(stmt->constrrelname, NoLock);
@ -577,7 +579,8 @@ RelationBuildTriggers(Relation relation)
DatumGetCString(DirectFunctionCall1(nameout,
NameGetDatum(&pg_trigger->tgname))));
build->tgfoid = pg_trigger->tgfoid;
build->tgfunc.fn_oid = InvalidOid; /* mark FmgrInfo as uninitialized */
build->tgfunc.fn_oid = InvalidOid; /* mark FmgrInfo as
* uninitialized */
build->tgtype = pg_trigger->tgtype;
build->tgenabled = pg_trigger->tgenabled;
build->tgisconstraint = pg_trigger->tgisconstraint;
@ -841,17 +844,17 @@ ExecCallTriggerFunc(Trigger *trigger,
MemoryContext oldContext;
/*
* Fmgr lookup info is cached in the Trigger structure,
* so that we need not repeat the lookup on every call.
* Fmgr lookup info is cached in the Trigger structure, so that we
* need not repeat the lookup on every call.
*/
if (trigger->tgfunc.fn_oid == InvalidOid)
fmgr_info(trigger->tgfoid, &trigger->tgfunc);
/*
* Do the function evaluation in the per-tuple memory context,
* so that leaked memory will be reclaimed once per tuple.
* Note in particular that any new tuple created by the trigger function
* will live till the end of the tuple cycle.
* Do the function evaluation in the per-tuple memory context, so that
* leaked memory will be reclaimed once per tuple. Note in particular
* that any new tuple created by the trigger function will live till
* the end of the tuple cycle.
*/
oldContext = MemoryContextSwitchTo(per_tuple_context);
@ -868,8 +871,8 @@ ExecCallTriggerFunc(Trigger *trigger,
MemoryContextSwitchTo(oldContext);
/*
* Trigger protocol allows function to return a null pointer,
* but NOT to set the isnull result flag.
* Trigger protocol allows function to return a null pointer, but NOT
* to set the isnull result flag.
*/
if (fcinfo.isnull)
elog(ERROR, "ExecCallTriggerFunc: function %u returned NULL",
@ -915,10 +918,8 @@ ExecARInsertTriggers(EState *estate, Relation rel, HeapTuple trigtuple)
if (rel->trigdesc->n_after_row[TRIGGER_EVENT_INSERT] > 0 ||
rel->trigdesc->n_after_row[TRIGGER_EVENT_UPDATE] > 0 ||
rel->trigdesc->n_after_row[TRIGGER_EVENT_DELETE] > 0)
{
DeferredTriggerSaveEvent(rel, TRIGGER_EVENT_INSERT, NULL, trigtuple);
}
}
bool
ExecBRDeleteTriggers(EState *estate, ItemPointer tupleid)
@ -1240,10 +1241,11 @@ deferredTriggerCheckState(Oid tgoid, int32 itemstate)
static void
deferredTriggerAddEvent(DeferredTriggerEvent event)
{
/*
* Since the event list could grow quite long, we keep track of the
* list tail and append there, rather than just doing a stupid "lappend".
* This avoids O(N^2) behavior for large numbers of events.
* list tail and append there, rather than just doing a stupid
* "lappend". This avoids O(N^2) behavior for large numbers of events.
*/
event->dte_next = NULL;
if (deftrig_event_tail == NULL)
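deferredTriggerAddEvent's comment above explains the cached-tail append: keeping a pointer to the last list element turns each append into O(1) work instead of a walk of the whole list, avoiding O(N^2) behavior over many events. A minimal sketch of the same technique on a hypothetical singly linked Event list (the backend's DeferredTriggerEvent is richer):

#include <stdlib.h>

typedef struct Event
{
	int			data;
	struct Event *next;
} Event;

static Event *event_head = NULL;
static Event *event_tail = NULL;	/* cached tail: O(1) appends */

static void
add_event(int data)
{
	Event	   *ev = malloc(sizeof(Event));

	ev->data = data;
	ev->next = NULL;
	if (event_tail == NULL)
		event_head = ev;			/* first entry: becomes the head too */
	else
		event_tail->next = ev;		/* link after the current tail */
	event_tail = ev;				/* no list walk, unlike a naive append */
}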
@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $Header: /cvsroot/pgsql/src/backend/commands/user.c,v 1.73 2001/01/24 19:42:53 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/user.c,v 1.74 2001/03/22 03:59:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -603,6 +603,7 @@ DropUser(DropUserStmt *stmt)
}
heap_endscan(scan);
heap_close(pg_rel, AccessExclusiveLock);
/*
* Advance command counter so that later iterations of this loop
* will see the changes already made. This is essential if, for
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.187 2001/03/14 08:40:57 inoue Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.188 2001/03/22 03:59:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -120,9 +120,9 @@ vacuum(char *vacrel, bool verbose, bool analyze, List *anal_cols)
/*
* Create special memory context for cross-transaction storage.
*
* Since it is a child of QueryContext, it will go away eventually
* even if we suffer an error; there's no need for special abort
* cleanup logic.
* Since it is a child of QueryContext, it will go away eventually even
* if we suffer an error; there's no need for special abort cleanup
* logic.
*/
vac_context = AllocSetContextCreate(QueryContext,
"Vacuum",
@ -215,8 +215,8 @@ vacuum_shutdown()
/*
* Clean up working storage --- note we must do this after
* StartTransactionCommand, else we might be trying to delete
* the active context!
* StartTransactionCommand, else we might be trying to delete the
* active context!
*/
MemoryContextDelete(vac_context);
vac_context = NULL;
@ -360,10 +360,10 @@ vacuum_rel(Oid relid)
{
Relation onerel;
LockRelId onerelid;
VacPageListData vacuum_pages; /* List of pages to vacuum and/or clean
* indices */
VacPageListData fraged_pages; /* List of pages with space enough for
* re-using */
VacPageListData vacuum_pages; /* List of pages to vacuum and/or
* clean indices */
VacPageListData fraged_pages; /* List of pages with space enough
* for re-using */
Relation *Irel;
int32 nindices,
i;
@ -412,9 +412,9 @@ vacuum_rel(Oid relid)
/*
* Get a session-level exclusive lock too. This will protect our
* exclusive access to the relation across multiple transactions,
* so that we can vacuum the relation's TOAST table (if any) secure
* in the knowledge that no one is diddling the parent relation.
* exclusive access to the relation across multiple transactions, so
* that we can vacuum the relation's TOAST table (if any) secure in
* the knowledge that no one is diddling the parent relation.
*
* NOTE: this cannot block, even if someone else is waiting for access,
* because the lock manager knows that both lock requests are from the
@ -459,9 +459,10 @@ vacuum_rel(Oid relid)
else
vacrelstats->hasindex = false;
#ifdef NOT_USED
/*
* reindex in VACUUM is dangerous under WAL.
* ifdef out until it becomes safe.
* reindex in VACUUM is dangerous under WAL. ifdef out until it
* becomes safe.
*/
if (reindex)
{
@ -506,6 +507,7 @@ vacuum_rel(Oid relid)
}
else
{
/*
* Flush dirty pages out to disk. We must do this even if we
* didn't do anything else, because we want to ensure that all
@ -537,11 +539,11 @@ vacuum_rel(Oid relid)
CommitTransactionCommand();
/*
* If the relation has a secondary toast one, vacuum that too
* while we still hold the session lock on the master table.
* We don't need to propagate "analyze" to it, because the toaster
* always uses hardcoded index access and statistics are
* totally unimportant for toast relations
* If the relation has a secondary toast one, vacuum that too while we
* still hold the session lock on the master table. We don't need to
* propagate "analyze" to it, because the toaster always uses
* hardcoded index access and statistics are totally unimportant for
* toast relations
*/
if (toast_relid != InvalidOid)
vacuum_rel(toast_relid);
@ -1142,8 +1144,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/*
* If this (chain) tuple is moved by me already then I
* have to check is it in vacpage or not - i.e. is it moved
* while cleaning this page or some previous one.
* have to check is it in vacpage or not - i.e. is it
* moved while cleaning this page or some previous one.
*/
if (tuple.t_data->t_infomask & HEAP_MOVED_OFF)
{
@ -1232,8 +1234,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
* xaction and this tuple is already deleted by
* me. Actually, upper part of chain should be
* removed and seems that this should be handled
* in scan_heap(), but it's not implemented at
* the moment and so we just stop shrinking here.
* in scan_heap(), but it's not implemented at the
* moment and so we just stop shrinking here.
*/
ReleaseBuffer(Cbuf);
pfree(vtmove);
@ -1256,8 +1258,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
{
/*
* if to_vacpage no longer has enough free space to be
* useful, remove it from fraged_pages list
* if to_vacpage no longer has enough free space
* to be useful, remove it from fraged_pages list
*/
if (to_vacpage != NULL &&
!enough_space(to_vacpage, vacrelstats->min_tlen))
@ -1460,21 +1462,22 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
*
* NOTE: a nasty bug used to lurk here. It is possible
* for the source and destination pages to be the same
* (since this tuple-chain member can be on a page lower
* than the one we're currently processing in the outer
* loop). If that's true, then after vacuum_page() the
* source tuple will have been moved, and tuple.t_data
* will be pointing at garbage. Therefore we must do
* everything that uses tuple.t_data BEFORE this step!!
* (since this tuple-chain member can be on a page
* lower than the one we're currently processing in
* the outer loop). If that's true, then after
* vacuum_page() the source tuple will have been
* moved, and tuple.t_data will be pointing at
* garbage. Therefore we must do everything that uses
* tuple.t_data BEFORE this step!!
*
* This path is different from the other callers of
* vacuum_page, because we have already incremented the
* vacpage's offsets_used field to account for the
* vacuum_page, because we have already incremented
* the vacpage's offsets_used field to account for the
* tuple(s) we expect to move onto the page. Therefore
* vacuum_page's check for offsets_used == 0 is
* wrong. But since that's a good debugging check for
* all other callers, we work around it here rather
* than remove it.
* vacuum_page's check for offsets_used == 0 is wrong.
* But since that's a good debugging check for all
* other callers, we work around it here rather than
* remove it.
*/
if (!PageIsEmpty(ToPage) && vtmove[ti].cleanVpd)
{
@ -1526,7 +1529,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/*
* Set new tuple's t_ctid pointing to itself for last
* tuple in chain, and to next tuple in chain otherwise.
* tuple in chain, and to next tuple in chain
* otherwise.
*/
if (!ItemPointerIsValid(&Ctid))
newtup.t_data->t_ctid = newtup.t_self;
@ -1552,13 +1556,15 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
if (Irel != (Relation *) NULL)
{
/*
* XXX using CurrentMemoryContext here means
* intra-vacuum memory leak for functional indexes.
* Should fix someday.
* intra-vacuum memory leak for functional
* indexes. Should fix someday.
*
* XXX This code fails to handle partial indexes!
* Probably should change it to use ExecOpenIndices.
* Probably should change it to use
* ExecOpenIndices.
*/
for (i = 0; i < nindices; i++)
{
@ -1698,13 +1704,13 @@ failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)"
/* insert index' tuples if needed */
if (Irel != (Relation *) NULL)
{
/*
* XXX using CurrentMemoryContext here means
* intra-vacuum memory leak for functional indexes.
* Should fix someday.
* XXX using CurrentMemoryContext here means intra-vacuum
* memory leak for functional indexes. Should fix someday.
*
* XXX This code fails to handle partial indexes!
* Probably should change it to use ExecOpenIndices.
* XXX This code fails to handle partial indexes! Probably
* should change it to use ExecOpenIndices.
*/
for (i = 0; i < nindices; i++)
{
@ -1803,14 +1809,15 @@ failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)"
if (num_moved > 0)
{
/*
* We have to commit our tuple movings before we truncate the
* relation. Ideally we should do Commit/StartTransactionCommand
* here, relying on the session-level table lock to protect our
* exclusive access to the relation. However, that would require
* a lot of extra code to close and re-open the relation, indices,
* etc. For now, a quick hack: record status of current transaction
* as committed, and continue.
* etc. For now, a quick hack: record status of current
* transaction as committed, and continue.
*/
RecordTransactionCommit();
}
@ -1943,6 +1950,7 @@ failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)"
uncnt = PageRepairFragmentation(page, unused);
{
XLogRecPtr recptr;
recptr = log_heap_clean(onerel, buf, (char *) unused,
(char *) (&(unused[uncnt])) - (char *) unused);
PageSetLSN(page, recptr);
@ -1962,9 +1970,9 @@ failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)"
/*
* Flush dirty pages out to disk. We do this unconditionally, even if
* we don't need to truncate, because we want to ensure that all tuples
* have correct on-row commit status on disk (see bufmgr.c's comments
* for FlushRelationBuffers()).
* we don't need to truncate, because we want to ensure that all
* tuples have correct on-row commit status on disk (see bufmgr.c's
* comments for FlushRelationBuffers()).
*/
i = FlushRelationBuffers(onerel, blkno);
if (i < 0)
@ -2005,8 +2013,7 @@ vacuum_heap(VRelStats *vacrelstats, Relation onerel, VacPageList vacuum_pages)
int i;
nblocks = vacuum_pages->num_pages;
nblocks -= vacuum_pages->empty_end_pages; /* nothing to do with
* them */
nblocks -= vacuum_pages->empty_end_pages; /* nothing to do with them */
for (i = 0, vacpage = vacuum_pages->pagedesc; i < nblocks; i++, vacpage++)
{
@ -2022,9 +2029,9 @@ vacuum_heap(VRelStats *vacrelstats, Relation onerel, VacPageList vacuum_pages)
/*
* Flush dirty pages out to disk. We do this unconditionally, even if
* we don't need to truncate, because we want to ensure that all tuples
* have correct on-row commit status on disk (see bufmgr.c's comments
* for FlushRelationBuffers()).
* we don't need to truncate, because we want to ensure that all
* tuples have correct on-row commit status on disk (see bufmgr.c's
* comments for FlushRelationBuffers()).
*/
Assert(vacrelstats->num_pages >= vacuum_pages->empty_end_pages);
nblocks = vacrelstats->num_pages - vacuum_pages->empty_end_pages;
@ -2042,7 +2049,8 @@ vacuum_heap(VRelStats *vacrelstats, Relation onerel, VacPageList vacuum_pages)
vacrelstats->num_pages, nblocks);
nblocks = smgrtruncate(DEFAULT_SMGR, onerel, nblocks);
Assert(nblocks >= 0);
vacrelstats->num_pages = nblocks; /* set new number of blocks */
vacrelstats->num_pages = nblocks; /* set new number of
* blocks */
}
}
@ -2072,6 +2080,7 @@ vacuum_page(Relation onerel, Buffer buffer, VacPage vacpage)
uncnt = PageRepairFragmentation(page, unused);
{
XLogRecPtr recptr;
recptr = log_heap_clean(onerel, buffer, (char *) unused,
(char *) (&(unused[uncnt])) - (char *) unused);
PageSetLSN(page, recptr);
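The log_heap_clean()/PageSetLSN() pair above follows the usual write-ahead ordering: change the page in memory, append a WAL record describing the change, then stamp the page with that record's position so the buffer cannot reach disk before its log record does. A schematic sketch of that ordering with made-up Page and wal_append() stand-ins; this is only the shape of the protocol, not the backend's XLOG interface:

#include <stdint.h>

typedef struct Page
{
	uint64_t	lsn;			/* position of the last WAL record touching it */
	char		data[8192];
} Page;

static uint64_t wal_end = 0;

static uint64_t
wal_append(const void *rec, int len)
{
	(void) rec;					/* pretend the record bytes were written */
	wal_end += len;
	return wal_end;
}

static void
clean_page(Page *page, const void *change_rec, int change_len)
{
	page->data[0] = 0;			/* ... repair fragmentation in memory ... */
	page->lsn = wal_append(change_rec, change_len);		/* log, then stamp */
}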
@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/variable.c,v 1.45 2001/01/24 19:42:53 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/variable.c,v 1.46 2001/03/22 03:59:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -453,6 +453,7 @@ parse_DefaultXactIsoLevel(char *value)
{
#if 0
TransactionState s = CurrentTransactionState;
#endif
if (value == NULL)
@ -704,8 +705,7 @@ SetPGVariable(const char *name, const char *value)
char *mvalue = value ? pstrdup(value) : ((char *) NULL);
/*
* Special cases ought to be removed and handled separately
* by TCOP
* Special cases ought to be removed and handled separately by TCOP
*/
if (strcasecmp(name, "datestyle") == 0)
parse_date(mvalue);
@ -749,6 +749,7 @@ GetPGVariable(const char *name)
else
{
const char *val = GetConfigOption(name);
elog(NOTICE, "%s is %s", name, val);
}
}
@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $Id: view.c,v 1.53 2001/01/24 19:42:53 momjian Exp $
* $Id: view.c,v 1.54 2001/03/22 03:59:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -211,12 +211,12 @@ UpdateRangeTableOfViewParse(char *viewName, Query *viewParse)
*rt_entry2;
/*
* Make a copy of the given parsetree. It's not so much that we
* don't want to scribble on our input, it's that the parser has
* a bad habit of outputting multiple links to the same subtree
* for constructs like BETWEEN, and we mustn't have OffsetVarNodes
* increment the varno of a Var node twice. copyObject will expand
* any multiply-referenced subtree into multiple copies.
* Make a copy of the given parsetree. It's not so much that we don't
* want to scribble on our input, it's that the parser has a bad habit
* of outputting multiple links to the same subtree for constructs
* like BETWEEN, and we mustn't have OffsetVarNodes increment the
* varno of a Var node twice. copyObject will expand any
* multiply-referenced subtree into multiple copies.
*/
viewParse = (Query *) copyObject(viewParse);
@ -261,6 +261,7 @@ UpdateRangeTableOfViewParse(char *viewName, Query *viewParse)
void
DefineView(char *viewName, Query *viewParse)
{
/*
* Create the "view" relation NOTE: if it already exists, the xact
* will be aborted.
@ -295,9 +296,10 @@ DefineView(char *viewName, Query *viewParse)
void
RemoveView(char *viewName)
{
/*
* We just have to drop the relation; the associated rules will
* be cleaned up automatically.
* We just have to drop the relation; the associated rules will be
* cleaned up automatically.
*/
heap_drop_with_catalog(viewName, allowSystemTableMods);
}
@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $Id: execAmi.c,v 1.56 2001/01/24 19:42:53 momjian Exp $
* $Id: execAmi.c,v 1.57 2001/03/22 03:59:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execJunk.c,v 1.25 2001/01/29 00:39:17 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execJunk.c,v 1.26 2001/03/22 03:59:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -265,6 +265,7 @@ ExecInitJunkFilter(List *targetList, TupleDesc tupType)
void
ExecFreeJunkFilter(JunkFilter *junkfilter)
{
/*
* Since the junkfilter is inside its own context, we just have to
* delete the context and we're set.
@ -27,7 +27,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.138 2001/01/29 00:39:18 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.139 2001/03/22 03:59:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -287,6 +287,7 @@ ExecutorEnd(QueryDesc *queryDesc, EState *estate)
static void
ExecCheckQueryPerms(CmdType operation, Query *parseTree, Plan *plan)
{
/*
* Check RTEs in the query's primary rangetable.
*/
@ -405,12 +406,13 @@ ExecCheckRTEPerms(RangeTblEntry *rte, CmdType operation)
relName = rte->relname;
/*
* userid to check as: current user unless we have a setuid indication.
* userid to check as: current user unless we have a setuid
* indication.
*
* Note: GetUserId() is presently fast enough that there's no harm
* in calling it separately for each RTE. If that stops being true,
* we could call it once in ExecCheckQueryPerms and pass the userid
* down from there. But for now, no need for the extra clutter.
* Note: GetUserId() is presently fast enough that there's no harm in
* calling it separately for each RTE. If that stops being true, we
* could call it once in ExecCheckQueryPerms and pass the userid down
* from there. But for now, no need for the extra clutter.
*/
userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
@ -426,6 +428,7 @@ ExecCheckRTEPerms(RangeTblEntry *rte, CmdType operation)
if (rte->checkForWrite)
{
/*
* Note: write access in a SELECT context means SELECT FOR UPDATE.
* Right now we don't distinguish that from true update as far as
@ -519,6 +522,7 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
if (resultRelations != NIL)
{
/*
* Multiple result relations (due to inheritance)
* parseTree->resultRelations identifies them all
@ -541,8 +545,10 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
}
else
{
/*
* Single result relation identified by parseTree->resultRelation
* Single result relation identified by
* parseTree->resultRelation
*/
numResultRelations = 1;
resultRelInfos = (ResultRelInfo *) palloc(sizeof(ResultRelInfo));
@ -559,6 +565,7 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
}
else
{
/*
* if no result relation, then set state appropriately
*/
@ -616,10 +623,10 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
tupType = ExecGetTupType(plan); /* tuple descriptor */
/*
* Initialize the junk filter if needed. SELECT and INSERT queries need
* a filter if there are any junk attrs in the tlist. UPDATE and
* DELETE always need one, since there's always a junk 'ctid' attribute
* present --- no need to look first.
* Initialize the junk filter if needed. SELECT and INSERT queries
* need a filter if there are any junk attrs in the tlist. UPDATE and
* DELETE always need one, since there's always a junk 'ctid'
* attribute present --- no need to look first.
*/
{
bool junk_filter_needed = false;
@ -650,11 +657,12 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
if (junk_filter_needed)
{
/*
* If there are multiple result relations, each one needs
* its own junk filter. Note this is only possible for
* UPDATE/DELETE, so we can't be fooled by some needing
* a filter and some not.
* If there are multiple result relations, each one needs its
* own junk filter. Note this is only possible for
* UPDATE/DELETE, so we can't be fooled by some needing a
* filter and some not.
*/
if (parseTree->resultRelations != NIL)
{
@ -678,6 +686,7 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
resultRelInfo++;
subplans = lnext(subplans);
}
/*
* Set active junkfilter too; at this point ExecInitAppend
* has already selected an active result relation...
@ -750,10 +759,10 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
CommandCounterIncrement();
/*
* If necessary, create a TOAST table for the into relation.
* Note that AlterTableCreateToastTable ends with
* CommandCounterIncrement(), so that the TOAST table will
* be visible for insertion.
* If necessary, create a TOAST table for the into
* relation. Note that AlterTableCreateToastTable ends
* with CommandCounterIncrement(), so that the TOAST table
* will be visible for insertion.
*/
AlterTableCreateToastTable(intoName, true);
@ -817,9 +826,8 @@ initResultRelInfo(ResultRelInfo *resultRelInfo,
/*
* If there are indices on the result relation, open them and save
* descriptors in the result relation info, so that we can add new
* index entries for the tuples we add/update. We need not do
* this for a DELETE, however, since deletion doesn't affect
* indexes.
* index entries for the tuples we add/update. We need not do this
* for a DELETE, however, since deletion doesn't affect indexes.
*/
if (resultRelationDesc->rd_rel->relhasindex &&
operation != CMD_DELETE)
@ -857,8 +865,8 @@ EndPlan(Plan *plan, EState *estate)
estate->es_tupleTable = NULL;
/*
* close the result relation(s) if any, but hold locks
* until xact commit. Also clean up junkfilters if present.
* close the result relation(s) if any, but hold locks until xact
* commit. Also clean up junkfilters if present.
*/
resultRelInfo = estate->es_result_relations;
for (i = estate->es_num_result_relations; i > 0; i--)
@ -1227,11 +1235,12 @@ ExecAppend(TupleTableSlot *slot,
if (newtuple != tuple) /* modified by Trigger(s) */
{
/*
* Insert modified tuple into tuple table slot, replacing the
* original. We assume that it was allocated in per-tuple
* memory context, and therefore will go away by itself.
* The tuple table slot should not try to clear it.
* memory context, and therefore will go away by itself. The
* tuple table slot should not try to clear it.
*/
ExecStoreTuple(newtuple, slot, InvalidBuffer, false);
tuple = newtuple;
@ -1411,11 +1420,12 @@ ExecReplace(TupleTableSlot *slot,
if (newtuple != tuple) /* modified by Trigger(s) */
{
/*
* Insert modified tuple into tuple table slot, replacing the
* original. We assume that it was allocated in per-tuple
* memory context, and therefore will go away by itself.
* The tuple table slot should not try to clear it.
* memory context, and therefore will go away by itself. The
* tuple table slot should not try to clear it.
*/
ExecStoreTuple(newtuple, slot, InvalidBuffer, false);
tuple = newtuple;
@ -1469,10 +1479,10 @@ lreplace:;
/*
* Note: instead of having to update the old index tuples associated
* with the heap tuple, all we do is form and insert new index
* tuples. This is because replaces are actually deletes and inserts
* and index tuple deletion is done automagically by the vacuum
* daemon. All we do is insert new index tuples. -cim 9/27/89
* with the heap tuple, all we do is form and insert new index tuples.
* This is because replaces are actually deletes and inserts and index
* tuple deletion is done automagically by the vacuum daemon. All we
* do is insert new index tuples. -cim 9/27/89
*/
/*
@ -1525,8 +1535,8 @@ ExecRelCheck(ResultRelInfo *resultRelInfo,
}
/*
* We will use the EState's per-tuple context for evaluating constraint
* expressions (creating it if it's not already there).
* We will use the EState's per-tuple context for evaluating
* constraint expressions (creating it if it's not already there).
*/
econtext = GetPerTupleExprContext(estate);
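Several comments above lean on the per-tuple memory context: allocations made while processing one tuple are reclaimed wholesale when the context is reset, so small leaks in expression or trigger evaluation never accumulate. A toy sketch of that reset-per-iteration idea using a hypothetical bump-allocator Arena in place of a real memory context:

#include <stddef.h>

typedef struct Arena
{
	char	   *buf;
	size_t		size;
	size_t		used;
} Arena;

static char scratch_space[4096];
static Arena per_tuple = {scratch_space, sizeof(scratch_space), 0};

static void *
arena_alloc(Arena *a, size_t n)
{
	void	   *p = a->buf + a->used;	/* no overflow check in this sketch */

	a->used += n;
	return p;
}

static void
arena_reset(Arena *a)
{
	a->used = 0;				/* reclaim every allocation at once */
}

/* Per-row work may allocate freely; the loop resets the arena each row. */
static void
process_rows(int nrows)
{
	for (int i = 0; i < nrows; i++)
	{
		char	   *scratch = arena_alloc(&per_tuple, 256);

		scratch[0] = 0;			/* ... evaluate expressions, triggers ... */
		arena_reset(&per_tuple);	/* leaks from this row vanish here */
	}
}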
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execQual.c,v 1.83 2001/01/29 00:39:18 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execQual.c,v 1.84 2001/03/22 03:59:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -112,10 +112,11 @@ ExecEvalArrayRef(ArrayRef *arrayRef,
econtext,
isNull,
isDone));
/*
* If refexpr yields NULL, result is always NULL, for now anyway.
* (This means you cannot assign to an element or slice of an array
* that's NULL; it'll just stay NULL.)
* (This means you cannot assign to an element or slice of an
* array that's NULL; it'll just stay NULL.)
*/
if (*isNull)
return (Datum) NULL;
@ -166,7 +167,11 @@ ExecEvalArrayRef(ArrayRef *arrayRef,
econtext,
isNull,
NULL));
/* If any index expr yields NULL, result is NULL or source array */
/*
* If any index expr yields NULL, result is NULL or source
* array
*/
if (*isNull)
{
if (!isAssignment || array_source == NULL)
@ -189,9 +194,10 @@ ExecEvalArrayRef(ArrayRef *arrayRef,
econtext,
isNull,
NULL);
/*
* For now, can't cope with inserting NULL into an array,
* so make it a no-op per discussion above...
* For now, can't cope with inserting NULL into an array, so make
* it a no-op per discussion above...
*/
if (*isNull)
{
@ -601,10 +607,12 @@ ExecEvalFuncArgs(FunctionCachePtr fcache,
if (thisArgIsDone != ExprSingleResult)
{
/*
* We allow only one argument to have a set value; we'd need
* much more complexity to keep track of multiple set arguments
* (cf. ExecTargetList) and it doesn't seem worth it.
* much more complexity to keep track of multiple set
* arguments (cf. ExecTargetList) and it doesn't seem worth
* it.
*/
if (argIsDone != ExprSingleResult)
elog(ERROR, "Functions and operators can take only one set argument");
@ -639,8 +647,8 @@ ExecMakeFunctionResult(FunctionCachePtr fcache,
/*
* arguments is a list of expressions to evaluate before passing to
* the function manager. We skip the evaluation if it was already
* done in the previous call (ie, we are continuing the evaluation
* of a set-valued function). Otherwise, collect the current argument
* done in the previous call (ie, we are continuing the evaluation of
* a set-valued function). Otherwise, collect the current argument
* values into fcache->fcinfo.
*/
if (fcache->fcinfo.nargs > 0 && !fcache->argsValid)
@ -664,6 +672,7 @@ ExecMakeFunctionResult(FunctionCachePtr fcache,
*/
if (fcache->func.fn_retset || fcache->hasSetArg)
{
/*
* We need to return a set result. Complain if caller not ready
* to accept one.
@ -672,15 +681,16 @@ ExecMakeFunctionResult(FunctionCachePtr fcache,
elog(ERROR, "Set-valued function called in context that cannot accept a set");
/*
* This loop handles the situation where we have both a set argument
* and a set-valued function. Once we have exhausted the function's
* value(s) for a particular argument value, we have to get the next
* argument value and start the function over again. We might have
* to do it more than once, if the function produces an empty result
* set for a particular input value.
* This loop handles the situation where we have both a set
* argument and a set-valued function. Once we have exhausted the
* function's value(s) for a particular argument value, we have to
* get the next argument value and start the function over again.
* We might have to do it more than once, if the function produces
* an empty result set for a particular input value.
*/
for (;;)
{
/*
* If function is strict, and there are any NULL arguments,
* skip calling the function (at least for this set of args).
@ -716,13 +726,15 @@ ExecMakeFunctionResult(FunctionCachePtr fcache,
if (*isDone != ExprEndResult)
{
/*
* Got a result from current argument. If function itself
* returns set, flag that we want to reuse current argument
* values on next call.
* returns set, flag that we want to reuse current
* argument values on next call.
*/
if (fcache->func.fn_retset)
fcache->argsValid = true;
/*
* Make sure we say we are returning a set, even if the
* function itself doesn't return sets.
@ -762,11 +774,12 @@ ExecMakeFunctionResult(FunctionCachePtr fcache,
}
else
{
/*
* Non-set case: much easier.
*
* If function is strict, and there are any NULL arguments,
* skip calling the function and return NULL.
* If function is strict, and there are any NULL arguments, skip
* calling the function and return NULL.
*/
if (fcache->func.fn_strict)
{
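
The comments in the hunks above describe two conventions worth seeing in isolation: a strict function is simply not called when any argument is NULL, and a set-valued function is re-invoked by its caller until it reports end-of-set (ExprEndResult) rather than another element (ExprMultipleResult). The following is a minimal, self-contained sketch of that calling convention, not executor code; the enum, the generator function, and the driver loop are hypothetical stand-ins for ExprDoneCond, the called function, and the caller's retry loop.

/* Sketch of the set-result calling convention described above.
 * All names (SketchDoneCond, next_square, ...) are hypothetical. */
#include <stdio.h>

typedef enum
{
	SKETCH_SINGLE_RESULT,		/* expression is not set-valued */
	SKETCH_MULTIPLE_RESULT,		/* this is one element of a set */
	SKETCH_END_RESULT			/* no more elements in the set */
} SketchDoneCond;

/* A "set-returning function": yields 1, 4, 9, then reports end-of-set. */
static int
next_square(SketchDoneCond *isDone)
{
	static int	n = 0;

	if (n >= 3)
	{
		n = 0;					/* reset so a later caller starts over */
		*isDone = SKETCH_END_RESULT;
		return 0;				/* value is meaningless at end-of-set */
	}
	n++;
	*isDone = SKETCH_MULTIPLE_RESULT;
	return n * n;
}

int
main(void)
{
	SketchDoneCond isDone;

	/*
	 * The caller keeps re-invoking the function until it signals
	 * end-of-set, just as the executor re-calls a set-valued function.
	 */
	for (;;)
	{
		int			v = next_square(&isDone);

		if (isDone == SKETCH_END_RESULT)
			break;
		printf("%d\n", v);
	}
	return 0;
}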
@ -852,9 +865,9 @@ ExecEvalFunc(Expr *funcClause,
FunctionCachePtr fcache;
/*
* we extract the oid of the function associated with the func node and
* then pass the work onto ExecMakeFunctionResult which evaluates the
* arguments and returns the result of calling the function on the
* we extract the oid of the function associated with the func node
* and then pass the work onto ExecMakeFunctionResult which evaluates
* the arguments and returns the result of calling the function on the
* evaluated arguments.
*
* this is nearly identical to the ExecEvalOper code.
@ -1412,10 +1425,8 @@ ExecCleanTargetListLength(List *targetlist)
len++;
}
else
{
len += curTle->fjoin->fj_nNodes;
}
}
return len;
}
@ -1440,6 +1451,7 @@ ExecTargetList(List *targetlist,
ExprDoneCond *isDone)
{
MemoryContext oldContext;
#define NPREALLOCDOMAINS 64
char nullsArray[NPREALLOCDOMAINS];
bool fjIsNullArray[NPREALLOCDOMAINS];
@ -1484,10 +1496,11 @@ ExecTargetList(List *targetlist,
* we have a really large targetlist. otherwise we use the stack.
*
* We also allocate a bool array that is used to hold fjoin result state,
* and another array that holds the isDone status for each targetlist item.
* The isDone status is needed so that we can iterate, generating multiple
* tuples, when one or more tlist items return sets. (We expect the caller
* to call us again if we return *isDone = ExprMultipleResult.)
* and another array that holds the isDone status for each targetlist
* item. The isDone status is needed so that we can iterate,
* generating multiple tuples, when one or more tlist items return
* sets. (We expect the caller to call us again if we return *isDone
* = ExprMultipleResult.)
*/
if (nodomains > NPREALLOCDOMAINS)
{
@ -1554,8 +1567,10 @@ ExecTargetList(List *targetlist,
ExecEvalFjoin(tle, econtext, fjIsNull, isDone);
/* XXX this is wrong, but since fjoin code is completely broken
* anyway, I'm not going to worry about it now --- tgl 8/23/00
/*
* XXX this is wrong, but since fjoin code is completely
* broken anyway, I'm not going to worry about it now --- tgl
* 8/23/00
*/
if (isDone && *isDone == ExprEndResult)
{
@ -1594,6 +1609,7 @@ ExecTargetList(List *targetlist,
if (haveDoneSets)
{
/*
* note: can't get here unless we verified isDone != NULL
*/
@ -1601,7 +1617,8 @@ ExecTargetList(List *targetlist,
{
/*
* all sets are done, so report that tlist expansion is complete.
* all sets are done, so report that tlist expansion is
* complete.
*/
*isDone = ExprEndResult;
MemoryContextSwitchTo(oldContext);
@ -1644,10 +1661,11 @@ ExecTargetList(List *targetlist,
}
}
}
/*
* If we cannot make a tuple because some sets are empty,
* we still have to cycle the nonempty sets to completion,
* else resources will not be released from subplans etc.
* If we cannot make a tuple because some sets are empty, we
* still have to cycle the nonempty sets to completion, else
* resources will not be released from subplans etc.
*/
if (*isDone == ExprEndResult)
{
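
The NPREALLOCDOMAINS arrays a few hunks above illustrate a common C idiom: keep small fixed-size arrays on the stack for the usual case and spill to heap allocation only when the targetlist is unusually wide, freeing only what was actually allocated. A minimal sketch of that pattern follows, with malloc/free standing in for palloc/pfree; the names and the threshold are illustrative only.

/* Sketch of the "preallocate on the stack, spill to the heap" pattern. */
#include <stdlib.h>
#include <string.h>

#define NPREALLOC 64

static void
process_domains(int ndomains)
{
	char		nullsArray[NPREALLOC];
	char	   *nulls;

	/* Use the stack array when it is big enough, else allocate. */
	if (ndomains > NPREALLOC)
		nulls = (char *) malloc(ndomains);
	else
		nulls = nullsArray;
	if (nulls == NULL)
		return;

	memset(nulls, ' ', ndomains);

	/* ... per-domain work would go here ... */

	/* Free only what we actually allocated. */
	if (nulls != nullsArray)
		free(nulls);
}

int
main(void)
{
	process_domains(8);			/* fits in the stack array */
	process_domains(500);		/* forces the heap path */
	return 0;
}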

View File

@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execScan.c,v 1.15 2001/01/24 19:42:54 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execScan.c,v 1.16 2001/03/22 03:59:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/

View File

@ -15,7 +15,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execTuples.c,v 1.46 2001/01/29 00:39:18 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execTuples.c,v 1.47 2001/03/22 03:59:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -542,10 +542,12 @@ TupleTableSlot *
ExecInitNullTupleSlot(EState *estate, TupleDesc tupType)
{
TupleTableSlot *slot = ExecInitExtraTupleSlot(estate);
/*
* Since heap_getattr() will treat attributes beyond a tuple's t_natts
* as being NULL, we can make an all-nulls tuple just by making it be of
* zero length. However, the slot descriptor must match the real tupType.
* as being NULL, we can make an all-nulls tuple just by making it be
* of zero length. However, the slot descriptor must match the real
* tupType.
*/
HeapTuple nullTuple;
Datum values[1];
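
The zero-length-tuple trick in the comment above works because the attribute accessor reports any attribute number beyond the tuple's recorded natts as NULL, so a tuple that records zero attributes reads back as all NULLs without storing anything. A tiny stand-alone sketch of that accessor rule (hypothetical struct and function names, not heap_getattr itself):

#include <stdbool.h>
#include <stdio.h>

typedef struct
{
	int			natts;			/* number of attributes actually stored */
	int			values[8];		/* stored attribute values */
} SketchTuple;

/*
 * Attributes past natts are reported as NULL, which is what makes a
 * zero-length tuple behave as an all-nulls tuple.
 */
static int
sketch_getattr(const SketchTuple *tup, int attno, bool *isnull)
{
	if (attno > tup->natts)
	{
		*isnull = true;
		return 0;
	}
	*isnull = false;
	return tup->values[attno - 1];
}

int
main(void)
{
	SketchTuple nulltup = {0, {0}};
	bool		isnull;

	sketch_getattr(&nulltup, 3, &isnull);
	printf("attribute 3 is %s\n", isnull ? "NULL" : "not NULL");
	return 0;
}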

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execUtils.c,v 1.73 2001/01/29 00:39:19 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execUtils.c,v 1.74 2001/03/22 03:59:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -148,6 +148,7 @@ ExecAssignExprContext(EState *estate, CommonState *commonstate)
econtext->ecxt_innertuple = NULL;
econtext->ecxt_outertuple = NULL;
econtext->ecxt_per_query_memory = CurrentMemoryContext;
/*
* Create working memory for expression evaluation in this context.
*/
@ -184,14 +185,16 @@ MakeExprContext(TupleTableSlot *slot,
econtext->ecxt_innertuple = NULL;
econtext->ecxt_outertuple = NULL;
econtext->ecxt_per_query_memory = queryContext;
/*
* We make the temporary context a child of current working context,
* not of the specified queryContext. This seems reasonable but I'm
* not totally sure about it...
*
* Expression contexts made via this routine typically don't live long
* enough to get reset, so specify a minsize of 0. That avoids alloc'ing
* any memory in the common case where expr eval doesn't use any.
* enough to get reset, so specify a minsize of 0. That avoids
* alloc'ing any memory in the common case where expr eval doesn't use
* any.
*/
econtext->ecxt_per_tuple_memory =
AllocSetContextCreate(CurrentMemoryContext,
@ -635,8 +638,9 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
heapDescriptor = RelationGetDescr(heapRelation);
/*
* We will use the EState's per-tuple context for evaluating predicates
* and functional-index functions (creating it if it's not already there).
* We will use the EState's per-tuple context for evaluating
* predicates and functional-index functions (creating it if it's not
* already there).
*/
econtext = GetPerTupleExprContext(estate);
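
The per-tuple memory context discussed in these hunks exists so that anything allocated while evaluating one tuple's expressions can be released with a single reset instead of being pfree'd piecemeal. The sketch below shows only that reset-per-iteration idea, using a hypothetical fixed-size bump allocator rather than the real AllocSet machinery.

/* Toy arena: one fixed block, "reset" by rewinding the offset.
 * Hypothetical; the real per-tuple context is an AllocSet that can grow. */
#include <stdlib.h>
#include <string.h>

typedef struct
{
	char	   *block;
	size_t		size;
	size_t		used;
} SketchArena;

static void
arena_init(SketchArena *a, size_t size)
{
	a->block = (char *) malloc(size);
	a->size = (a->block != NULL) ? size : 0;
	a->used = 0;
}

static void *
arena_alloc(SketchArena *a, size_t n)
{
	void	   *p;

	if (a->used + n > a->size)
		return NULL;			/* a real context would grow instead */
	p = a->block + a->used;
	a->used += n;
	return p;
}

/* "ResetExprContext": everything allocated since the last reset is gone. */
static void
arena_reset(SketchArena *a)
{
	a->used = 0;
}

int
main(void)
{
	SketchArena perTuple;
	int			i;

	arena_init(&perTuple, 4096);
	for (i = 0; i < 1000; i++)
	{
		char	   *scratch = (char *) arena_alloc(&perTuple, 64);

		if (scratch != NULL)
			memset(scratch, 0, 64);		/* per-tuple expression work */
		arena_reset(&perTuple);	/* release it all before the next tuple */
	}
	free(perTuple.block);
	return 0;
}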

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/functions.c,v 1.43 2001/01/29 00:39:19 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/functions.c,v 1.44 2001/03/22 03:59:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -235,9 +235,7 @@ init_sql_fcache(FmgrInfo *finfo)
nargs * sizeof(Oid));
}
else
{
argOidVect = (Oid *) NULL;
}
tmp = SysCacheGetAttr(PROCOID,
procedureTuple,
@ -346,8 +344,8 @@ copy_function_result(SQLFunctionCachePtr fcache,
return resultSlot; /* no need to copy result */
/*
* If first time through, we have to initialize the funcSlot's
* tuple descriptor.
* If first time through, we have to initialize the funcSlot's tuple
* descriptor.
*/
if (funcSlot->ttc_tupleDescriptor == NULL)
{
@ -415,12 +413,14 @@ postquel_execute(execution_state *es,
/*
* If we are supposed to return a tuple, we return the tuple slot
* pointer converted to Datum. If we are supposed to return a simple
* value, then project out the first attribute of the result tuple
* (ie, take the first result column of the final SELECT).
* pointer converted to Datum. If we are supposed to return a
* simple value, then project out the first attribute of the
* result tuple (ie, take the first result column of the final
* SELECT).
*/
if (fcache->returnsTuple)
{
/*
* XXX do we need to remove junk attrs from the result tuple?
* Probably OK to leave them, as long as they are at the end.
@ -434,6 +434,7 @@ postquel_execute(execution_state *es,
1,
resSlot->ttc_tupleDescriptor,
&(fcinfo->isnull));
/*
* Note: if result type is pass-by-reference then we are
* returning a pointer into the tuple copied by

View File

@ -46,7 +46,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeAgg.c,v 1.75 2001/02/16 03:16:57 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeAgg.c,v 1.76 2001/03/22 03:59:27 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -130,8 +130,8 @@ typedef struct AggStatePerAggData
* an input tuple group and updated for each input tuple.
*
* For a simple (non DISTINCT) aggregate, we just feed the input values
* straight to the transition function. If it's DISTINCT, we pass
* the input values into a Tuplesort object; then at completion of the
* straight to the transition function. If it's DISTINCT, we pass the
* input values into a Tuplesort object; then at completion of the
* input tuple group, we scan the sorted values, eliminate duplicates,
* and run the transition function on the rest.
*/
@ -144,11 +144,12 @@ typedef struct AggStatePerAggData
bool noTransValue; /* true if transValue not set yet */
/*
* Note: noTransValue initially has the same value as transValueIsNull,
* and if true both are cleared to false at the same time. They are
* not the same though: if transfn later returns a NULL, we want to
* keep that NULL and not auto-replace it with a later input value.
* Only the first non-NULL input will be auto-substituted.
* Note: noTransValue initially has the same value as
* transValueIsNull, and if true both are cleared to false at the same
* time. They are not the same though: if transfn later returns a
* NULL, we want to keep that NULL and not auto-replace it with a
* later input value. Only the first non-NULL input will be
* auto-substituted.
*/
} AggStatePerAggData;
@ -195,8 +196,8 @@ initialize_aggregate(AggStatePerAgg peraggstate)
* (Re)set transValue to the initial value.
*
* Note that when the initial value is pass-by-ref, we just reuse it
* without copying for each group. Hence, transition function
* had better not scribble on its input, or it will fail for GROUP BY!
* without copying for each group. Hence, transition function had
* better not scribble on its input, or it will fail for GROUP BY!
*/
peraggstate->transValue = peraggstate->initValue;
peraggstate->transValueIsNull = peraggstate->initValueIsNull;
@ -228,11 +229,12 @@ advance_transition_function(AggStatePerAgg peraggstate,
{
if (isNull)
{
/*
* For a strict transfn, nothing happens at a NULL input tuple;
* we just keep the prior transValue. However, if the transtype
* is pass-by-ref, we have to copy it into the new context
* because the old one is going to get reset.
* For a strict transfn, nothing happens at a NULL input
* tuple; we just keep the prior transValue. However, if the
* transtype is pass-by-ref, we have to copy it into the new
* context because the old one is going to get reset.
*/
if (!peraggstate->transValueIsNull)
peraggstate->transValue = datumCopy(peraggstate->transValue,
@ -242,15 +244,17 @@ advance_transition_function(AggStatePerAgg peraggstate,
}
if (peraggstate->noTransValue)
{
/*
* transValue has not been initialized. This is the first non-NULL
* input value. We use it as the initial value for transValue.
* (We already checked that the agg's input type is binary-
* compatible with its transtype, so straight copy here is OK.)
* transValue has not been initialized. This is the first
* non-NULL input value. We use it as the initial value for
* transValue. (We already checked that the agg's input type
* is binary-compatible with its transtype, so straight copy
* here is OK.)
*
* We had better copy the datum if it is pass-by-ref, since
* the given pointer may be pointing into a scan tuple that
* will be freed on the next iteration of the scan.
* We had better copy the datum if it is pass-by-ref, since the
* given pointer may be pointing into a scan tuple that will
* be freed on the next iteration of the scan.
*/
peraggstate->transValue = datumCopy(newVal,
peraggstate->transtypeByVal,
@ -261,11 +265,13 @@ advance_transition_function(AggStatePerAgg peraggstate,
}
if (peraggstate->transValueIsNull)
{
/*
* Don't call a strict function with NULL inputs. Note it is
* possible to get here despite the above tests, if the transfn
* is strict *and* returned a NULL on a prior cycle. If that
* happens we will propagate the NULL all the way to the end.
* possible to get here despite the above tests, if the
* transfn is strict *and* returned a NULL on a prior cycle.
* If that happens we will propagate the NULL all the way to
* the end.
*/
return;
}
@ -283,10 +289,10 @@ advance_transition_function(AggStatePerAgg peraggstate,
newVal = FunctionCallInvoke(&fcinfo);
/*
* If the transition function was uncooperative, it may have
* given us a pass-by-ref result that points at the scan tuple
* or the prior-cycle working memory. Copy it into the active
* context if it doesn't look right.
* If the transition function was uncooperative, it may have given us
* a pass-by-ref result that points at the scan tuple or the
* prior-cycle working memory. Copy it into the active context if it
* doesn't look right.
*/
if (!peraggstate->transtypeByVal && !fcinfo.isnull &&
!MemoryContextContains(CurrentMemoryContext,
@ -321,19 +327,21 @@ process_sorted_aggregate(AggState *aggstate,
/*
* Note: if input type is pass-by-ref, the datums returned by the sort
* are freshly palloc'd in the per-query context, so we must be careful
* to pfree them when they are no longer needed.
* are freshly palloc'd in the per-query context, so we must be
* careful to pfree them when they are no longer needed.
*/
while (tuplesort_getdatum(peraggstate->sortstate, true,
&newVal, &isNull))
{
/*
* DISTINCT always suppresses nulls, per SQL spec, regardless of
* the transition function's strictness.
*/
if (isNull)
continue;
/*
* Clear and select the current working context for evaluation of
* the equality function and transition function.
@ -349,6 +357,7 @@ process_sorted_aggregate(AggState *aggstate,
/* equal to prior, so forget this one */
if (!peraggstate->inputtypeByVal)
pfree(DatumGetPointer(newVal));
/*
* note we do NOT flip contexts in this case, so no need to
* copy prior transValue to other context.
@ -357,6 +366,7 @@ process_sorted_aggregate(AggState *aggstate,
else
{
advance_transition_function(peraggstate, newVal, false);
/*
* Make the other context current so that this transition
* result is preserved.
@ -389,6 +399,7 @@ static void
finalize_aggregate(AggStatePerAgg peraggstate,
Datum *resultVal, bool *resultIsNull)
{
/*
* Apply the agg's finalfn if one is provided, else return transValue.
*/
@ -480,7 +491,8 @@ ExecAgg(Agg *node)
peragg = aggstate->peragg;
/*
* We loop retrieving groups until we find one matching node->plan.qual
* We loop retrieving groups until we find one matching
* node->plan.qual
*/
do
{
@ -578,12 +590,12 @@ ExecAgg(Agg *node)
* calculation, and stash results in the per-output-tuple context.
*
* This is a bit tricky when there are both DISTINCT and plain
* aggregates: we must first finalize all the plain aggs and then all
* the DISTINCT ones. This is needed because the last transition
* values for the plain aggs are stored in the not-current working
* context, and we have to evaluate those aggs (and stash the results
* in the output tup_cxt!) before we start flipping contexts again
* in process_sorted_aggregate.
* aggregates: we must first finalize all the plain aggs and then
* all the DISTINCT ones. This is needed because the last
* transition values for the plain aggs are stored in the
* not-current working context, and we have to evaluate those aggs
* (and stash the results in the output tup_cxt!) before we start
* flipping contexts again in process_sorted_aggregate.
*/
oldContext = MemoryContextSwitchTo(aggstate->tup_cxt);
for (aggno = 0; aggno < aggstate->numaggs; aggno++)
@ -766,11 +778,12 @@ ExecInitAgg(Agg *node, EState *estate, Plan *parent)
ExecAssignExprContext(estate, &aggstate->csstate.cstate);
/*
* We actually need three separate expression memory contexts: one
* for calculating per-output-tuple values (ie, the finished aggregate
* We actually need three separate expression memory contexts: one for
* calculating per-output-tuple values (ie, the finished aggregate
* results), and two that we ping-pong between for per-input-tuple
* evaluation of input expressions and transition functions. The
* context made by ExecAssignExprContext() is used as the output context.
* context made by ExecAssignExprContext() is used as the output
* context.
*/
aggstate->tup_cxt =
aggstate->csstate.cstate.cs_ExprContext->ecxt_per_tuple_memory;
@ -882,15 +895,16 @@ ExecInitAgg(Agg *node, EState *estate, Plan *parent)
/*
* If the transfn is strict and the initval is NULL, make sure
* input type and transtype are the same (or at least binary-
* compatible), so that it's OK to use the first input value
* as the initial transValue. This should have been checked at
* agg definition time, but just in case...
* compatible), so that it's OK to use the first input value as
* the initial transValue. This should have been checked at agg
* definition time, but just in case...
*/
if (peraggstate->transfn.fn_strict && peraggstate->initValueIsNull)
{
/*
* Note: use the type from the input expression here,
* not aggform->aggbasetype, because the latter might be 0.
* Note: use the type from the input expression here, not
* aggform->aggbasetype, because the latter might be 0.
* (Consider COUNT(*).)
*/
Oid inputType = exprType(aggref->target);
@ -903,9 +917,10 @@ ExecInitAgg(Agg *node, EState *estate, Plan *parent)
if (aggref->aggdistinct)
{
/*
* Note: use the type from the input expression here,
* not aggform->aggbasetype, because the latter might be 0.
* Note: use the type from the input expression here, not
* aggform->aggbasetype, because the latter might be 0.
* (Consider COUNT(*).)
*/
Oid inputType = exprType(aggref->target);
@ -947,12 +962,14 @@ ExecEndAgg(Agg *node)
Plan *outerPlan;
ExecFreeProjectionInfo(&aggstate->csstate.cstate);
/*
* Make sure ExecFreeExprContext() frees the right expr context...
*/
aggstate->csstate.cstate.cs_ExprContext->ecxt_per_tuple_memory =
aggstate->tup_cxt;
ExecFreeExprContext(&aggstate->csstate.cstate);
/*
* ... and I free the others.
*/
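
The transValueIsNull/noTransValue comments in the nodeAgg.c hunks above describe how a strict transition function with a NULL initial value behaves: NULL inputs are skipped entirely, the first non-NULL input seeds the running state, and later NULLs leave that state alone. A stand-alone sketch of that rule for a simple sum (hypothetical names, not the nodeAgg.c code):

#include <stdbool.h>
#include <stdio.h>

typedef struct
{
	int			transValue;
	bool		transValueIsNull;
	bool		noTransValue;	/* true until the first non-NULL input */
} SketchAggState;

/*
 * Strict "sum" transition: skip NULL inputs entirely; let the first
 * non-NULL input seed the transition value.
 */
static void
advance_sum(SketchAggState *state, int newVal, bool isNull)
{
	if (isNull)
		return;					/* strict: a NULL input changes nothing */
	if (state->noTransValue)
	{
		state->transValue = newVal;		/* first non-NULL input seeds state */
		state->transValueIsNull = false;
		state->noTransValue = false;
		return;
	}
	state->transValue += newVal;
}

int
main(void)
{
	SketchAggState state = {0, true, true};
	int			vals[] = {0, 4, 7, 0};
	bool		nulls[] = {true, false, false, true};
	int			i;

	for (i = 0; i < 4; i++)
		advance_sum(&state, vals[i], nulls[i]);

	if (state.transValueIsNull)
		printf("sum is NULL\n");
	else
		printf("sum = %d\n", state.transValue);		/* prints 11 */
	return 0;
}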

View File

@ -15,7 +15,7 @@
* locate group boundaries.
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeGroup.c,v 1.41 2001/02/16 03:16:57 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeGroup.c,v 1.42 2001/03/22 03:59:27 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -88,8 +88,8 @@ ExecGroupEveryTuple(Group *node)
tupdesc = ExecGetScanType(&grpstate->csstate);
/*
* We need not call ResetExprContext here because execTuplesMatch
* will reset the per-tuple memory context once per input tuple.
* We need not call ResetExprContext here because execTuplesMatch will
* reset the per-tuple memory context once per input tuple.
*/
/* if we haven't returned first tuple of a new group yet ... */
@ -199,8 +199,8 @@ ExecGroupOneTuple(Group *node)
tupdesc = ExecGetScanType(&grpstate->csstate);
/*
* We need not call ResetExprContext here because execTuplesMatch
* will reset the per-tuple memory context once per input tuple.
* We need not call ResetExprContext here because execTuplesMatch will
* reset the per-tuple memory context once per input tuple.
*/
firsttuple = grpstate->grp_firstTuple;

View File

@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* $Id: nodeHash.c,v 1.54 2001/01/24 19:42:54 momjian Exp $
* $Id: nodeHash.c,v 1.55 2001/03/22 03:59:27 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -540,9 +540,7 @@ ExecHashGetBucket(HashJoinTable hashtable,
* ------------------
*/
if (isNull)
{
bucketno = 0;
}
else
{
bucketno = hashFunc(keyval,

Some files were not shown because too many files have changed in this diff.