Re-run pgindent, fixing a problem where comment lines after a blank

comment line were output as too long, and update typedefs for /lib
directory.  Also fix case where identifiers were used as variable names
in the backend, but as typedefs in ecpg (favor the backend for
indenting).

Backpatch to 8.1.X.
This commit is contained in:
Bruce Momjian 2005-11-22 18:17:34 +00:00
parent e196eedd8a
commit 436a2956d8
264 changed files with 4403 additions and 4097 deletions

View File

@ -60,9 +60,9 @@
typedef struct remoteConn
{
PGconn *conn; /* Hold the remote connection */
PGconn *conn; /* Hold the remote connection */
int openCursorCount; /* The number of open cursors */
bool newXactForCursor; /* Opened a transaction for a cursor */
bool newXactForCursor; /* Opened a transaction for a cursor */
} remoteConn;
/*
@ -85,8 +85,8 @@ static Oid get_relid_from_relname(text *relname_text);
static char *generate_relation_name(Oid relid);
/* Global */
static remoteConn *pconn = NULL;
static HTAB *remoteConnHash = NULL;
static remoteConn *pconn = NULL;
static HTAB *remoteConnHash = NULL;
/*
* Following is list that holds multiple remote connections.
@ -347,7 +347,7 @@ dblink_open(PG_FUNCTION_ARGS)
else
conn = rconn->conn;
/* If we are not in a transaction, start one */
/* If we are not in a transaction, start one */
if (PQtransactionStatus(conn) == PQTRANS_IDLE)
{
res = PQexec(conn, "BEGIN");
@ -1505,7 +1505,7 @@ get_text_array_contents(ArrayType *array, int *numitems)
else
{
values[i] = DatumGetCString(DirectFunctionCall1(textout,
PointerGetDatum(ptr)));
PointerGetDatum(ptr)));
ptr = att_addlength(ptr, typlen, PointerGetDatum(ptr));
ptr = (char *) att_align(ptr, typalign);
}
@ -1717,7 +1717,7 @@ get_sql_update(Oid relid, int2vector *pkattnums, int16 pknumatts, char **src_pka
key = -1;
if (key > -1)
val = tgt_pkattvals[key] ? pstrdup(tgt_pkattvals[key]) : NULL;
val = tgt_pkattvals[key] ? pstrdup(tgt_pkattvals[key]) : NULL;
else
val = SPI_getvalue(tuple, tupdesc, i + 1);
@ -1744,7 +1744,7 @@ get_sql_update(Oid relid, int2vector *pkattnums, int16 pknumatts, char **src_pka
quote_ident_cstr(NameStr(tupdesc->attrs[pkattnum - 1]->attname)));
if (tgt_pkattvals != NULL)
val = tgt_pkattvals[i] ? pstrdup(tgt_pkattvals[i]) : NULL;
val = tgt_pkattvals[i] ? pstrdup(tgt_pkattvals[i]) : NULL;
else
val = SPI_getvalue(tuple, tupdesc, pkattnum);

View File

@ -39,7 +39,7 @@ g_int_consistent(PG_FUNCTION_ARGS)
if (strategy == BooleanSearchStrategy)
PG_RETURN_BOOL(execconsistent((QUERYTYPE *) query,
(ArrayType *) DatumGetPointer(entry->key),
GIST_LEAF(entry)));
GIST_LEAF(entry)));
/* XXX are we sure it's safe to scribble on the query object here? */
/* XXX what about toasted input? */
@ -97,7 +97,7 @@ g_int_union(PG_FUNCTION_ARGS)
for (i = 0; i < entryvec->n; i++)
{
ArrayType *ent = GETENTRY(entryvec, i);
ArrayType *ent = GETENTRY(entryvec, i);
CHECKARRVALID(ent);
totlen += ARRNELEMS(ent);
@ -108,8 +108,8 @@ g_int_union(PG_FUNCTION_ARGS)
for (i = 0; i < entryvec->n; i++)
{
ArrayType *ent = GETENTRY(entryvec, i);
int nel;
ArrayType *ent = GETENTRY(entryvec, i);
int nel;
nel = ARRNELEMS(ent);
memcpy(ptr, ARRPTR(ent), nel * sizeof(int4));
@ -143,10 +143,10 @@ g_int_compress(PG_FUNCTION_ARGS)
CHECKARRVALID(r);
PREPAREARR(r);
if (ARRNELEMS(r)>= 2 * MAXNUMRANGE)
elog(NOTICE,"Input array is too big (%d maximum allowed, %d current), use gist__intbig_ops opclass instead",
2 * MAXNUMRANGE - 1, ARRNELEMS(r));
if (ARRNELEMS(r) >= 2 * MAXNUMRANGE)
elog(NOTICE, "Input array is too big (%d maximum allowed, %d current), use gist__intbig_ops opclass instead",
2 * MAXNUMRANGE - 1, ARRNELEMS(r));
retval = palloc(sizeof(GISTENTRY));
gistentryinit(*retval, PointerGetDatum(r),
entry->rel, entry->page, entry->offset, VARSIZE(r), FALSE);
@ -154,12 +154,14 @@ g_int_compress(PG_FUNCTION_ARGS)
PG_RETURN_POINTER(retval);
}
/* leaf entries never compress one more time, only when entry->leafkey ==true,
so now we work only with internal keys */
/*
* leaf entries never compress one more time, only when entry->leafkey
* ==true, so now we work only with internal keys
*/
r = (ArrayType *) PG_DETOAST_DATUM(entry->key);
CHECKARRVALID(r);
if (ARRISVOID(r))
if (ARRISVOID(r))
{
if (r != (ArrayType *) DatumGetPointer(entry->key))
pfree(r);

View File

@ -1,5 +1,5 @@
/*
* $PostgreSQL: pgsql/contrib/pgbench/pgbench.c,v 1.45 2005/10/29 19:38:07 tgl Exp $
* $PostgreSQL: pgsql/contrib/pgbench/pgbench.c,v 1.46 2005/11/22 18:17:04 momjian Exp $
*
* pgbench: a simple benchmark program for PostgreSQL
* written by Tatsuo Ishii
@ -1110,7 +1110,8 @@ main(int argc, char **argv)
fprintf(stderr, "Use limit/ulimt to increase the limit before using pgbench.\n");
exit(1);
}
#endif /* #if !(defined(__CYGWIN__) || defined(__MINGW32__)) */
#endif /* #if !(defined(__CYGWIN__) ||
* defined(__MINGW32__)) */
break;
case 'C':
is_connect = 1;

View File

@ -26,7 +26,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $PostgreSQL: pgsql/contrib/pgcrypto/pgp-decrypt.c,v 1.6 2005/10/15 02:49:06 momjian Exp $
* $PostgreSQL: pgsql/contrib/pgcrypto/pgp-decrypt.c,v 1.7 2005/11/22 18:17:04 momjian Exp $
*/
#include "postgres.h"
@ -269,14 +269,14 @@ prefix_init(void **priv_p, void *arg, PullFilter * src)
* The original purpose of the 2-byte check was to show user a
* friendly "wrong key" message. This made following possible:
*
* "An Attack on CFB Mode Encryption As Used By OpenPGP" by Serge Mister
* and Robert Zuccherato
* "An Attack on CFB Mode Encryption As Used By OpenPGP" by Serge
* Mister and Robert Zuccherato
*
* To avoid being 'oracle', we delay reporting, which basically means we
* prefer to run into corrupt packet header.
* To avoid being 'oracle', we delay reporting, which basically means
* we prefer to run into corrupt packet header.
*
* We _could_ throw PXE_PGP_CORRUPT_DATA here, but there is possibility
* of attack via timing, so we don't.
* We _could_ throw PXE_PGP_CORRUPT_DATA here, but there is
* possibility of attack via timing, so we don't.
*/
ctx->corrupt_prefix = 1;
}

View File

@ -26,7 +26,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $PostgreSQL: pgsql/contrib/pgcrypto/pgp-pgsql.c,v 1.6 2005/10/15 02:49:06 momjian Exp $
* $PostgreSQL: pgsql/contrib/pgcrypto/pgp-pgsql.c,v 1.7 2005/11/22 18:17:04 momjian Exp $
*/
#include "postgres.h"
@ -125,8 +125,8 @@ add_entropy(text *data1, text *data2, text *data3)
/*
* Try to make the feeding unpredictable.
*
* Prefer data over keys, as it's rather likely that key is same in several
* calls.
* Prefer data over keys, as it's rather likely that key is same in
* several calls.
*/
/* chance: 7/8 */

View File

@ -547,8 +547,8 @@ crosstab(PG_FUNCTION_ARGS)
* Get the next category item value, which is alway
* attribute number three.
*
* Be careful to sssign the value to the array index based on
* which category we are presently processing.
* Be careful to sssign the value to the array index based
* on which category we are presently processing.
*/
values[1 + i] = SPI_getvalue(spi_tuple, spi_tupdesc, 3);
@ -870,8 +870,8 @@ get_crosstab_tuplestore(char *sql,
/*
* The provided SQL query must always return at least three columns:
*
* 1. rowname the label for each row - column 1 in the final result 2.
* category the label for each value-column in the final result 3.
* 1. rowname the label for each row - column 1 in the final result
* 2. category the label for each value-column in the final result 3.
* value the values used to populate the value-columns
*
* If there are more than three columns, the last two are taken as

View File

@ -178,7 +178,7 @@ gettoken_query(QPRS_STATE * state, int4 *val, int4 *lenval, char **strval, int2
state->state = WAITOPERATOR;
return VAL;
}
else if ( state->state == WAITFIRSTOPERAND )
else if (state->state == WAITFIRSTOPERAND)
return END;
else
ereport(ERROR,
@ -206,13 +206,13 @@ gettoken_query(QPRS_STATE * state, int4 *val, int4 *lenval, char **strval, int2
return ERR;
break;
case WAITSINGLEOPERAND:
if ( *(state->buf) == '\0' )
if (*(state->buf) == '\0')
return END;
*strval = state->buf;
*lenval = strlen( state->buf );
state->buf += strlen( state->buf );
*lenval = strlen(state->buf);
state->buf += strlen(state->buf);
state->count++;
return VAL;
return VAL;
default:
return ERR;
break;
@ -600,7 +600,7 @@ findoprnd(ITEM * ptr, int4 *pos)
* input
*/
static QUERYTYPE *
queryin(char *buf, void (*pushval) (QPRS_STATE *, int, char *, int, int2), int cfg_id, bool isplain)
queryin(char *buf, void (*pushval) (QPRS_STATE *, int, char *, int, int2), int cfg_id, bool isplain)
{
QPRS_STATE state;
int4 i;
@ -637,12 +637,13 @@ queryin(char *buf, void (*pushval) (QPRS_STATE *, int, char *, int, int2), int c
/* parse query & make polish notation (postfix, but in reverse order) */
makepol(&state, pushval);
pfree(state.valstate.word);
if (!state.num) {
if (!state.num)
{
elog(NOTICE, "Query doesn't contain lexem(s)");
query = (QUERYTYPE*)palloc( HDRSIZEQT );
query = (QUERYTYPE *) palloc(HDRSIZEQT);
query->len = HDRSIZEQT;
query->size = 0;
return query;
return query;
}
/* make finish struct */
@ -928,9 +929,9 @@ to_tsquery(PG_FUNCTION_ARGS)
str = text2char(in);
PG_FREE_IF_COPY(in, 1);
query = queryin(str, pushval_morph, PG_GETARG_INT32(0),false);
if ( query->size == 0 )
query = queryin(str, pushval_morph, PG_GETARG_INT32(0), false);
if (query->size == 0)
PG_RETURN_POINTER(query);
res = clean_fakeval_v2(GETQUERY(query), &len);
@ -984,8 +985,8 @@ plainto_tsquery(PG_FUNCTION_ARGS)
PG_FREE_IF_COPY(in, 1);
query = queryin(str, pushval_morph, PG_GETARG_INT32(0), true);
if ( query->size == 0 )
if (query->size == 0)
PG_RETURN_POINTER(query);
res = clean_fakeval_v2(GETQUERY(query), &len);
@ -1023,4 +1024,3 @@ plainto_tsquery_current(PG_FUNCTION_ARGS)
Int32GetDatum(get_currcfg()),
PG_GETARG_DATUM(0)));
}

View File

@ -17,7 +17,7 @@ typedef struct ITEM
int4 val;
/* user-friendly value, must correlate with WordEntry */
uint32
istrue:1, /* use for ranking in Cover */
istrue:1, /* use for ranking in Cover */
length:11,
distance:20;
} ITEM;

View File

@ -7,180 +7,201 @@
#include "query.h"
typedef uint64 TPQTGist;
#define SIGLEN (sizeof(TPQTGist)*BITS_PER_BYTE)
#define SIGLEN (sizeof(TPQTGist)*BITS_PER_BYTE)
#define GETENTRY(vec,pos) ((TPQTGist *) DatumGetPointer((vec)->vector[(pos)].key))
PG_FUNCTION_INFO_V1(tsq_mcontains);
Datum tsq_mcontains(PG_FUNCTION_ARGS);
Datum tsq_mcontains(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(tsq_mcontained);
Datum tsq_mcontained(PG_FUNCTION_ARGS);
Datum tsq_mcontained(PG_FUNCTION_ARGS);
static TPQTGist
makesign(QUERYTYPE* a) {
int i;
ITEM *ptr = GETQUERY(a);
makesign(QUERYTYPE * a)
{
int i;
ITEM *ptr = GETQUERY(a);
TPQTGist sign = 0;
for (i = 0; i < a->size; i++) {
if ( ptr->type == VAL )
for (i = 0; i < a->size; i++)
{
if (ptr->type == VAL)
sign |= 1 << (ptr->val % SIGLEN);
ptr++;
}
return sign;
}
Datum
tsq_mcontains(PG_FUNCTION_ARGS) {
tsq_mcontains(PG_FUNCTION_ARGS)
{
QUERYTYPE *query = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM(PG_GETARG_DATUM(0)));
QUERYTYPE *ex = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM(PG_GETARG_DATUM(1)));
TPQTGist sq, se;
int i,j;
ITEM *iq, *ie;
TPQTGist sq,
se;
int i,
j;
ITEM *iq,
*ie;
if ( query->size < ex->size ) {
if (query->size < ex->size)
{
PG_FREE_IF_COPY(query, 0);
PG_FREE_IF_COPY(ex, 1);
PG_RETURN_BOOL( false );
PG_RETURN_BOOL(false);
}
sq = makesign(query);
se = makesign(ex);
if ( (sq&se)!=se ) {
if ((sq & se) != se)
{
PG_FREE_IF_COPY(query, 0);
PG_FREE_IF_COPY(ex, 1);
PG_RETURN_BOOL( false );
}
PG_RETURN_BOOL(false);
}
ie = GETQUERY(ex);
for(i=0;i<ex->size;i++) {
for (i = 0; i < ex->size; i++)
{
iq = GETQUERY(query);
if ( ie[i].type != VAL )
if (ie[i].type != VAL)
continue;
for(j=0;j<query->size;j++)
if ( iq[j].type == VAL && ie[i].val == iq[j].val ) {
j = query->size+1;
for (j = 0; j < query->size; j++)
if (iq[j].type == VAL && ie[i].val == iq[j].val)
{
j = query->size + 1;
break;
}
if ( j == query->size ) {
if (j == query->size)
{
PG_FREE_IF_COPY(query, 0);
PG_FREE_IF_COPY(ex, 1);
PG_RETURN_BOOL( false );
PG_RETURN_BOOL(false);
}
}
}
PG_FREE_IF_COPY(query, 0);
PG_FREE_IF_COPY(ex, 1);
PG_RETURN_BOOL( true );
PG_RETURN_BOOL(true);
}
Datum
tsq_mcontained(PG_FUNCTION_ARGS) {
tsq_mcontained(PG_FUNCTION_ARGS)
{
PG_RETURN_DATUM(
DirectFunctionCall2(
tsq_mcontains,
PG_GETARG_DATUM(1),
PG_GETARG_DATUM(0)
)
DirectFunctionCall2(
tsq_mcontains,
PG_GETARG_DATUM(1),
PG_GETARG_DATUM(0)
)
);
}
PG_FUNCTION_INFO_V1(gtsq_in);
Datum gtsq_in(PG_FUNCTION_ARGS);
Datum gtsq_in(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(gtsq_out);
Datum gtsq_out(PG_FUNCTION_ARGS);
Datum gtsq_out(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(gtsq_compress);
Datum gtsq_compress(PG_FUNCTION_ARGS);
Datum gtsq_compress(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(gtsq_decompress);
Datum gtsq_decompress(PG_FUNCTION_ARGS);
Datum gtsq_decompress(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(gtsq_consistent);
Datum gtsq_consistent(PG_FUNCTION_ARGS);
Datum gtsq_consistent(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(gtsq_union);
Datum gtsq_union(PG_FUNCTION_ARGS);
Datum gtsq_union(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(gtsq_same);
Datum gtsq_same(PG_FUNCTION_ARGS);
Datum gtsq_same(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(gtsq_penalty);
Datum gtsq_penalty(PG_FUNCTION_ARGS);
Datum gtsq_penalty(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(gtsq_picksplit);
Datum gtsq_picksplit(PG_FUNCTION_ARGS);
Datum gtsq_picksplit(PG_FUNCTION_ARGS);
Datum
gtsq_in(PG_FUNCTION_ARGS) {
elog(ERROR, "Not implemented");
PG_RETURN_DATUM(0);
gtsq_in(PG_FUNCTION_ARGS)
{
elog(ERROR, "Not implemented");
PG_RETURN_DATUM(0);
}
Datum
gtsq_out(PG_FUNCTION_ARGS) {
elog(ERROR, "Not implemented");
PG_RETURN_DATUM(0);
gtsq_out(PG_FUNCTION_ARGS)
{
elog(ERROR, "Not implemented");
PG_RETURN_DATUM(0);
}
Datum
gtsq_compress(PG_FUNCTION_ARGS) {
gtsq_compress(PG_FUNCTION_ARGS)
{
GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
GISTENTRY *retval = entry;
if (entry->leafkey) {
TPQTGist *sign = (TPQTGist*)palloc( sizeof(TPQTGist) );
if (entry->leafkey)
{
TPQTGist *sign = (TPQTGist *) palloc(sizeof(TPQTGist));
retval = (GISTENTRY *) palloc(sizeof(GISTENTRY));
*sign = makesign( (QUERYTYPE*)DatumGetPointer(PG_DETOAST_DATUM(entry->key)) );
*sign = makesign((QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM(entry->key)));
gistentryinit(*retval, PointerGetDatum(sign),
entry->rel, entry->page,
entry->offset, sizeof(TPQTGist), FALSE);
entry->rel, entry->page,
entry->offset, sizeof(TPQTGist), FALSE);
}
PG_RETURN_POINTER(retval);
}
Datum
gtsq_decompress(PG_FUNCTION_ARGS) {
gtsq_decompress(PG_FUNCTION_ARGS)
{
PG_RETURN_DATUM(PG_GETARG_DATUM(0));
}
Datum
gtsq_consistent(PG_FUNCTION_ARGS) {
gtsq_consistent(PG_FUNCTION_ARGS)
{
GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
TPQTGist *key = (TPQTGist*) DatumGetPointer(entry->key);
TPQTGist *key = (TPQTGist *) DatumGetPointer(entry->key);
QUERYTYPE *query = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM(PG_GETARG_DATUM(1)));
StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2);
StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2);
TPQTGist sq = makesign(query);
if ( GIST_LEAF(entry) )
PG_RETURN_BOOL( ( (*key) & sq ) == ((strategy==1) ? sq : *key) );
else
PG_RETURN_BOOL( (*key) & sq );
if (GIST_LEAF(entry))
PG_RETURN_BOOL(((*key) & sq) == ((strategy == 1) ? sq : *key));
else
PG_RETURN_BOOL((*key) & sq);
}
Datum
gtsq_union(PG_FUNCTION_ARGS) {
gtsq_union(PG_FUNCTION_ARGS)
{
GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0);
TPQTGist *sign = (TPQTGist*)palloc( sizeof(TPQTGist) );
int i;
int *size = (int *) PG_GETARG_POINTER(1);
TPQTGist *sign = (TPQTGist *) palloc(sizeof(TPQTGist));
int i;
int *size = (int *) PG_GETARG_POINTER(1);
memset( sign, 0, sizeof(TPQTGist) );
memset(sign, 0, sizeof(TPQTGist));
for (i = 0; i < entryvec->n;i++)
for (i = 0; i < entryvec->n; i++)
*sign |= *GETENTRY(entryvec, i);
*size = sizeof(TPQTGist);
@ -189,35 +210,40 @@ gtsq_union(PG_FUNCTION_ARGS) {
}
Datum
gtsq_same(PG_FUNCTION_ARGS) {
TPQTGist *a = (TPQTGist *) PG_GETARG_POINTER(0);
TPQTGist *b = (TPQTGist *) PG_GETARG_POINTER(1);
gtsq_same(PG_FUNCTION_ARGS)
{
TPQTGist *a = (TPQTGist *) PG_GETARG_POINTER(0);
TPQTGist *b = (TPQTGist *) PG_GETARG_POINTER(1);
PG_RETURN_POINTER( *a == *b );
PG_RETURN_POINTER(*a == *b);
}
static int
sizebitvec(TPQTGist sign) {
int size=0,i;
sizebitvec(TPQTGist sign)
{
int size = 0,
i;
for(i=0;i<SIGLEN;i++)
size += 0x01 & (sign>>i);
for (i = 0; i < SIGLEN; i++)
size += 0x01 & (sign >> i);
return size;
}
static int
hemdist(TPQTGist a, TPQTGist b) {
TPQTGist res = a ^ b;
hemdist(TPQTGist a, TPQTGist b)
{
TPQTGist res = a ^ b;
return sizebitvec(res);
}
Datum
gtsq_penalty(PG_FUNCTION_ARGS) {
TPQTGist *origval = (TPQTGist*) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(0))->key);
TPQTGist *newval = (TPQTGist*) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(1))->key);
float *penalty = (float *) PG_GETARG_POINTER(2);
gtsq_penalty(PG_FUNCTION_ARGS)
{
TPQTGist *origval = (TPQTGist *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(0))->key);
TPQTGist *newval = (TPQTGist *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(1))->key);
float *penalty = (float *) PG_GETARG_POINTER(2);
*penalty = hemdist(*origval, *newval);
@ -225,36 +251,45 @@ gtsq_penalty(PG_FUNCTION_ARGS) {
}
typedef struct {
OffsetNumber pos;
int4 cost;
typedef struct
{
OffsetNumber pos;
int4 cost;
} SPLITCOST;
static int
comparecost(const void *a, const void *b) {
if (((SPLITCOST *) a)->cost == ((SPLITCOST *) b)->cost)
return 0;
else
return (((SPLITCOST *) a)->cost > ((SPLITCOST *) b)->cost) ? 1 : -1;
comparecost(const void *a, const void *b)
{
if (((SPLITCOST *) a)->cost == ((SPLITCOST *) b)->cost)
return 0;
else
return (((SPLITCOST *) a)->cost > ((SPLITCOST *) b)->cost) ? 1 : -1;
}
#define WISH_F(a,b,c) (double)( -(double)(((a)-(b))*((a)-(b))*((a)-(b)))*(c) )
Datum
gtsq_picksplit(PG_FUNCTION_ARGS) {
gtsq_picksplit(PG_FUNCTION_ARGS)
{
GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0);
GIST_SPLITVEC *v = (GIST_SPLITVEC *) PG_GETARG_POINTER(1);
OffsetNumber maxoff = entryvec->n - 2;
OffsetNumber k,j;
OffsetNumber k,
j;
TPQTGist *datum_l, *datum_r;
int4 size_alpha, size_beta;
int4 size_waste, waste = -1;
int4 nbytes;
OffsetNumber seed_1 = 0, seed_2 = 0;
OffsetNumber *left, *right;
TPQTGist *datum_l,
*datum_r;
int4 size_alpha,
size_beta;
int4 size_waste,
waste = -1;
int4 nbytes;
OffsetNumber seed_1 = 0,
seed_2 = 0;
OffsetNumber *left,
*right;
SPLITCOST *costvector;
SPLITCOST *costvector;
nbytes = (maxoff + 2) * sizeof(OffsetNumber);
left = v->spl_left = (OffsetNumber *) palloc(nbytes);
@ -262,9 +297,11 @@ gtsq_picksplit(PG_FUNCTION_ARGS) {
v->spl_nleft = v->spl_nright = 0;
for (k = FirstOffsetNumber; k < maxoff; k = OffsetNumberNext(k))
for (j = OffsetNumberNext(k); j <= maxoff; j = OffsetNumberNext(j)) {
size_waste = hemdist( *GETENTRY(entryvec,j), *GETENTRY(entryvec,k) );
if (size_waste > waste) {
for (j = OffsetNumberNext(k); j <= maxoff; j = OffsetNumberNext(j))
{
size_waste = hemdist(*GETENTRY(entryvec, j), *GETENTRY(entryvec, k));
if (size_waste > waste)
{
waste = size_waste;
seed_1 = k;
seed_2 = j;
@ -272,47 +309,56 @@ gtsq_picksplit(PG_FUNCTION_ARGS) {
}
if (seed_1 == 0 || seed_2 == 0) {
if (seed_1 == 0 || seed_2 == 0)
{
seed_1 = 1;
seed_2 = 2;
}
datum_l = (TPQTGist*)palloc( sizeof(TPQTGist) );
*datum_l=*GETENTRY(entryvec,seed_1);
datum_r = (TPQTGist*)palloc( sizeof(TPQTGist) );
*datum_r=*GETENTRY(entryvec,seed_2);
datum_l = (TPQTGist *) palloc(sizeof(TPQTGist));
*datum_l = *GETENTRY(entryvec, seed_1);
datum_r = (TPQTGist *) palloc(sizeof(TPQTGist));
*datum_r = *GETENTRY(entryvec, seed_2);
maxoff = OffsetNumberNext(maxoff);
costvector = (SPLITCOST *) palloc(sizeof(SPLITCOST) * maxoff);
for (j = FirstOffsetNumber; j <= maxoff; j = OffsetNumberNext(j)) {
for (j = FirstOffsetNumber; j <= maxoff; j = OffsetNumberNext(j))
{
costvector[j - 1].pos = j;
size_alpha = hemdist( *GETENTRY(entryvec,seed_1), *GETENTRY(entryvec,j) );
size_beta = hemdist( *GETENTRY(entryvec,seed_2), *GETENTRY(entryvec,j) );
size_alpha = hemdist(*GETENTRY(entryvec, seed_1), *GETENTRY(entryvec, j));
size_beta = hemdist(*GETENTRY(entryvec, seed_2), *GETENTRY(entryvec, j));
costvector[j - 1].cost = abs(size_alpha - size_beta);
}
qsort((void *) costvector, maxoff, sizeof(SPLITCOST), comparecost);
for (k = 0; k < maxoff; k++) {
for (k = 0; k < maxoff; k++)
{
j = costvector[k].pos;
if ( j == seed_1 ) {
if (j == seed_1)
{
*left++ = j;
v->spl_nleft++;
continue;
} else if ( j == seed_2 ) {
}
else if (j == seed_2)
{
*right++ = j;
v->spl_nright++;
continue;
}
size_alpha = hemdist( *datum_l, *GETENTRY(entryvec,j) );
size_beta = hemdist( *datum_r, *GETENTRY(entryvec,j) );
size_alpha = hemdist(*datum_l, *GETENTRY(entryvec, j));
size_beta = hemdist(*datum_r, *GETENTRY(entryvec, j));
if (size_alpha < size_beta + WISH_F(v->spl_nleft, v->spl_nright, 0.05)) {
*datum_l |= *GETENTRY(entryvec,j);
if (size_alpha < size_beta + WISH_F(v->spl_nleft, v->spl_nright, 0.05))
{
*datum_l |= *GETENTRY(entryvec, j);
*left++ = j;
v->spl_nleft++;
} else {
*datum_r |= *GETENTRY(entryvec,j);
}
else
{
*datum_r |= *GETENTRY(entryvec, j);
*right++ = j;
v->spl_nright++;
}
@ -324,5 +370,3 @@ gtsq_picksplit(PG_FUNCTION_ARGS) {
PG_RETURN_POINTER(v);
}

View File

@ -6,135 +6,167 @@
MemoryContext AggregateContext = NULL;
static int
addone(int * counters, int last, int total) {
addone(int *counters, int last, int total)
{
counters[last]++;
if ( counters[last]>=total ) {
if (last==0)
if (counters[last] >= total)
{
if (last == 0)
return 0;
if ( addone( counters, last-1, total-1 ) == 0 )
if (addone(counters, last - 1, total - 1) == 0)
return 0;
counters[last] = counters[last-1]+1;
counters[last] = counters[last - 1] + 1;
}
return 1;
}
static QTNode *
findeq(QTNode *node, QTNode *ex, MemoryType memtype, QTNode *subs, bool *isfind) {
if ( (node->sign & ex->sign) != ex->sign || node->valnode->type != ex->valnode->type || node->valnode->val != ex->valnode->val )
static QTNode *
findeq(QTNode * node, QTNode * ex, MemoryType memtype, QTNode * subs, bool *isfind)
{
if ((node->sign & ex->sign) != ex->sign || node->valnode->type != ex->valnode->type || node->valnode->val != ex->valnode->val)
return node;
if ( node->flags & QTN_NOCHANGE )
return node;
if (node->flags & QTN_NOCHANGE)
return node;
if ( node->valnode->type==OPR ) {
if ( node->nchild == ex->nchild ) {
if ( QTNEq( node, ex ) ) {
QTNFree( node );
if ( subs ) {
node = QTNCopy( subs, memtype );
if (node->valnode->type == OPR)
{
if (node->nchild == ex->nchild)
{
if (QTNEq(node, ex))
{
QTNFree(node);
if (subs)
{
node = QTNCopy(subs, memtype);
node->flags |= QTN_NOCHANGE;
} else
node = NULL;
}
else
node = NULL;
*isfind = true;
}
} else if ( node->nchild > ex->nchild ) {
int *counters = (int*)palloc( sizeof(int) * node->nchild );
int i;
QTNode *tnode = (QTNode*)MEMALLOC( memtype, sizeof(QTNode) );
}
else if (node->nchild > ex->nchild)
{
int *counters = (int *) palloc(sizeof(int) * node->nchild);
int i;
QTNode *tnode = (QTNode *) MEMALLOC(memtype, sizeof(QTNode));
memset(tnode, 0, sizeof(QTNode));
tnode->child = (QTNode**)MEMALLOC( memtype, sizeof(QTNode*) * ex->nchild );
tnode->child = (QTNode **) MEMALLOC(memtype, sizeof(QTNode *) * ex->nchild);
tnode->nchild = ex->nchild;
tnode->valnode = (ITEM*)MEMALLOC( memtype, sizeof(ITEM) );
tnode->valnode = (ITEM *) MEMALLOC(memtype, sizeof(ITEM));
*(tnode->valnode) = *(ex->valnode);
for(i=0;i<ex->nchild;i++)
counters[i]=i;
for (i = 0; i < ex->nchild; i++)
counters[i] = i;
do {
tnode->sign=0;
for(i=0;i<ex->nchild;i++) {
tnode->child[i] = node->child[ counters[i] ];
do
{
tnode->sign = 0;
for (i = 0; i < ex->nchild; i++)
{
tnode->child[i] = node->child[counters[i]];
tnode->sign |= tnode->child[i]->sign;
}
if ( QTNEq( tnode, ex ) ) {
int j=0;
if (QTNEq(tnode, ex))
{
int j = 0;
MEMFREE( memtype, tnode->valnode );
MEMFREE( memtype, tnode->child );
MEMFREE( memtype, tnode );
if ( subs ) {
tnode = QTNCopy( subs, memtype );
MEMFREE(memtype, tnode->valnode);
MEMFREE(memtype, tnode->child);
MEMFREE(memtype, tnode);
if (subs)
{
tnode = QTNCopy(subs, memtype);
tnode->flags = QTN_NOCHANGE | QTN_NEEDFREE;
} else
}
else
tnode = NULL;
node->child[ counters[0] ] = tnode;
node->child[counters[0]] = tnode;
for(i=1;i<ex->nchild;i++)
node->child[ counters[i] ] = NULL;
for(i=0;i<node->nchild;i++) {
if ( node->child[i] ) {
for (i = 1; i < ex->nchild; i++)
node->child[counters[i]] = NULL;
for (i = 0; i < node->nchild; i++)
{
if (node->child[i])
{
node->child[j] = node->child[i];
j++;
}
}
node->nchild = j;
node->nchild = j;
*isfind = true;
break;
}
} while (addone(counters,ex->nchild-1,node->nchild));
if ( tnode && (tnode->flags & QTN_NOCHANGE) == 0 ) {
MEMFREE( memtype, tnode->valnode );
MEMFREE( memtype, tnode->child );
MEMFREE( memtype, tnode );
} else
QTNSort( node );
pfree( counters );
} while (addone(counters, ex->nchild - 1, node->nchild));
if (tnode && (tnode->flags & QTN_NOCHANGE) == 0)
{
MEMFREE(memtype, tnode->valnode);
MEMFREE(memtype, tnode->child);
MEMFREE(memtype, tnode);
}
else
QTNSort(node);
pfree(counters);
}
} else if ( QTNEq( node, ex ) ) {
QTNFree( node );
if ( subs ) {
node = QTNCopy( subs, memtype );
}
else if (QTNEq(node, ex))
{
QTNFree(node);
if (subs)
{
node = QTNCopy(subs, memtype);
node->flags |= QTN_NOCHANGE;
} else {
}
else
{
node = NULL;
}
*isfind = true;
}
return node;
}
}
static QTNode *
dofindsubquery( QTNode *root, QTNode *ex, MemoryType memtype, QTNode *subs, bool *isfind ) {
root = findeq( root, ex, memtype, subs, isfind );
dofindsubquery(QTNode * root, QTNode * ex, MemoryType memtype, QTNode * subs, bool *isfind)
{
root = findeq(root, ex, memtype, subs, isfind);
if ( root && (root->flags & QTN_NOCHANGE) == 0 && root->valnode->type==OPR) {
int i;
for(i=0;i<root->nchild;i++)
root->child[i] = dofindsubquery( root->child[i], ex, memtype, subs, isfind );
if (root && (root->flags & QTN_NOCHANGE) == 0 && root->valnode->type == OPR)
{
int i;
for (i = 0; i < root->nchild; i++)
root->child[i] = dofindsubquery(root->child[i], ex, memtype, subs, isfind);
}
return root;
}
static QTNode *
dropvoidsubtree( QTNode *root ) {
dropvoidsubtree(QTNode * root)
{
if ( !root )
if (!root)
return NULL;
if ( root->valnode->type==OPR ) {
int i,j=0;
if (root->valnode->type == OPR)
{
int i,
j = 0;
for(i=0;i<root->nchild;i++) {
if ( root->child[i] ) {
for (i = 0; i < root->nchild; i++)
{
if (root->child[i])
{
root->child[j] = root->child[i];
j++;
}
@ -142,88 +174,100 @@ dropvoidsubtree( QTNode *root ) {
root->nchild = j;
if ( root->valnode->val == (int4)'!' && root->nchild==0 ) {
if (root->valnode->val == (int4) '!' && root->nchild == 0)
{
QTNFree(root);
root=NULL;
} else if ( root->nchild==1 ) {
QTNode *nroot = root->child[0];
root = NULL;
}
else if (root->nchild == 1)
{
QTNode *nroot = root->child[0];
pfree(root);
root = nroot;
}
root = nroot;
}
}
return root;
}
static QTNode *
findsubquery( QTNode *root, QTNode *ex, MemoryType memtype, QTNode *subs, bool *isfind ) {
bool DidFind = false;
root = dofindsubquery( root, ex, memtype, subs, &DidFind );
findsubquery(QTNode * root, QTNode * ex, MemoryType memtype, QTNode * subs, bool *isfind)
{
bool DidFind = false;
if ( !subs && DidFind )
root = dropvoidsubtree( root );
root = dofindsubquery(root, ex, memtype, subs, &DidFind);
if ( isfind )
if (!subs && DidFind)
root = dropvoidsubtree(root);
if (isfind)
*isfind = DidFind;
return root;
}
static Oid tsqOid = InvalidOid;
static Oid tsqOid = InvalidOid;
static void
get_tsq_Oid(void)
{
int ret;
bool isnull;
int ret;
bool isnull;
if ((ret = SPI_exec("select oid from pg_type where typname='tsquery'", 1)) < 0)
/* internal error */
elog(ERROR, "SPI_exec to get tsquery oid returns %d", ret);
if ((ret = SPI_exec("select oid from pg_type where typname='tsquery'", 1)) < 0)
/* internal error */
elog(ERROR, "SPI_exec to get tsquery oid returns %d", ret);
if (SPI_processed < 0)
/* internal error */
elog(ERROR, "There is no tsvector type");
tsqOid = DatumGetObjectId(SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &isnull));
if (tsqOid == InvalidOid)
/* internal error */
elog(ERROR, "tsquery type has InvalidOid");
if (SPI_processed < 0)
/* internal error */
elog(ERROR, "There is no tsvector type");
tsqOid = DatumGetObjectId(SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &isnull));
if (tsqOid == InvalidOid)
/* internal error */
elog(ERROR, "tsquery type has InvalidOid");
}
PG_FUNCTION_INFO_V1(tsquery_rewrite);
PG_FUNCTION_INFO_V1(rewrite_accum);
Datum rewrite_accum(PG_FUNCTION_ARGS);
Datum rewrite_accum(PG_FUNCTION_ARGS);
Datum
rewrite_accum(PG_FUNCTION_ARGS) {
QUERYTYPE *acc = (QUERYTYPE *) PG_GETARG_POINTER(0);
ArrayType *qa = (ArrayType *) DatumGetPointer(PG_DETOAST_DATUM_COPY(PG_GETARG_DATUM(1)));
QUERYTYPE *q;
QTNode *qex, *subs = NULL, *acctree;
bool isfind = false;
Datum *elemsp;
int nelemsp;
Datum
rewrite_accum(PG_FUNCTION_ARGS)
{
QUERYTYPE *acc = (QUERYTYPE *) PG_GETARG_POINTER(0);
ArrayType *qa = (ArrayType *) DatumGetPointer(PG_DETOAST_DATUM_COPY(PG_GETARG_DATUM(1)));
QUERYTYPE *q;
QTNode *qex,
*subs = NULL,
*acctree;
bool isfind = false;
Datum *elemsp;
int nelemsp;
AggregateContext = ((AggState *) fcinfo->context)->aggcontext;
if (acc == NULL || PG_ARGISNULL(0)) {
acc = (QUERYTYPE*)MEMALLOC( AggMemory, sizeof(QUERYTYPE) );
if (acc == NULL || PG_ARGISNULL(0))
{
acc = (QUERYTYPE *) MEMALLOC(AggMemory, sizeof(QUERYTYPE));
acc->len = HDRSIZEQT;
acc->size = 0;
}
if ( qa == NULL || PG_ARGISNULL(1) ) {
PG_FREE_IF_COPY( qa, 1 );
PG_RETURN_POINTER( acc );
if (qa == NULL || PG_ARGISNULL(1))
{
PG_FREE_IF_COPY(qa, 1);
PG_RETURN_POINTER(acc);
}
if ( ARR_NDIM(qa) != 1 )
if (ARR_NDIM(qa) != 1)
elog(ERROR, "array must be one-dimensional, not %d dimension", ARR_NDIM(qa));
if ( ArrayGetNItems( ARR_NDIM(qa), ARR_DIMS(qa)) != 3 )
if (ArrayGetNItems(ARR_NDIM(qa), ARR_DIMS(qa)) != 3)
elog(ERROR, "array should have only three elements");
if (tsqOid == InvalidOid) {
if (tsqOid == InvalidOid)
{
SPI_connect();
get_tsq_Oid();
SPI_finish();
@ -232,108 +276,122 @@ rewrite_accum(PG_FUNCTION_ARGS) {
if (ARR_ELEMTYPE(qa) != tsqOid)
elog(ERROR, "array should contain tsquery type");
deconstruct_array(qa, tsqOid, -1, false, 'i', &elemsp, NULL, &nelemsp);
deconstruct_array(qa, tsqOid, -1, false, 'i', &elemsp, NULL, &nelemsp);
q = (QUERYTYPE*)DatumGetPointer( elemsp[0] );
if ( q->size == 0 ) {
pfree( elemsp );
PG_RETURN_POINTER( acc );
q = (QUERYTYPE *) DatumGetPointer(elemsp[0]);
if (q->size == 0)
{
pfree(elemsp);
PG_RETURN_POINTER(acc);
}
if ( !acc->size ) {
if ( acc->len > HDRSIZEQT ) {
pfree( elemsp );
PG_RETURN_POINTER( acc );
} else
acctree = QT2QTN( GETQUERY(q), GETOPERAND(q) );
} else
acctree = QT2QTN( GETQUERY(acc), GETOPERAND(acc) );
QTNTernary( acctree );
QTNSort( acctree );
q = (QUERYTYPE*)DatumGetPointer( elemsp[1] );
if ( q->size == 0 ) {
pfree( elemsp );
PG_RETURN_POINTER( acc );
if (!acc->size)
{
if (acc->len > HDRSIZEQT)
{
pfree(elemsp);
PG_RETURN_POINTER(acc);
}
else
acctree = QT2QTN(GETQUERY(q), GETOPERAND(q));
}
qex = QT2QTN( GETQUERY(q), GETOPERAND(q) );
QTNTernary( qex );
QTNSort( qex );
q = (QUERYTYPE*)DatumGetPointer( elemsp[2] );
if ( q->size )
subs = QT2QTN( GETQUERY(q), GETOPERAND(q) );
else
acctree = QT2QTN(GETQUERY(acc), GETOPERAND(acc));
acctree = findsubquery( acctree, qex, PlainMemory, subs, &isfind );
QTNTernary(acctree);
QTNSort(acctree);
if ( isfind || !acc->size ) {
q = (QUERYTYPE *) DatumGetPointer(elemsp[1]);
if (q->size == 0)
{
pfree(elemsp);
PG_RETURN_POINTER(acc);
}
qex = QT2QTN(GETQUERY(q), GETOPERAND(q));
QTNTernary(qex);
QTNSort(qex);
q = (QUERYTYPE *) DatumGetPointer(elemsp[2]);
if (q->size)
subs = QT2QTN(GETQUERY(q), GETOPERAND(q));
acctree = findsubquery(acctree, qex, PlainMemory, subs, &isfind);
if (isfind || !acc->size)
{
/* pfree( acc ); do not pfree(p), because nodeAgg.c will */
if ( acctree ) {
QTNBinary( acctree );
acc = QTN2QT( acctree, AggMemory );
} else {
acc = (QUERYTYPE*)MEMALLOC( AggMemory, HDRSIZEQT*2 );
if (acctree)
{
QTNBinary(acctree);
acc = QTN2QT(acctree, AggMemory);
}
else
{
acc = (QUERYTYPE *) MEMALLOC(AggMemory, HDRSIZEQT * 2);
acc->len = HDRSIZEQT * 2;
acc->size = 0;
}
}
pfree( elemsp );
QTNFree( qex );
QTNFree( subs );
QTNFree( acctree );
pfree(elemsp);
QTNFree(qex);
QTNFree(subs);
QTNFree(acctree);
PG_RETURN_POINTER( acc );
PG_RETURN_POINTER(acc);
}
PG_FUNCTION_INFO_V1(rewrite_finish);
Datum rewrite_finish(PG_FUNCTION_ARGS);
Datum rewrite_finish(PG_FUNCTION_ARGS);
Datum
rewrite_finish(PG_FUNCTION_ARGS) {
QUERYTYPE *acc = (QUERYTYPE *) PG_GETARG_POINTER(0);
QUERYTYPE *rewrited;
if (acc == NULL || PG_ARGISNULL(0) || acc->size == 0 ) {
acc = (QUERYTYPE*)palloc(sizeof(QUERYTYPE));
Datum
rewrite_finish(PG_FUNCTION_ARGS)
{
QUERYTYPE *acc = (QUERYTYPE *) PG_GETARG_POINTER(0);
QUERYTYPE *rewrited;
if (acc == NULL || PG_ARGISNULL(0) || acc->size == 0)
{
acc = (QUERYTYPE *) palloc(sizeof(QUERYTYPE));
acc->len = HDRSIZEQT;
acc->size = 0;
}
rewrited = (QUERYTYPE*) palloc( acc->len );
memcpy( rewrited, acc, acc->len );
pfree( acc );
rewrited = (QUERYTYPE *) palloc(acc->len);
memcpy(rewrited, acc, acc->len);
pfree(acc);
PG_RETURN_POINTER(rewrited);
PG_RETURN_POINTER(rewrited);
}
Datum tsquery_rewrite(PG_FUNCTION_ARGS);
Datum tsquery_rewrite(PG_FUNCTION_ARGS);
Datum
tsquery_rewrite(PG_FUNCTION_ARGS) {
tsquery_rewrite(PG_FUNCTION_ARGS)
{
QUERYTYPE *query = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM_COPY(PG_GETARG_DATUM(0)));
text *in = PG_GETARG_TEXT_P(1);
text *in = PG_GETARG_TEXT_P(1);
QUERYTYPE *rewrited = query;
QTNode *tree;
char *buf;
void *plan;
Portal portal;
bool isnull;
int i;
QTNode *tree;
char *buf;
void *plan;
Portal portal;
bool isnull;
int i;
if ( query->size == 0 ) {
if (query->size == 0)
{
PG_FREE_IF_COPY(in, 1);
PG_RETURN_POINTER( rewrited );
PG_RETURN_POINTER(rewrited);
}
tree = QT2QTN( GETQUERY(query), GETOPERAND(query) );
QTNTernary( tree );
QTNSort( tree );
tree = QT2QTN(GETQUERY(query), GETOPERAND(query));
QTNTernary(tree);
QTNSort(tree);
buf = (char*)palloc( VARSIZE(in) );
buf = (char *) palloc(VARSIZE(in));
memcpy(buf, VARDATA(in), VARSIZE(in) - VARHDRSZ);
buf[ VARSIZE(in) - VARHDRSZ ] = '\0';
buf[VARSIZE(in) - VARHDRSZ] = '\0';
SPI_connect();
@ -345,132 +403,147 @@ tsquery_rewrite(PG_FUNCTION_ARGS) {
if ((portal = SPI_cursor_open(NULL, plan, NULL, NULL, false)) == NULL)
elog(ERROR, "SPI_cursor_open('%s') returns NULL", buf);
SPI_cursor_fetch(portal, true, 100);
if (SPI_tuptable->tupdesc->natts != 2)
elog(ERROR, "number of fields doesn't equal to 2");
if (SPI_gettypeid(SPI_tuptable->tupdesc, 1) != tsqOid )
if (SPI_gettypeid(SPI_tuptable->tupdesc, 1) != tsqOid)
elog(ERROR, "column #1 isn't of tsquery type");
if (SPI_gettypeid(SPI_tuptable->tupdesc, 2) != tsqOid )
if (SPI_gettypeid(SPI_tuptable->tupdesc, 2) != tsqOid)
elog(ERROR, "column #2 isn't of tsquery type");
while (SPI_processed > 0 && tree ) {
for (i = 0; i < SPI_processed && tree; i++) {
Datum qdata = SPI_getbinval(SPI_tuptable->vals[i], SPI_tuptable->tupdesc, 1, &isnull);
Datum sdata;
while (SPI_processed > 0 && tree)
{
for (i = 0; i < SPI_processed && tree; i++)
{
Datum qdata = SPI_getbinval(SPI_tuptable->vals[i], SPI_tuptable->tupdesc, 1, &isnull);
Datum sdata;
if ( isnull ) continue;
if (isnull)
continue;
sdata = SPI_getbinval(SPI_tuptable->vals[i], SPI_tuptable->tupdesc, 2, &isnull);
if (!isnull) {
QUERYTYPE *qtex = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM(qdata));
QUERYTYPE *qtsubs = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM(sdata));
QTNode *qex, *qsubs = NULL;
if (!isnull)
{
QUERYTYPE *qtex = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM(qdata));
QUERYTYPE *qtsubs = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM(sdata));
QTNode *qex,
*qsubs = NULL;
if (qtex->size == 0) {
if ( qtex != (QUERYTYPE *) DatumGetPointer(qdata) )
pfree( qtex );
if ( qtsubs != (QUERYTYPE *) DatumGetPointer(sdata) )
pfree( qtsubs );
if (qtex->size == 0)
{
if (qtex != (QUERYTYPE *) DatumGetPointer(qdata))
pfree(qtex);
if (qtsubs != (QUERYTYPE *) DatumGetPointer(sdata))
pfree(qtsubs);
continue;
}
qex = QT2QTN( GETQUERY(qtex), GETOPERAND(qtex) );
qex = QT2QTN(GETQUERY(qtex), GETOPERAND(qtex));
QTNTernary( qex );
QTNSort( qex );
QTNTernary(qex);
QTNSort(qex);
if ( qtsubs->size )
qsubs = QT2QTN( GETQUERY(qtsubs), GETOPERAND(qtsubs) );
if (qtsubs->size)
qsubs = QT2QTN(GETQUERY(qtsubs), GETOPERAND(qtsubs));
tree = findsubquery( tree, qex, SPIMemory, qsubs, NULL );
QTNFree( qex );
if ( qtex != (QUERYTYPE *) DatumGetPointer(qdata) )
pfree( qtex );
QTNFree( qsubs );
if ( qtsubs != (QUERYTYPE *) DatumGetPointer(sdata) )
pfree( qtsubs );
tree = findsubquery(tree, qex, SPIMemory, qsubs, NULL);
QTNFree(qex);
if (qtex != (QUERYTYPE *) DatumGetPointer(qdata))
pfree(qtex);
QTNFree(qsubs);
if (qtsubs != (QUERYTYPE *) DatumGetPointer(sdata))
pfree(qtsubs);
}
}
SPI_freetuptable(SPI_tuptable);
SPI_cursor_fetch(portal, true, 100);
}
SPI_freetuptable(SPI_tuptable);
SPI_cursor_close(portal);
SPI_freeplan(plan);
SPI_finish();
SPI_finish();
if ( tree ) {
QTNBinary( tree );
rewrited = QTN2QT( tree, PlainMemory );
QTNFree( tree );
if (tree)
{
QTNBinary(tree);
rewrited = QTN2QT(tree, PlainMemory);
QTNFree(tree);
PG_FREE_IF_COPY(query, 0);
} else {
}
else
{
rewrited->len = HDRSIZEQT;
rewrited->size = 0;
}
pfree(buf);
PG_FREE_IF_COPY(in, 1);
PG_RETURN_POINTER( rewrited );
PG_RETURN_POINTER(rewrited);
}
PG_FUNCTION_INFO_V1(tsquery_rewrite_query);
Datum tsquery_rewrite_query(PG_FUNCTION_ARGS);
Datum tsquery_rewrite_query(PG_FUNCTION_ARGS);
Datum
tsquery_rewrite_query(PG_FUNCTION_ARGS) {
QUERYTYPE *query = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM_COPY(PG_GETARG_DATUM(0)));
QUERYTYPE *ex = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM(PG_GETARG_DATUM(1)));
QUERYTYPE *subst = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM(PG_GETARG_DATUM(2)));
QUERYTYPE *rewrited = query;
QTNode *tree, *qex, *subs = NULL;
tsquery_rewrite_query(PG_FUNCTION_ARGS)
{
QUERYTYPE *query = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM_COPY(PG_GETARG_DATUM(0)));
QUERYTYPE *ex = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM(PG_GETARG_DATUM(1)));
QUERYTYPE *subst = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM(PG_GETARG_DATUM(2)));
QUERYTYPE *rewrited = query;
QTNode *tree,
*qex,
*subs = NULL;
if ( query->size == 0 || ex->size == 0 ) {
PG_FREE_IF_COPY(ex, 1);
PG_FREE_IF_COPY(subst, 2);
PG_RETURN_POINTER( rewrited );
}
tree = QT2QTN( GETQUERY(query), GETOPERAND(query) );
QTNTernary( tree );
QTNSort( tree );
qex = QT2QTN( GETQUERY(ex), GETOPERAND(ex) );
QTNTernary( qex );
QTNSort( qex );
if ( subst->size )
subs = QT2QTN( GETQUERY(subst), GETOPERAND(subst) );
tree = findsubquery( tree, qex, PlainMemory, subs, NULL );
QTNFree( qex );
QTNFree( subs );
if ( !tree ) {
rewrited->len = HDRSIZEQT;
rewrited->size = 0;
PG_FREE_IF_COPY(ex, 1);
PG_FREE_IF_COPY(subst, 2);
PG_RETURN_POINTER( rewrited );
} else {
QTNBinary( tree );
rewrited = QTN2QT( tree, PlainMemory );
QTNFree( tree );
if (query->size == 0 || ex->size == 0)
{
PG_FREE_IF_COPY(ex, 1);
PG_FREE_IF_COPY(subst, 2);
PG_RETURN_POINTER(rewrited);
}
PG_FREE_IF_COPY(query, 0);
PG_FREE_IF_COPY(ex, 1);
PG_FREE_IF_COPY(subst, 2);
PG_RETURN_POINTER( rewrited );
}
tree = QT2QTN(GETQUERY(query), GETOPERAND(query));
QTNTernary(tree);
QTNSort(tree);
qex = QT2QTN(GETQUERY(ex), GETOPERAND(ex));
QTNTernary(qex);
QTNSort(qex);
if (subst->size)
subs = QT2QTN(GETQUERY(subst), GETOPERAND(subst));
tree = findsubquery(tree, qex, PlainMemory, subs, NULL);
QTNFree(qex);
QTNFree(subs);
if (!tree)
{
rewrited->len = HDRSIZEQT;
rewrited->size = 0;
PG_FREE_IF_COPY(ex, 1);
PG_FREE_IF_COPY(subst, 2);
PG_RETURN_POINTER(rewrited);
}
else
{
QTNBinary(tree);
rewrited = QTN2QT(tree, PlainMemory);
QTNFree(tree);
}
PG_FREE_IF_COPY(query, 0);
PG_FREE_IF_COPY(ex, 1);
PG_FREE_IF_COPY(subst, 2);
PG_RETURN_POINTER(rewrited);
}

View File

@ -4,168 +4,188 @@
#include "query_util.h"
PG_FUNCTION_INFO_V1(tsquery_numnode);
Datum tsquery_numnode(PG_FUNCTION_ARGS);
Datum tsquery_numnode(PG_FUNCTION_ARGS);
Datum
tsquery_numnode(PG_FUNCTION_ARGS) {
tsquery_numnode(PG_FUNCTION_ARGS)
{
QUERYTYPE *query = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM_COPY(PG_GETARG_DATUM(0)));
int nnode = query->size;
PG_FREE_IF_COPY(query,0);
int nnode = query->size;
PG_FREE_IF_COPY(query, 0);
PG_RETURN_INT32(nnode);
}
static QTNode*
join_tsqueries(QUERYTYPE *a, QUERYTYPE *b) {
QTNode *res=(QTNode*)palloc0( sizeof(QTNode) );
static QTNode *
join_tsqueries(QUERYTYPE * a, QUERYTYPE * b)
{
QTNode *res = (QTNode *) palloc0(sizeof(QTNode));
res->flags |= QTN_NEEDFREE;
res->valnode = (ITEM*)palloc0( sizeof(ITEM) );
res->valnode = (ITEM *) palloc0(sizeof(ITEM));
res->valnode->type = OPR;
res->child = (QTNode**)palloc0( sizeof(QTNode*)*2 );
res->child[0] = QT2QTN( GETQUERY(b), GETOPERAND(b) );
res->child[1] = QT2QTN( GETQUERY(a), GETOPERAND(a) );
res->child = (QTNode **) palloc0(sizeof(QTNode *) * 2);
res->child[0] = QT2QTN(GETQUERY(b), GETOPERAND(b));
res->child[1] = QT2QTN(GETQUERY(a), GETOPERAND(a));
res->nchild = 2;
return res;
}
PG_FUNCTION_INFO_V1(tsquery_and);
Datum tsquery_and(PG_FUNCTION_ARGS);
Datum tsquery_and(PG_FUNCTION_ARGS);
Datum
tsquery_and(PG_FUNCTION_ARGS) {
tsquery_and(PG_FUNCTION_ARGS)
{
QUERYTYPE *a = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM_COPY(PG_GETARG_DATUM(0)));
QUERYTYPE *b = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM_COPY(PG_GETARG_DATUM(1)));
QTNode *res;
QTNode *res;
QUERYTYPE *query;
if ( a->size == 0 ) {
PG_FREE_IF_COPY(a,1);
if (a->size == 0)
{
PG_FREE_IF_COPY(a, 1);
PG_RETURN_POINTER(b);
} else if ( b->size == 0 ) {
PG_FREE_IF_COPY(b,1);
}
else if (b->size == 0)
{
PG_FREE_IF_COPY(b, 1);
PG_RETURN_POINTER(a);
}
}
res = join_tsqueries(a, b);
res->valnode->val = '&';
query = QTN2QT( res, PlainMemory );
query = QTN2QT(res, PlainMemory);
QTNFree(res);
PG_FREE_IF_COPY(a,0);
PG_FREE_IF_COPY(b,1);
PG_FREE_IF_COPY(a, 0);
PG_FREE_IF_COPY(b, 1);
PG_RETURN_POINTER(query);
}
PG_FUNCTION_INFO_V1(tsquery_or);
Datum tsquery_or(PG_FUNCTION_ARGS);
Datum tsquery_or(PG_FUNCTION_ARGS);
Datum
tsquery_or(PG_FUNCTION_ARGS) {
tsquery_or(PG_FUNCTION_ARGS)
{
QUERYTYPE *a = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM_COPY(PG_GETARG_DATUM(0)));
QUERYTYPE *b = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM_COPY(PG_GETARG_DATUM(1)));
QTNode *res;
QTNode *res;
QUERYTYPE *query;
if ( a->size == 0 ) {
PG_FREE_IF_COPY(a,1);
if (a->size == 0)
{
PG_FREE_IF_COPY(a, 1);
PG_RETURN_POINTER(b);
} else if ( b->size == 0 ) {
PG_FREE_IF_COPY(b,1);
}
else if (b->size == 0)
{
PG_FREE_IF_COPY(b, 1);
PG_RETURN_POINTER(a);
}
}
res = join_tsqueries(a, b);
res->valnode->val = '|';
query = QTN2QT( res, PlainMemory );
query = QTN2QT(res, PlainMemory);
QTNFree(res);
PG_FREE_IF_COPY(a,0);
PG_FREE_IF_COPY(b,1);
PG_FREE_IF_COPY(a, 0);
PG_FREE_IF_COPY(b, 1);
PG_RETURN_POINTER(query);
}
PG_FUNCTION_INFO_V1(tsquery_not);
Datum tsquery_not(PG_FUNCTION_ARGS);
Datum tsquery_not(PG_FUNCTION_ARGS);
Datum
tsquery_not(PG_FUNCTION_ARGS) {
tsquery_not(PG_FUNCTION_ARGS)
{
QUERYTYPE *a = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM_COPY(PG_GETARG_DATUM(0)));
QTNode *res;
QTNode *res;
QUERYTYPE *query;
if ( a->size == 0 )
if (a->size == 0)
PG_RETURN_POINTER(a);
res=(QTNode*)palloc0( sizeof(QTNode) );
res = (QTNode *) palloc0(sizeof(QTNode));
res->flags |= QTN_NEEDFREE;
res->valnode = (ITEM*)palloc0( sizeof(ITEM) );
res->valnode = (ITEM *) palloc0(sizeof(ITEM));
res->valnode->type = OPR;
res->valnode->val = '!';
res->child = (QTNode**)palloc0( sizeof(QTNode*) );
res->child[0] = QT2QTN( GETQUERY(a), GETOPERAND(a) );
res->child = (QTNode **) palloc0(sizeof(QTNode *));
res->child[0] = QT2QTN(GETQUERY(a), GETOPERAND(a));
res->nchild = 1;
query = QTN2QT( res, PlainMemory );
query = QTN2QT(res, PlainMemory);
QTNFree(res);
PG_FREE_IF_COPY(a,0);
PG_FREE_IF_COPY(a, 0);
PG_RETURN_POINTER(query);
}
static int
CompareTSQ( QUERYTYPE *a, QUERYTYPE *b ) {
if ( a->size != b->size ) {
return ( a->size < b->size ) ? -1 : 1;
} else if ( a->len != b->len ) {
return ( a->len < b->len ) ? -1 : 1;
} else {
QTNode *an = QT2QTN( GETQUERY(a), GETOPERAND(a) );
QTNode *bn = QT2QTN( GETQUERY(b), GETOPERAND(b) );
int res = QTNodeCompare(an, bn);
CompareTSQ(QUERYTYPE * a, QUERYTYPE * b)
{
if (a->size != b->size)
{
return (a->size < b->size) ? -1 : 1;
}
else if (a->len != b->len)
{
return (a->len < b->len) ? -1 : 1;
}
else
{
QTNode *an = QT2QTN(GETQUERY(a), GETOPERAND(a));
QTNode *bn = QT2QTN(GETQUERY(b), GETOPERAND(b));
int res = QTNodeCompare(an, bn);
QTNFree(an);
QTNFree(bn);
return res;
return res;
}
return 0;
}
PG_FUNCTION_INFO_V1(tsquery_cmp); \
Datum tsquery_cmp(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(tsquery_cmp);
\
Datum tsquery_cmp(PG_FUNCTION_ARGS);
Datum
tsquery_cmp(PG_FUNCTION_ARGS) {
tsquery_cmp(PG_FUNCTION_ARGS)
{
QUERYTYPE *a = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM_COPY(PG_GETARG_DATUM(0)));
QUERYTYPE *b = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM_COPY(PG_GETARG_DATUM(1)));
int res = CompareTSQ(a,b);
int res = CompareTSQ(a, b);
PG_FREE_IF_COPY(a,0);
PG_FREE_IF_COPY(b,1);
PG_FREE_IF_COPY(a, 0);
PG_FREE_IF_COPY(b, 1);
PG_RETURN_INT32(res);
}
#define CMPFUNC( NAME, ACTION ) \
#define CMPFUNC( NAME, ACTION ) \
PG_FUNCTION_INFO_V1(NAME); \
Datum NAME(PG_FUNCTION_ARGS); \
\
Datum \
NAME(PG_FUNCTION_ARGS) { \
NAME(PG_FUNCTION_ARGS) { \
QUERYTYPE *a = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM_COPY(PG_GETARG_DATUM(0))); \
QUERYTYPE *b = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM_COPY(PG_GETARG_DATUM(1))); \
int res = CompareTSQ(a,b); \
@ -176,12 +196,9 @@ NAME(PG_FUNCTION_ARGS) { \
PG_RETURN_BOOL( ACTION ); \
}
CMPFUNC( tsquery_lt, res <0 );
CMPFUNC( tsquery_le, res<=0 );
CMPFUNC( tsquery_eq, res==0 );
CMPFUNC( tsquery_ge, res>=0 );
CMPFUNC( tsquery_gt, res >0 );
CMPFUNC( tsquery_ne, res!=0 );
CMPFUNC(tsquery_lt, res < 0);
CMPFUNC(tsquery_le, res <= 0);
CMPFUNC(tsquery_eq, res == 0);
CMPFUNC(tsquery_ge, res >= 0);
CMPFUNC(tsquery_gt, res > 0);
CMPFUNC(tsquery_ne, res != 0);

View File

@ -2,153 +2,180 @@
#include "executor/spi.h"
#include "query_util.h"
QTNode*
QT2QTN( ITEM *in, char *operand ) {
QTNode *node = (QTNode*)palloc0( sizeof(QTNode) );
QTNode *
QT2QTN(ITEM * in, char *operand)
{
QTNode *node = (QTNode *) palloc0(sizeof(QTNode));
node->valnode = in;
if (in->type == OPR) {
node->child = (QTNode**)palloc0( sizeof(QTNode*) * 2 );
node->child[0] = QT2QTN( in + 1, operand );
if (in->type == OPR)
{
node->child = (QTNode **) palloc0(sizeof(QTNode *) * 2);
node->child[0] = QT2QTN(in + 1, operand);
node->sign = node->child[0]->sign;
if (in->val == (int4) '!')
node->nchild = 1;
else {
else
{
node->nchild = 2;
node->child[1] = QT2QTN( in + in->left, operand );
node->child[1] = QT2QTN(in + in->left, operand);
node->sign |= node->child[1]->sign;
}
} else if ( operand ) {
node->word = operand + in->distance;
node->sign = 1 << ( in->val % 32 );
}
return node;
else if (operand)
{
node->word = operand + in->distance;
node->sign = 1 << (in->val % 32);
}
return node;
}
void
QTNFree( QTNode* in ) {
if ( !in )
void
QTNFree(QTNode * in)
{
if (!in)
return;
if ( in->valnode->type == VAL && in->word && (in->flags & QTN_WORDFREE) !=0 )
pfree( in->word );
if (in->valnode->type == VAL && in->word && (in->flags & QTN_WORDFREE) != 0)
pfree(in->word);
if ( in->child ) {
if ( in->valnode ) {
if ( in->valnode->type == OPR && in->nchild > 0 ) {
int i;
for (i=0;i<in->nchild;i++)
QTNFree( in->child[i] );
if (in->child)
{
if (in->valnode)
{
if (in->valnode->type == OPR && in->nchild > 0)
{
int i;
for (i = 0; i < in->nchild; i++)
QTNFree(in->child[i]);
}
if ( in->flags & QTN_NEEDFREE )
pfree( in->valnode );
if (in->flags & QTN_NEEDFREE)
pfree(in->valnode);
}
pfree( in->child );
pfree(in->child);
}
pfree( in );
pfree(in);
}
int
QTNodeCompare( QTNode *an, QTNode *bn ) {
if ( an->valnode->type != bn->valnode->type )
return ( an->valnode->type > bn->valnode->type ) ? -1 : 1;
else if ( an->valnode->val != bn->valnode->val )
return ( an->valnode->val > bn->valnode->val ) ? -1 : 1;
else if ( an->valnode->type == VAL ) {
if ( an->valnode->length == bn->valnode->length )
return strncmp( an->word, bn->word, an->valnode->length );
else
return ( an->valnode->length > bn->valnode->length ) ? -1 : 1;
} else if ( an->nchild != bn->nchild ) {
return ( an->nchild > bn->nchild ) ? -1 : 1;
} else {
int i,res;
for( i=0; i<an->nchild; i++ )
if ( (res=QTNodeCompare(an->child[i], bn->child[i]))!=0 )
QTNodeCompare(QTNode * an, QTNode * bn)
{
if (an->valnode->type != bn->valnode->type)
return (an->valnode->type > bn->valnode->type) ? -1 : 1;
else if (an->valnode->val != bn->valnode->val)
return (an->valnode->val > bn->valnode->val) ? -1 : 1;
else if (an->valnode->type == VAL)
{
if (an->valnode->length == bn->valnode->length)
return strncmp(an->word, bn->word, an->valnode->length);
else
return (an->valnode->length > bn->valnode->length) ? -1 : 1;
}
else if (an->nchild != bn->nchild)
{
return (an->nchild > bn->nchild) ? -1 : 1;
}
else
{
int i,
res;
for (i = 0; i < an->nchild; i++)
if ((res = QTNodeCompare(an->child[i], bn->child[i])) != 0)
return res;
}
}
return 0;
}
static int
cmpQTN( const void *a, const void *b ) {
return QTNodeCompare( *(QTNode**)a, *(QTNode**)b );
cmpQTN(const void *a, const void *b)
{
return QTNodeCompare(*(QTNode **) a, *(QTNode **) b);
}
void
QTNSort( QTNode* in ) {
int i;
if ( in->valnode->type != OPR )
void
QTNSort(QTNode * in)
{
int i;
if (in->valnode->type != OPR)
return;
for (i=0;i<in->nchild;i++)
QTNSort( in->child[i] );
if ( in->nchild > 1 )
qsort((void *) in->child, in->nchild, sizeof(QTNode*), cmpQTN);
for (i = 0; i < in->nchild; i++)
QTNSort(in->child[i]);
if (in->nchild > 1)
qsort((void *) in->child, in->nchild, sizeof(QTNode *), cmpQTN);
}
bool
QTNEq( QTNode* a, QTNode* b ) {
uint32 sign = a->sign & b->sign;
if ( !(sign == a->sign && sign == b->sign) )
bool
QTNEq(QTNode * a, QTNode * b)
{
uint32 sign = a->sign & b->sign;
if (!(sign == a->sign && sign == b->sign))
return 0;
return ( QTNodeCompare(a,b) == 0 ) ? true : false;
return (QTNodeCompare(a, b) == 0) ? true : false;
}
void
QTNTernary( QTNode* in ) {
int i;
void
QTNTernary(QTNode * in)
{
int i;
if ( in->valnode->type != OPR )
if (in->valnode->type != OPR)
return;
for (i=0;i<in->nchild;i++)
QTNTernary( in->child[i] );
for (i = 0; i < in->nchild; i++)
QTNTernary(in->child[i]);
for (i=0;i<in->nchild;i++) {
if ( in->valnode->type == in->child[i]->valnode->type && in->valnode->val == in->child[i]->valnode->val ) {
QTNode* cc = in->child[i];
int oldnchild = in->nchild;
for (i = 0; i < in->nchild; i++)
{
if (in->valnode->type == in->child[i]->valnode->type && in->valnode->val == in->child[i]->valnode->val)
{
QTNode *cc = in->child[i];
int oldnchild = in->nchild;
in->nchild += cc->nchild-1;
in->child = (QTNode**)repalloc( in->child, in->nchild * sizeof(QTNode*) );
if ( i+1 != oldnchild )
memmove( in->child + i + cc->nchild, in->child + i + 1,
(oldnchild-i-1)*sizeof(QTNode*) );
in->nchild += cc->nchild - 1;
in->child = (QTNode **) repalloc(in->child, in->nchild * sizeof(QTNode *));
memcpy( in->child + i, cc->child, cc->nchild * sizeof(QTNode*) );
i += cc->nchild-1;
if (i + 1 != oldnchild)
memmove(in->child + i + cc->nchild, in->child + i + 1,
(oldnchild - i - 1) * sizeof(QTNode *));
memcpy(in->child + i, cc->child, cc->nchild * sizeof(QTNode *));
i += cc->nchild - 1;
pfree(cc);
}
}
}
}
void
QTNBinary( QTNode* in ) {
int i;
void
QTNBinary(QTNode * in)
{
int i;
if ( in->valnode->type != OPR )
if (in->valnode->type != OPR)
return;
for (i=0;i<in->nchild;i++)
QTNBinary( in->child[i] );
for (i = 0; i < in->nchild; i++)
QTNBinary(in->child[i]);
if ( in->nchild <= 2 )
return;
if (in->nchild <= 2)
return;
while( in->nchild > 2 ) {
QTNode *nn = (QTNode*)palloc0( sizeof(QTNode) );
nn->valnode = (ITEM*)palloc0( sizeof(ITEM) );
nn->child = (QTNode**)palloc0( sizeof(QTNode*) * 2 );
while (in->nchild > 2)
{
QTNode *nn = (QTNode *) palloc0(sizeof(QTNode));
nn->valnode = (ITEM *) palloc0(sizeof(ITEM));
nn->child = (QTNode **) palloc0(sizeof(QTNode *) * 2);
nn->nchild = 2;
nn->flags = QTN_NEEDFREE;
@ -161,97 +188,114 @@ QTNBinary( QTNode* in ) {
nn->valnode->val = in->valnode->val;
in->child[0] = nn;
in->child[1] = in->child[ in->nchild-1 ];
in->child[1] = in->child[in->nchild - 1];
in->nchild--;
}
}
}
static void
cntsize(QTNode *in, int4 *sumlen, int4 *nnode) {
cntsize(QTNode * in, int4 *sumlen, int4 *nnode)
{
*nnode += 1;
if ( in->valnode->type == OPR ) {
int i;
for (i=0;i<in->nchild;i++)
if (in->valnode->type == OPR)
{
int i;
for (i = 0; i < in->nchild; i++)
cntsize(in->child[i], sumlen, nnode);
} else {
*sumlen += in->valnode->length+1;
}
else
{
*sumlen += in->valnode->length + 1;
}
}
typedef struct {
ITEM *curitem;
char *operand;
char *curoperand;
} QTN2QTState;
typedef struct
{
ITEM *curitem;
char *operand;
char *curoperand;
} QTN2QTState;
static void
fillQT( QTN2QTState *state, QTNode *in ) {
fillQT(QTN2QTState * state, QTNode * in)
{
*(state->curitem) = *(in->valnode);
if ( in->valnode->type == VAL ) {
memcpy( state->curoperand, in->word, in->valnode->length );
if (in->valnode->type == VAL)
{
memcpy(state->curoperand, in->word, in->valnode->length);
state->curitem->distance = state->curoperand - state->operand;
state->curoperand[ in->valnode->length ] = '\0';
state->curoperand += in->valnode->length + 1;
state->curoperand[in->valnode->length] = '\0';
state->curoperand += in->valnode->length + 1;
state->curitem++;
} else {
ITEM *curitem = state->curitem;
Assert( in->nchild<=2 );
state->curitem++;
fillQT( state, in->child[0] );
if ( in->nchild==2 ) {
curitem->left = state->curitem - curitem;
fillQT( state, in->child[1] );
}
}
}
else
{
ITEM *curitem = state->curitem;
QUERYTYPE*
QTN2QT( QTNode* in, MemoryType memtype ) {
QUERYTYPE *out;
int len;
int sumlen=0, nnode=0;
QTN2QTState state;
Assert(in->nchild <= 2);
state->curitem++;
fillQT(state, in->child[0]);
if (in->nchild == 2)
{
curitem->left = state->curitem - curitem;
fillQT(state, in->child[1]);
}
}
}
QUERYTYPE *
QTN2QT(QTNode * in, MemoryType memtype)
{
QUERYTYPE *out;
int len;
int sumlen = 0,
nnode = 0;
QTN2QTState state;
cntsize(in, &sumlen, &nnode);
len = COMPUTESIZE( nnode, sumlen );
len = COMPUTESIZE(nnode, sumlen);
out = (QUERYTYPE*)MEMALLOC(memtype, len);
out->len = len;
out->size = nnode;
out = (QUERYTYPE *) MEMALLOC(memtype, len);
out->len = len;
out->size = nnode;
state.curitem = GETQUERY( out );
state.operand = state.curoperand = GETOPERAND( out );
state.curitem = GETQUERY(out);
state.operand = state.curoperand = GETOPERAND(out);
fillQT( &state, in );
return out;
fillQT(&state, in);
return out;
}
QTNode *
QTNCopy( QTNode* in, MemoryType memtype ) {
QTNode *out = (QTNode*)MEMALLOC( memtype, sizeof(QTNode) );
QTNCopy(QTNode * in, MemoryType memtype)
{
QTNode *out = (QTNode *) MEMALLOC(memtype, sizeof(QTNode));
*out = *in;
out->valnode = (ITEM*)MEMALLOC( memtype, sizeof(ITEM) );
out->valnode = (ITEM *) MEMALLOC(memtype, sizeof(ITEM));
*(out->valnode) = *(in->valnode);
out->flags |= QTN_NEEDFREE;
if ( in->valnode->type == VAL ) {
out->word = MEMALLOC( memtype, in->valnode->length + 1 );
memcpy( out->word, in->word, in->valnode->length );
out->word[ in->valnode->length ] = '\0';
if (in->valnode->type == VAL)
{
out->word = MEMALLOC(memtype, in->valnode->length + 1);
memcpy(out->word, in->word, in->valnode->length);
out->word[in->valnode->length] = '\0';
out->flags |= QTN_WORDFREE;
} else {
int i;
}
else
{
int i;
out->child = (QTNode**)MEMALLOC( memtype, sizeof(QTNode*) * in->nchild );
out->child = (QTNode **) MEMALLOC(memtype, sizeof(QTNode *) * in->nchild);
for(i=0;i<in->nchild;i++)
out->child[i] = QTNCopy( in->child[i], memtype );
}
for (i = 0; i < in->nchild; i++)
out->child[i] = QTNCopy(in->child[i], memtype);
}
return out;
}

View File

@ -6,39 +6,41 @@
#include "query.h"
typedef struct QTNode {
ITEM *valnode;
uint32 flags;
int4 nchild;
char *word;
uint32 sign;
struct QTNode **child;
} QTNode;
typedef struct QTNode
{
ITEM *valnode;
uint32 flags;
int4 nchild;
char *word;
uint32 sign;
struct QTNode **child;
} QTNode;
#define QTN_NEEDFREE 0x01
#define QTN_NOCHANGE 0x02
#define QTN_WORDFREE 0x04
#define QTN_NEEDFREE 0x01
#define QTN_NOCHANGE 0x02
#define QTN_WORDFREE 0x04
typedef enum {
typedef enum
{
PlainMemory,
SPIMemory,
AggMemory
} MemoryType;
} MemoryType;
QTNode* QT2QTN( ITEM *in, char *operand );
QUERYTYPE* QTN2QT( QTNode* in, MemoryType memtype );
void QTNFree( QTNode* in );
void QTNSort( QTNode* in );
void QTNTernary( QTNode* in );
void QTNBinary( QTNode* in );
int QTNodeCompare( QTNode *an, QTNode *bn );
QTNode* QTNCopy( QTNode* in, MemoryType memtype);
bool QTNEq( QTNode* a, QTNode* b );
QTNode *QT2QTN(ITEM * in, char *operand);
QUERYTYPE *QTN2QT(QTNode * in, MemoryType memtype);
void QTNFree(QTNode * in);
void QTNSort(QTNode * in);
void QTNTernary(QTNode * in);
void QTNBinary(QTNode * in);
int QTNodeCompare(QTNode * an, QTNode * bn);
QTNode *QTNCopy(QTNode * in, MemoryType memtype);
bool QTNEq(QTNode * a, QTNode * b);
extern MemoryContext AggregateContext;
extern MemoryContext AggregateContext;
#define MEMALLOC(us, s) ( ((us)==SPIMemory) ? SPI_palloc(s) : ( ( (us)==PlainMemory ) ? palloc(s) : MemoryContextAlloc(AggregateContext, (s)) ) )
#define MEMFREE(us, p) ( ((us)==SPIMemory) ? SPI_pfree(p) : pfree(p) )
#define MEMALLOC(us, s) ( ((us)==SPIMemory) ? SPI_palloc(s) : ( ( (us)==PlainMemory ) ? palloc(s) : MemoryContextAlloc(AggregateContext, (s)) ) )
#define MEMFREE(us, p) ( ((us)==SPIMemory) ? SPI_pfree(p) : pfree(p) )
#endif

View File

@ -266,8 +266,10 @@ calc_rank_or(float *w, tsvector * t, QUERYTYPE * q)
for (i = 0; i < size; i++)
{
float resj,wjm;
int4 jm;
float resj,
wjm;
int4 jm;
entry = find_wordentry(t, q, item[i]);
if (!entry)
continue;
@ -283,28 +285,29 @@ calc_rank_or(float *w, tsvector * t, QUERYTYPE * q)
post = POSNULL + 1;
}
resj = 0.0;
wjm = -1.0;
jm = 0;
for (j = 0; j < dimt; j++)
{
resj = resj + wpos(post[j])/((j+1)*(j+1));
if ( wpos(post[j]) > wjm ) {
wjm = wpos(post[j]);
jm = j;
}
}
/*
limit (sum(i/i^2),i->inf) = pi^2/6
resj = sum(wi/i^2),i=1,noccurence,
wi - should be sorted desc,
don't sort for now, just choose maximum weight. This should be corrected
resj = 0.0;
wjm = -1.0;
jm = 0;
for (j = 0; j < dimt; j++)
{
resj = resj + wpos(post[j]) / ((j + 1) * (j + 1));
if (wpos(post[j]) > wjm)
{
wjm = wpos(post[j]);
jm = j;
}
}
/*
limit (sum(i/i^2),i->inf) = pi^2/6
resj = sum(wi/i^2),i=1,noccurence,
wi - should be sorted desc,
don't sort for now, just choose maximum weight. This should be corrected
Oleg Bartunov
*/
res = res + ( wjm + resj - wjm/((jm+1)*(jm+1)))/1.64493406685;
res = res + (wjm + resj - wjm / ((jm + 1) * (jm + 1))) / 1.64493406685;
}
if ( size > 0 )
res = res /size;
if (size > 0)
res = res / size;
pfree(item);
return res;
}
@ -414,7 +417,7 @@ rank_def(PG_FUNCTION_ARGS)
typedef struct
{
ITEM **item;
ITEM **item;
int16 nitem;
bool needfree;
int32 pos;
@ -429,53 +432,59 @@ compareDocR(const void *a, const void *b)
}
static bool
checkcondition_ITEM(void *checkval, ITEM * val) {
return (bool)(val->istrue);
checkcondition_ITEM(void *checkval, ITEM * val)
{
return (bool) (val->istrue);
}
static void
reset_istrue_flag(QUERYTYPE *query) {
ITEM *item = GETQUERY(query);
int i;
reset_istrue_flag(QUERYTYPE * query)
{
ITEM *item = GETQUERY(query);
int i;
/* reset istrue flag */
for(i = 0; i < query->size; i++) {
if ( item->type == VAL )
for (i = 0; i < query->size; i++)
{
if (item->type == VAL)
item->istrue = 0;
item++;
}
}
static bool
Cover(DocRepresentation * doc, int len, QUERYTYPE * query, int *pos, int *p, int *q)
{
DocRepresentation *ptr;
int lastpos = *pos;
int i;
bool found=false;
int i;
bool found = false;
reset_istrue_flag(query);
*p = 0x7fffffff;
*q = 0;
ptr = doc + *pos;
/* find upper bound of cover from current position, move up */
while (ptr - doc < len) {
for(i=0;i<ptr->nitem;i++)
while (ptr - doc < len)
{
for (i = 0; i < ptr->nitem; i++)
ptr->item[i]->istrue = 1;
if ( TS_execute(GETQUERY(query), NULL, false, checkcondition_ITEM) ) {
if (ptr->pos > *q) {
if (TS_execute(GETQUERY(query), NULL, false, checkcondition_ITEM))
{
if (ptr->pos > *q)
{
*q = ptr->pos;
lastpos = ptr - doc;
found = true;
}
}
break;
}
ptr++;
}
if (!found)
if (!found)
return false;
reset_istrue_flag(query);
@ -483,25 +492,31 @@ Cover(DocRepresentation * doc, int len, QUERYTYPE * query, int *pos, int *p, int
ptr = doc + lastpos;
/* find lower bound of cover from founded upper bound, move down */
while (ptr >= doc ) {
for(i=0;i<ptr->nitem;i++)
while (ptr >= doc)
{
for (i = 0; i < ptr->nitem; i++)
ptr->item[i]->istrue = 1;
if ( TS_execute(GETQUERY(query), NULL, true, checkcondition_ITEM) ) {
if (ptr->pos < *p)
if (TS_execute(GETQUERY(query), NULL, true, checkcondition_ITEM))
{
if (ptr->pos < *p)
*p = ptr->pos;
break;
}
ptr--;
}
if ( *p <= *q ) {
/* set position for next try to next lexeme after begining of founded cover */
*pos= (ptr-doc) + 1;
if (*p <= *q)
{
/*
* set position for next try to next lexeme after begining of founded
* cover
*/
*pos = (ptr - doc) + 1;
return true;
}
(*pos)++;
return Cover( doc, len, query, pos, p, q );
return Cover(doc, len, query, pos, p, q);
}
static DocRepresentation *
@ -550,26 +565,32 @@ get_docrep(tsvector * txt, QUERYTYPE * query, int *doclen)
for (j = 0; j < dimt; j++)
{
if ( j == 0 ) {
ITEM *kptr, *iptr = item+i;
int k;
if (j == 0)
{
ITEM *kptr,
*iptr = item + i;
int k;
doc[cur].needfree = false;
doc[cur].nitem = 0;
doc[cur].item = (ITEM**)palloc( sizeof(ITEM*) * query->size );
doc[cur].item = (ITEM **) palloc(sizeof(ITEM *) * query->size);
for(k=0; k < query->size; k++) {
kptr = item+k;
if ( k==i || ( item[k].type == VAL && compareITEM( &kptr, &iptr ) == 0 ) ) {
doc[cur].item[ doc[cur].nitem ] = item+k;
for (k = 0; k < query->size; k++)
{
kptr = item + k;
if (k == i || (item[k].type == VAL && compareITEM(&kptr, &iptr) == 0))
{
doc[cur].item[doc[cur].nitem] = item + k;
doc[cur].nitem++;
kptr->istrue = 1;
}
}
} else {
}
}
else
{
doc[cur].needfree = false;
doc[cur].nitem = doc[cur-1].nitem;
doc[cur].item = doc[cur-1].item;
doc[cur].nitem = doc[cur - 1].nitem;
doc[cur].item = doc[cur - 1].item;
}
doc[cur].pos = WEP_GETPOS(post[j]);
cur++;
@ -604,7 +625,7 @@ rank_cd(PG_FUNCTION_ARGS)
len,
cur,
i,
doclen=0;
doclen = 0;
doc = get_docrep(txt, query, &doclen);
if (!doc)
@ -640,9 +661,9 @@ rank_cd(PG_FUNCTION_ARGS)
elog(ERROR, "unrecognized normalization method: %d", method);
}
for(i=0;i<doclen;i++)
if ( doc[i].needfree )
pfree( doc[i].item );
for (i = 0; i < doclen; i++)
if (doc[i].needfree)
pfree(doc[i].item);
pfree(doc);
PG_FREE_IF_COPY(txt, 1);
PG_FREE_IF_COPY(query, 2);
@ -784,9 +805,9 @@ get_covers(PG_FUNCTION_ARGS)
VARATT_SIZEP(out) = cptr - ((char *) out);
pfree(dw);
for(i=0;i<rlen;i++)
if ( doc[i].needfree )
pfree( doc[i].item );
for (i = 0; i < rlen; i++)
if (doc[i].needfree)
pfree(doc[i].item);
pfree(doc);
PG_FREE_IF_COPY(txt, 0);

View File

@ -23,7 +23,7 @@ struct SN_env
int S_size;
int I_size;
int B_size;
symbol **S;
symbol **S;
int *I;
symbol *B;
};

View File

@ -28,8 +28,8 @@ static symbol s_0_1[5] = {'g', 'e', 'n', 'e', 'r'};
static struct among a_0[2] =
{
/* 0 */ {6, s_0_0, -1, -1, 0},
/* 1 */ {5, s_0_1, -1, -1, 0}
/* 0 */ {6, s_0_0, -1, -1, 0},
/* 1 */ {5, s_0_1, -1, -1, 0}
};
static symbol s_1_0[1] = {'\''};
@ -38,9 +38,9 @@ static symbol s_1_2[2] = {'\'', 's'};
static struct among a_1[3] =
{
/* 0 */ {1, s_1_0, -1, 1, 0},
/* 1 */ {3, s_1_1, 0, 1, 0},
/* 2 */ {2, s_1_2, -1, 1, 0}
/* 0 */ {1, s_1_0, -1, 1, 0},
/* 1 */ {3, s_1_1, 0, 1, 0},
/* 2 */ {2, s_1_2, -1, 1, 0}
};
static symbol s_2_0[3] = {'i', 'e', 'd'};
@ -52,12 +52,12 @@ static symbol s_2_5[2] = {'u', 's'};
static struct among a_2[6] =
{
/* 0 */ {3, s_2_0, -1, 2, 0},
/* 1 */ {1, s_2_1, -1, 3, 0},
/* 2 */ {3, s_2_2, 1, 2, 0},
/* 3 */ {4, s_2_3, 1, 1, 0},
/* 4 */ {2, s_2_4, 1, -1, 0},
/* 5 */ {2, s_2_5, 1, -1, 0}
/* 0 */ {3, s_2_0, -1, 2, 0},
/* 1 */ {1, s_2_1, -1, 3, 0},
/* 2 */ {3, s_2_2, 1, 2, 0},
/* 3 */ {4, s_2_3, 1, 1, 0},
/* 4 */ {2, s_2_4, 1, -1, 0},
/* 5 */ {2, s_2_5, 1, -1, 0}
};
static symbol s_3_1[2] = {'b', 'b'};
@ -75,16 +75,16 @@ static symbol s_3_12[2] = {'i', 'z'};
static struct among a_3[13] =
{
/* 0 */ {0, 0, -1, 3, 0},
/* 1 */ {2, s_3_1, 0, 2, 0},
/* 2 */ {2, s_3_2, 0, 2, 0},
/* 3 */ {2, s_3_3, 0, 2, 0},
/* 4 */ {2, s_3_4, 0, 2, 0},
/* 5 */ {2, s_3_5, 0, 1, 0},
/* 6 */ {2, s_3_6, 0, 2, 0},
/* 7 */ {2, s_3_7, 0, 2, 0},
/* 8 */ {2, s_3_8, 0, 2, 0},
/* 9 */ {2, s_3_9, 0, 2, 0},
/* 0 */ {0, 0, -1, 3, 0},
/* 1 */ {2, s_3_1, 0, 2, 0},
/* 2 */ {2, s_3_2, 0, 2, 0},
/* 3 */ {2, s_3_3, 0, 2, 0},
/* 4 */ {2, s_3_4, 0, 2, 0},
/* 5 */ {2, s_3_5, 0, 1, 0},
/* 6 */ {2, s_3_6, 0, 2, 0},
/* 7 */ {2, s_3_7, 0, 2, 0},
/* 8 */ {2, s_3_8, 0, 2, 0},
/* 9 */ {2, s_3_9, 0, 2, 0},
/* 10 */ {2, s_3_10, 0, 1, 0},
/* 11 */ {2, s_3_11, 0, 2, 0},
/* 12 */ {2, s_3_12, 0, 1, 0}
@ -99,12 +99,12 @@ static symbol s_4_5[5] = {'i', 'n', 'g', 'l', 'y'};
static struct among a_4[6] =
{
/* 0 */ {2, s_4_0, -1, 2, 0},
/* 1 */ {3, s_4_1, 0, 1, 0},
/* 2 */ {3, s_4_2, -1, 2, 0},
/* 3 */ {4, s_4_3, -1, 2, 0},
/* 4 */ {5, s_4_4, 3, 1, 0},
/* 5 */ {5, s_4_5, -1, 2, 0}
/* 0 */ {2, s_4_0, -1, 2, 0},
/* 1 */ {3, s_4_1, 0, 1, 0},
/* 2 */ {3, s_4_2, -1, 2, 0},
/* 3 */ {4, s_4_3, -1, 2, 0},
/* 4 */ {5, s_4_4, 3, 1, 0},
/* 5 */ {5, s_4_5, -1, 2, 0}
};
static symbol s_5_0[4] = {'a', 'n', 'c', 'i'};
@ -134,16 +134,16 @@ static symbol s_5_23[7] = {'o', 'u', 's', 'n', 'e', 's', 's'};
static struct among a_5[24] =
{
/* 0 */ {4, s_5_0, -1, 3, 0},
/* 1 */ {4, s_5_1, -1, 2, 0},
/* 2 */ {3, s_5_2, -1, 13, 0},
/* 3 */ {2, s_5_3, -1, 16, 0},
/* 4 */ {3, s_5_4, 3, 12, 0},
/* 5 */ {4, s_5_5, 4, 4, 0},
/* 6 */ {4, s_5_6, 3, 8, 0},
/* 7 */ {5, s_5_7, 3, 14, 0},
/* 8 */ {6, s_5_8, 3, 15, 0},
/* 9 */ {5, s_5_9, 3, 10, 0},
/* 0 */ {4, s_5_0, -1, 3, 0},
/* 1 */ {4, s_5_1, -1, 2, 0},
/* 2 */ {3, s_5_2, -1, 13, 0},
/* 3 */ {2, s_5_3, -1, 16, 0},
/* 4 */ {3, s_5_4, 3, 12, 0},
/* 5 */ {4, s_5_5, 4, 4, 0},
/* 6 */ {4, s_5_6, 3, 8, 0},
/* 7 */ {5, s_5_7, 3, 14, 0},
/* 8 */ {6, s_5_8, 3, 15, 0},
/* 9 */ {5, s_5_9, 3, 10, 0},
/* 10 */ {5, s_5_10, 3, 5, 0},
/* 11 */ {5, s_5_11, -1, 8, 0},
/* 12 */ {6, s_5_12, -1, 12, 0},
@ -172,15 +172,15 @@ static symbol s_6_8[4] = {'n', 'e', 's', 's'};
static struct among a_6[9] =
{
/* 0 */ {5, s_6_0, -1, 4, 0},
/* 1 */ {5, s_6_1, -1, 6, 0},
/* 2 */ {5, s_6_2, -1, 3, 0},
/* 3 */ {5, s_6_3, -1, 4, 0},
/* 4 */ {4, s_6_4, -1, 4, 0},
/* 5 */ {6, s_6_5, -1, 1, 0},
/* 6 */ {7, s_6_6, 5, 2, 0},
/* 7 */ {3, s_6_7, -1, 5, 0},
/* 8 */ {4, s_6_8, -1, 5, 0}
/* 0 */ {5, s_6_0, -1, 4, 0},
/* 1 */ {5, s_6_1, -1, 6, 0},
/* 2 */ {5, s_6_2, -1, 3, 0},
/* 3 */ {5, s_6_3, -1, 4, 0},
/* 4 */ {4, s_6_4, -1, 4, 0},
/* 5 */ {6, s_6_5, -1, 1, 0},
/* 6 */ {7, s_6_6, 5, 2, 0},
/* 7 */ {3, s_6_7, -1, 5, 0},
/* 8 */ {4, s_6_8, -1, 5, 0}
};
static symbol s_7_0[2] = {'i', 'c'};
@ -204,16 +204,16 @@ static symbol s_7_17[5] = {'e', 'm', 'e', 'n', 't'};
static struct among a_7[18] =
{
/* 0 */ {2, s_7_0, -1, 1, 0},
/* 1 */ {4, s_7_1, -1, 1, 0},
/* 2 */ {4, s_7_2, -1, 1, 0},
/* 3 */ {4, s_7_3, -1, 1, 0},
/* 4 */ {4, s_7_4, -1, 1, 0},
/* 5 */ {3, s_7_5, -1, 1, 0},
/* 6 */ {3, s_7_6, -1, 1, 0},
/* 7 */ {3, s_7_7, -1, 1, 0},
/* 8 */ {3, s_7_8, -1, 1, 0},
/* 9 */ {2, s_7_9, -1, 1, 0},
/* 0 */ {2, s_7_0, -1, 1, 0},
/* 1 */ {4, s_7_1, -1, 1, 0},
/* 2 */ {4, s_7_2, -1, 1, 0},
/* 3 */ {4, s_7_3, -1, 1, 0},
/* 4 */ {4, s_7_4, -1, 1, 0},
/* 5 */ {3, s_7_5, -1, 1, 0},
/* 6 */ {3, s_7_6, -1, 1, 0},
/* 7 */ {3, s_7_7, -1, 1, 0},
/* 8 */ {3, s_7_8, -1, 1, 0},
/* 9 */ {2, s_7_9, -1, 1, 0},
/* 10 */ {3, s_7_10, -1, 1, 0},
/* 11 */ {3, s_7_11, -1, 2, 0},
/* 12 */ {2, s_7_12, -1, 1, 0},
@ -229,8 +229,8 @@ static symbol s_8_1[1] = {'l'};
static struct among a_8[2] =
{
/* 0 */ {1, s_8_0, -1, 1, 0},
/* 1 */ {1, s_8_1, -1, 2, 0}
/* 0 */ {1, s_8_0, -1, 1, 0},
/* 1 */ {1, s_8_1, -1, 2, 0}
};
static symbol s_9_0[7] = {'s', 'u', 'c', 'c', 'e', 'e', 'd'};
@ -244,14 +244,14 @@ static symbol s_9_7[6] = {'o', 'u', 't', 'i', 'n', 'g'};
static struct among a_9[8] =
{
/* 0 */ {7, s_9_0, -1, -1, 0},
/* 1 */ {7, s_9_1, -1, -1, 0},
/* 2 */ {6, s_9_2, -1, -1, 0},
/* 3 */ {7, s_9_3, -1, -1, 0},
/* 4 */ {6, s_9_4, -1, -1, 0},
/* 5 */ {7, s_9_5, -1, -1, 0},
/* 6 */ {7, s_9_6, -1, -1, 0},
/* 7 */ {6, s_9_7, -1, -1, 0}
/* 0 */ {7, s_9_0, -1, -1, 0},
/* 1 */ {7, s_9_1, -1, -1, 0},
/* 2 */ {6, s_9_2, -1, -1, 0},
/* 3 */ {7, s_9_3, -1, -1, 0},
/* 4 */ {6, s_9_4, -1, -1, 0},
/* 5 */ {7, s_9_5, -1, -1, 0},
/* 6 */ {7, s_9_6, -1, -1, 0},
/* 7 */ {6, s_9_7, -1, -1, 0}
};
static symbol s_10_0[5] = {'a', 'n', 'd', 'e', 's'};
@ -275,16 +275,16 @@ static symbol s_10_17[4] = {'u', 'g', 'l', 'y'};
static struct among a_10[18] =
{
/* 0 */ {5, s_10_0, -1, -1, 0},
/* 1 */ {5, s_10_1, -1, -1, 0},
/* 2 */ {4, s_10_2, -1, -1, 0},
/* 3 */ {6, s_10_3, -1, -1, 0},
/* 4 */ {5, s_10_4, -1, 3, 0},
/* 5 */ {5, s_10_5, -1, 9, 0},
/* 6 */ {6, s_10_6, -1, 7, 0},
/* 7 */ {4, s_10_7, -1, -1, 0},
/* 8 */ {4, s_10_8, -1, 6, 0},
/* 9 */ {5, s_10_9, -1, 4, 0},
/* 0 */ {5, s_10_0, -1, -1, 0},
/* 1 */ {5, s_10_1, -1, -1, 0},
/* 2 */ {4, s_10_2, -1, -1, 0},
/* 3 */ {6, s_10_3, -1, -1, 0},
/* 4 */ {5, s_10_4, -1, 3, 0},
/* 5 */ {5, s_10_5, -1, 9, 0},
/* 6 */ {6, s_10_6, -1, 7, 0},
/* 7 */ {4, s_10_7, -1, -1, 0},
/* 8 */ {4, s_10_8, -1, 6, 0},
/* 9 */ {5, s_10_9, -1, 4, 0},
/* 10 */ {4, s_10_10, -1, -1, 0},
/* 11 */ {4, s_10_11, -1, 10, 0},
/* 12 */ {6, s_10_12, -1, 11, 0},
@ -1609,12 +1609,14 @@ lab0:
return 1;
}
extern struct SN_env *english_ISO_8859_1_create_env(void)
extern struct SN_env *
english_ISO_8859_1_create_env(void)
{
return SN_create_env(0, 2, 1);
}
extern void english_ISO_8859_1_close_env(struct SN_env * z)
extern void
english_ISO_8859_1_close_env(struct SN_env * z)
{
SN_close_env(z);
}

View File

@ -6,10 +6,10 @@ extern "C"
{
#endif
extern struct SN_env *english_ISO_8859_1_create_env(void);
extern void english_ISO_8859_1_close_env(struct SN_env * z);
extern struct SN_env *english_ISO_8859_1_create_env(void);
extern void english_ISO_8859_1_close_env(struct SN_env * z);
extern int english_ISO_8859_1_stem(struct SN_env * z);
extern int english_ISO_8859_1_stem(struct SN_env * z);
#ifdef __cplusplus
}

View File

@ -30,15 +30,15 @@ static symbol s_0_8[6] = {0xD9, 0xD7, 0xDB, 0xC9, 0xD3, 0xD8};
static struct among a_0[9] =
{
/* 0 */ {3, s_0_0, -1, 1, 0},
/* 1 */ {4, s_0_1, 0, 2, 0},
/* 2 */ {4, s_0_2, 0, 2, 0},
/* 3 */ {1, s_0_3, -1, 1, 0},
/* 4 */ {2, s_0_4, 3, 2, 0},
/* 5 */ {2, s_0_5, 3, 2, 0},
/* 6 */ {5, s_0_6, -1, 1, 0},
/* 7 */ {6, s_0_7, 6, 2, 0},
/* 8 */ {6, s_0_8, 6, 2, 0}
/* 0 */ {3, s_0_0, -1, 1, 0},
/* 1 */ {4, s_0_1, 0, 2, 0},
/* 2 */ {4, s_0_2, 0, 2, 0},
/* 3 */ {1, s_0_3, -1, 1, 0},
/* 4 */ {2, s_0_4, 3, 2, 0},
/* 5 */ {2, s_0_5, 3, 2, 0},
/* 6 */ {5, s_0_6, -1, 1, 0},
/* 7 */ {6, s_0_7, 6, 2, 0},
/* 8 */ {6, s_0_8, 6, 2, 0}
};
static symbol s_1_0[2] = {0xC0, 0xC0};
@ -70,16 +70,16 @@ static symbol s_1_25[3] = {0xCF, 0xCD, 0xD5};
static struct among a_1[26] =
{
/* 0 */ {2, s_1_0, -1, 1, 0},
/* 1 */ {2, s_1_1, -1, 1, 0},
/* 2 */ {2, s_1_2, -1, 1, 0},
/* 3 */ {2, s_1_3, -1, 1, 0},
/* 4 */ {2, s_1_4, -1, 1, 0},
/* 5 */ {2, s_1_5, -1, 1, 0},
/* 6 */ {2, s_1_6, -1, 1, 0},
/* 7 */ {2, s_1_7, -1, 1, 0},
/* 8 */ {2, s_1_8, -1, 1, 0},
/* 9 */ {2, s_1_9, -1, 1, 0},
/* 0 */ {2, s_1_0, -1, 1, 0},
/* 1 */ {2, s_1_1, -1, 1, 0},
/* 2 */ {2, s_1_2, -1, 1, 0},
/* 3 */ {2, s_1_3, -1, 1, 0},
/* 4 */ {2, s_1_4, -1, 1, 0},
/* 5 */ {2, s_1_5, -1, 1, 0},
/* 6 */ {2, s_1_6, -1, 1, 0},
/* 7 */ {2, s_1_7, -1, 1, 0},
/* 8 */ {2, s_1_8, -1, 1, 0},
/* 9 */ {2, s_1_9, -1, 1, 0},
/* 10 */ {3, s_1_10, -1, 1, 0},
/* 11 */ {3, s_1_11, -1, 1, 0},
/* 12 */ {2, s_1_12, -1, 1, 0},
@ -109,14 +109,14 @@ static symbol s_2_7[3] = {0xD5, 0xC0, 0xDD};
static struct among a_2[8] =
{
/* 0 */ {2, s_2_0, -1, 1, 0},
/* 1 */ {2, s_2_1, -1, 1, 0},
/* 2 */ {2, s_2_2, -1, 1, 0},
/* 3 */ {3, s_2_3, 2, 2, 0},
/* 4 */ {3, s_2_4, 2, 2, 0},
/* 5 */ {1, s_2_5, -1, 1, 0},
/* 6 */ {2, s_2_6, 5, 1, 0},
/* 7 */ {3, s_2_7, 6, 2, 0}
/* 0 */ {2, s_2_0, -1, 1, 0},
/* 1 */ {2, s_2_1, -1, 1, 0},
/* 2 */ {2, s_2_2, -1, 1, 0},
/* 3 */ {3, s_2_3, 2, 2, 0},
/* 4 */ {3, s_2_4, 2, 2, 0},
/* 5 */ {1, s_2_5, -1, 1, 0},
/* 6 */ {2, s_2_6, 5, 1, 0},
/* 7 */ {3, s_2_7, 6, 2, 0}
};
static symbol s_3_0[2] = {0xD3, 0xD1};
@ -124,8 +124,8 @@ static symbol s_3_1[2] = {0xD3, 0xD8};
static struct among a_3[2] =
{
/* 0 */ {2, s_3_0, -1, 1, 0},
/* 1 */ {2, s_3_1, -1, 1, 0}
/* 0 */ {2, s_3_0, -1, 1, 0},
/* 1 */ {2, s_3_1, -1, 1, 0}
};
static symbol s_4_0[1] = {0xC0};
@ -177,16 +177,16 @@ static symbol s_4_45[3] = {0xC5, 0xCE, 0xD9};
static struct among a_4[46] =
{
/* 0 */ {1, s_4_0, -1, 2, 0},
/* 1 */ {2, s_4_1, 0, 2, 0},
/* 2 */ {2, s_4_2, -1, 1, 0},
/* 3 */ {3, s_4_3, 2, 2, 0},
/* 4 */ {3, s_4_4, 2, 2, 0},
/* 5 */ {2, s_4_5, -1, 1, 0},
/* 6 */ {3, s_4_6, 5, 2, 0},
/* 7 */ {3, s_4_7, -1, 1, 0},
/* 8 */ {3, s_4_8, -1, 2, 0},
/* 9 */ {3, s_4_9, -1, 1, 0},
/* 0 */ {1, s_4_0, -1, 2, 0},
/* 1 */ {2, s_4_1, 0, 2, 0},
/* 2 */ {2, s_4_2, -1, 1, 0},
/* 3 */ {3, s_4_3, 2, 2, 0},
/* 4 */ {3, s_4_4, 2, 2, 0},
/* 5 */ {2, s_4_5, -1, 1, 0},
/* 6 */ {3, s_4_6, 5, 2, 0},
/* 7 */ {3, s_4_7, -1, 1, 0},
/* 8 */ {3, s_4_8, -1, 2, 0},
/* 9 */ {3, s_4_9, -1, 1, 0},
/* 10 */ {4, s_4_10, 9, 2, 0},
/* 11 */ {4, s_4_11, 9, 2, 0},
/* 12 */ {2, s_4_12, -1, 1, 0},
@ -264,16 +264,16 @@ static symbol s_5_35[1] = {0xD9};
static struct among a_5[36] =
{
/* 0 */ {1, s_5_0, -1, 1, 0},
/* 1 */ {2, s_5_1, 0, 1, 0},
/* 2 */ {2, s_5_2, 0, 1, 0},
/* 3 */ {1, s_5_3, -1, 1, 0},
/* 4 */ {1, s_5_4, -1, 1, 0},
/* 5 */ {2, s_5_5, 4, 1, 0},
/* 6 */ {2, s_5_6, 4, 1, 0},
/* 7 */ {2, s_5_7, -1, 1, 0},
/* 8 */ {2, s_5_8, -1, 1, 0},
/* 9 */ {3, s_5_9, 8, 1, 0},
/* 0 */ {1, s_5_0, -1, 1, 0},
/* 1 */ {2, s_5_1, 0, 1, 0},
/* 2 */ {2, s_5_2, 0, 1, 0},
/* 3 */ {1, s_5_3, -1, 1, 0},
/* 4 */ {1, s_5_4, -1, 1, 0},
/* 5 */ {2, s_5_5, 4, 1, 0},
/* 6 */ {2, s_5_6, 4, 1, 0},
/* 7 */ {2, s_5_7, -1, 1, 0},
/* 8 */ {2, s_5_8, -1, 1, 0},
/* 9 */ {3, s_5_9, 8, 1, 0},
/* 10 */ {1, s_5_10, -1, 1, 0},
/* 11 */ {2, s_5_11, 10, 1, 0},
/* 12 */ {2, s_5_12, 10, 1, 0},
@ -307,8 +307,8 @@ static symbol s_6_1[4] = {0xCF, 0xD3, 0xD4, 0xD8};
static struct among a_6[2] =
{
/* 0 */ {3, s_6_0, -1, 1, 0},
/* 1 */ {4, s_6_1, -1, 1, 0}
/* 0 */ {3, s_6_0, -1, 1, 0},
/* 1 */ {4, s_6_1, -1, 1, 0}
};
static symbol s_7_0[4] = {0xC5, 0xCA, 0xDB, 0xC5};
@ -318,10 +318,10 @@ static symbol s_7_3[3] = {0xC5, 0xCA, 0xDB};
static struct among a_7[4] =
{
/* 0 */ {4, s_7_0, -1, 1, 0},
/* 1 */ {1, s_7_1, -1, 2, 0},
/* 2 */ {1, s_7_2, -1, 3, 0},
/* 3 */ {3, s_7_3, -1, 1, 0}
/* 0 */ {4, s_7_0, -1, 1, 0},
/* 1 */ {1, s_7_1, -1, 2, 0},
/* 2 */ {1, s_7_2, -1, 3, 0},
/* 3 */ {3, s_7_3, -1, 1, 0}
};
static unsigned char g_v[] = {35, 130, 34, 18};
@ -915,12 +915,14 @@ lab0:
return 1;
}
extern struct SN_env *russian_KOI8_R_create_env(void)
extern struct SN_env *
russian_KOI8_R_create_env(void)
{
return SN_create_env(0, 2, 0);
}
extern void russian_KOI8_R_close_env(struct SN_env * z)
extern void
russian_KOI8_R_close_env(struct SN_env * z)
{
SN_close_env(z);
}

View File

@ -6,10 +6,10 @@ extern "C"
{
#endif
extern struct SN_env *russian_KOI8_R_create_env(void);
extern void russian_KOI8_R_close_env(struct SN_env * z);
extern struct SN_env *russian_KOI8_R_create_env(void);
extern void russian_KOI8_R_close_env(struct SN_env * z);
extern int russian_KOI8_R_stem(struct SN_env * z);
extern int russian_KOI8_R_stem(struct SN_env * z);
#ifdef __cplusplus
}

View File

@ -8,58 +8,64 @@
#if defined(TS_USE_WIDE) && defined(WIN32)
size_t
wchar2char( char *to, const wchar_t *from, size_t len ) {
if (GetDatabaseEncoding() == PG_UTF8) {
int r, nbytes;
wchar2char(char *to, const wchar_t *from, size_t len)
{
if (GetDatabaseEncoding() == PG_UTF8)
{
int r,
nbytes;
if (len==0)
if (len == 0)
return 0;
/* in any case, *to should be allocated with enough space */
nbytes = WideCharToMultiByte(CP_UTF8, 0, from, len, NULL, 0, NULL, NULL);
if ( nbytes==0 )
if (nbytes == 0)
ereport(ERROR,
(errcode(ERRCODE_CHARACTER_NOT_IN_REPERTOIRE),
errmsg("UTF-16 to UTF-8 translation failed: %lu",
GetLastError())));
(errcode(ERRCODE_CHARACTER_NOT_IN_REPERTOIRE),
errmsg("UTF-16 to UTF-8 translation failed: %lu",
GetLastError())));
r = WideCharToMultiByte(CP_UTF8, 0, from, len, to, nbytes,
NULL, NULL);
NULL, NULL);
if ( r==0 )
if (r == 0)
ereport(ERROR,
(errcode(ERRCODE_CHARACTER_NOT_IN_REPERTOIRE),
errmsg("UTF-16 to UTF-8 translation failed: %lu",
GetLastError())));
(errcode(ERRCODE_CHARACTER_NOT_IN_REPERTOIRE),
errmsg("UTF-16 to UTF-8 translation failed: %lu",
GetLastError())));
return r;
}
return wcstombs(to, from, len);
}
size_t
char2wchar( wchar_t *to, const char *from, size_t len ) {
if (GetDatabaseEncoding() == PG_UTF8) {
int r;
size_t
char2wchar(wchar_t *to, const char *from, size_t len)
{
if (GetDatabaseEncoding() == PG_UTF8)
{
int r;
if (len==0)
if (len == 0)
return 0;
r = MultiByteToWideChar(CP_UTF8, 0, from, len, to, len);
if (!r) {
if (!r)
{
pg_verifymbstr(from, len, false);
ereport(ERROR,
(errcode(ERRCODE_CHARACTER_NOT_IN_REPERTOIRE),
errmsg("invalid multibyte character for locale"),
errhint("The server's LC_CTYPE locale is probably incompatible with the database encoding.")));
(errcode(ERRCODE_CHARACTER_NOT_IN_REPERTOIRE),
errmsg("invalid multibyte character for locale"),
errhint("The server's LC_CTYPE locale is probably incompatible with the database encoding.")));
}
Assert( r <= len );
Assert(r <= len);
return r;
}
return mbstowcs(to, from, len);
}

View File

@ -22,17 +22,15 @@
#ifdef WIN32
size_t wchar2char( char *to, const wchar_t *from, size_t len );
size_t char2wchar( wchar_t *to, const char *from, size_t len );
#else /* WIN32 */
size_t wchar2char(char *to, const wchar_t *from, size_t len);
size_t char2wchar(wchar_t *to, const char *from, size_t len);
#else /* WIN32 */
/* correct mbstowcs */
#define char2wchar mbstowcs
#define wchar2char wcstombs
#endif /* WIN32 */
#endif /* defined(HAVE_WCSTOMBS) &&
* defined(HAVE_TOWLOWER) */
#endif /* WIN32 */
#endif /* defined(HAVE_WCSTOMBS) && defined(HAVE_TOWLOWER) */
#endif /* __TSLOCALE_H__ */
#endif /* __TSLOCALE_H__ */

File diff suppressed because it is too large Load Diff

View File

@ -5,7 +5,8 @@
#include <limits.h>
#include "ts_locale.h"
typedef enum {
typedef enum
{
TPS_Base = 0,
TPS_InUWord,
TPS_InLatWord,
@ -78,70 +79,76 @@ typedef enum {
TPS_InHDecimalPart,
TPS_InHVersionPartFirst,
TPS_InHVersionPart,
TPS_Null /* last state (fake value) */
} TParserState;
TPS_Null /* last state (fake value) */
} TParserState;
/* forward declaration */
struct TParser;
typedef int (*TParserCharTest)(struct TParser*); /* any p_is* functions except p_iseq */
typedef void (*TParserSpecial)(struct TParser*); /* special handler for special cases... */
typedef int (*TParserCharTest) (struct TParser *); /* any p_is* functions
* except p_iseq */
typedef void (*TParserSpecial) (struct TParser *); /* special handler for
* special cases... */
typedef struct {
TParserCharTest isclass;
char c;
uint16 flags;
TParserState tostate;
int type;
TParserSpecial special;
} TParserStateActionItem;
typedef struct
{
TParserCharTest isclass;
char c;
uint16 flags;
TParserState tostate;
int type;
TParserSpecial special;
} TParserStateActionItem;
typedef struct {
TParserState state;
TParserStateActionItem *action;
} TParserStateAction;
typedef struct
{
TParserState state;
TParserStateActionItem *action;
} TParserStateAction;
typedef struct TParserPosition {
int posbyte; /* position of parser in bytes */
int poschar; /* osition of parser in characters */
int charlen; /* length of current char */
int lenbytelexeme;
int lencharlexeme;
TParserState state;
struct TParserPosition *prev;
int flags;
TParserStateActionItem *pushedAtAction;
} TParserPosition;
typedef struct TParserPosition
{
int posbyte; /* position of parser in bytes */
int poschar; /* osition of parser in characters */
int charlen; /* length of current char */
int lenbytelexeme;
int lencharlexeme;
TParserState state;
struct TParserPosition *prev;
int flags;
TParserStateActionItem *pushedAtAction;
} TParserPosition;
typedef struct TParser {
typedef struct TParser
{
/* string and position information */
char *str; /* multibyte string */
int lenstr; /* length of mbstring */
wchar_t *wstr; /* wide character string */
int lenwstr; /* length of wsting */
char *str; /* multibyte string */
int lenstr; /* length of mbstring */
wchar_t *wstr; /* wide character string */
int lenwstr; /* length of wsting */
/* State of parse */
int charmaxlen;
int charmaxlen;
bool usewide;
TParserPosition *state;
TParserPosition *state;
bool ignore;
bool wanthost;
/* silly char */
char c;
char c;
/* out */
char *lexeme;
int lenbytelexeme;
int lencharlexeme;
int type;
} TParser;
char *lexeme;
int lenbytelexeme;
int lencharlexeme;
int type;
} TParser;
TParser* TParserInit( char *, int );
bool TParserGet( TParser* );
void TParserClose( TParser* );
TParser *TParserInit(char *, int);
bool TParserGet(TParser *);
void TParserClose(TParser *);
#endif

View File

@ -39,7 +39,7 @@ Datum prsd_start(PG_FUNCTION_ARGS);
Datum
prsd_start(PG_FUNCTION_ARGS)
{
PG_RETURN_POINTER(TParserInit( (char *) PG_GETARG_POINTER(0), PG_GETARG_INT32(1)));
PG_RETURN_POINTER(TParserInit((char *) PG_GETARG_POINTER(0), PG_GETARG_INT32(1)));
}
PG_FUNCTION_INFO_V1(prsd_getlexeme);
@ -47,14 +47,14 @@ Datum prsd_getlexeme(PG_FUNCTION_ARGS);
Datum
prsd_getlexeme(PG_FUNCTION_ARGS)
{
TParser *p=(TParser*)PG_GETARG_POINTER(0);
TParser *p = (TParser *) PG_GETARG_POINTER(0);
char **t = (char **) PG_GETARG_POINTER(1);
int *tlen = (int *) PG_GETARG_POINTER(2);
if ( !TParserGet(p) )
if (!TParserGet(p))
PG_RETURN_INT32(0);
*t = p->lexeme;
*t = p->lexeme;
*tlen = p->lenbytelexeme;
PG_RETURN_INT32(p->type);
@ -65,8 +65,9 @@ Datum prsd_end(PG_FUNCTION_ARGS);
Datum
prsd_end(PG_FUNCTION_ARGS)
{
TParser *p=(TParser*)PG_GETARG_POINTER(0);
TParserClose(p);
TParser *p = (TParser *) PG_GETARG_POINTER(0);
TParserClose(p);
PG_RETURN_VOID();
}

View File

@ -16,7 +16,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/common/heaptuple.c,v 1.103 2005/11/20 19:49:06 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/common/heaptuple.c,v 1.104 2005/11/22 18:17:05 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -512,11 +512,11 @@ nocachegetattr(HeapTuple tuple,
/*
* Now we know that we have to walk the tuple CAREFULLY.
*
* Note - This loop is a little tricky. For each non-null attribute, we
* have to first account for alignment padding before the attr, then
* advance over the attr based on its length. Nulls have no storage
* and no alignment padding either. We can use/set attcacheoff until
* we pass either a null or a var-width attribute.
* Note - This loop is a little tricky. For each non-null attribute,
* we have to first account for alignment padding before the attr,
* then advance over the attr based on its length. Nulls have no
* storage and no alignment padding either. We can use/set
* attcacheoff until we pass either a null or a var-width attribute.
*/
for (i = 0; i < attnum; i++)

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/common/tupdesc.c,v 1.112 2005/10/15 02:49:08 momjian Exp $
* $PostgreSQL: pgsql/src/backend/access/common/tupdesc.c,v 1.113 2005/11/22 18:17:05 momjian Exp $
*
* NOTES
* some of the executor utility code such as "ExecTypeFromTL" should be
@ -49,8 +49,8 @@ CreateTemplateTupleDesc(int natts, bool hasoid)
* Allocate enough memory for the tuple descriptor, including the
* attribute rows, and set up the attribute row pointers.
*
* Note: we assume that sizeof(struct tupleDesc) is a multiple of the struct
* pointer alignment requirement, and hence we don't need to insert
* Note: we assume that sizeof(struct tupleDesc) is a multiple of the
* struct pointer alignment requirement, and hence we don't need to insert
* alignment padding between the struct and the array of attribute row
* pointers.
*/

View File

@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/gist/gistget.c,v 1.53 2005/11/06 22:39:20 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/gist/gistget.c,v 1.54 2005/11/22 18:17:05 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -396,8 +396,8 @@ gistindex_keytest(IndexTuple tuple,
* are the index datum (as a GISTENTRY*), the comparison datum, and
* the comparison operator's strategy number and subtype from pg_amop.
*
* (Presently there's no need to pass the subtype since it'll always be
* zero, but might as well pass it for possible future use.)
* (Presently there's no need to pass the subtype since it'll always
* be zero, but might as well pass it for possible future use.)
*/
test = FunctionCall4(&key->sk_func,
PointerGetDatum(&de),

View File

@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/gist/gistutil.c,v 1.8 2005/11/06 22:39:20 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/gist/gistutil.c,v 1.9 2005/11/22 18:17:05 momjian Exp $
*-------------------------------------------------------------------------
*/
#include "postgres.h"
@ -877,16 +877,17 @@ gistcheckpage(Relation rel, Buffer buf)
Page page = BufferGetPage(buf);
/*
* ReadBuffer verifies that every newly-read page passes PageHeaderIsValid,
* which means it either contains a reasonably sane page header or is
* all-zero. We have to defend against the all-zero case, however.
* ReadBuffer verifies that every newly-read page passes
* PageHeaderIsValid, which means it either contains a reasonably sane
* page header or is all-zero. We have to defend against the all-zero
* case, however.
*/
if (PageIsNew(page))
ereport(ERROR,
(errcode(ERRCODE_INDEX_CORRUPTED),
errmsg("index \"%s\" contains unexpected zero page at block %u",
RelationGetRelationName(rel),
BufferGetBlockNumber(buf)),
errmsg("index \"%s\" contains unexpected zero page at block %u",
RelationGetRelationName(rel),
BufferGetBlockNumber(buf)),
errhint("Please REINDEX it.")));
/*
@ -925,6 +926,7 @@ gistNewBuffer(Relation r)
break; /* nothing left in FSM */
buffer = ReadBuffer(r, blkno);
/*
* We have to guard against the possibility that someone else already
* recycled this page; the buffer may be locked if so.

View File

@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/gist/gistvacuum.c,v 1.10 2005/11/06 22:39:20 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/gist/gistvacuum.c,v 1.11 2005/11/22 18:17:05 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -65,6 +65,7 @@ gistVacuumUpdate(GistVacuum *gv, BlockNumber blkno, bool needunion)
lencompleted = 16;
buffer = ReadBuffer(gv->index, blkno);
/*
* This is only used during VACUUM FULL, so we need not bother to lock
* individual index pages

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.48 2005/11/06 19:29:00 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.49 2005/11/22 18:17:05 momjian Exp $
*
* NOTES
* Overflow pages look like ordinary relation pages.
@ -488,9 +488,9 @@ _hash_initbitmap(Relation rel, HashMetaPage metap, BlockNumber blkno)
* It is okay to write-lock the new bitmap page while holding metapage
* write lock, because no one else could be contending for the new page.
*
* There is some loss of concurrency in possibly doing I/O for the new page
* while holding the metapage lock, but this path is taken so seldom that
* it's not worth worrying about.
* There is some loss of concurrency in possibly doing I/O for the new
* page while holding the metapage lock, but this path is taken so seldom
* that it's not worth worrying about.
*/
buf = _hash_getbuf(rel, blkno, HASH_WRITE);
pg = BufferGetPage(buf);

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.53 2005/11/06 19:29:00 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.54 2005/11/22 18:17:05 momjian Exp $
*
* NOTES
* Postgres hash pages look like ordinary relation pages. The opaque
@ -402,8 +402,8 @@ _hash_expandtable(Relation rel, Buffer metabuf)
* The lock protects us against other backends, but not against our own
* backend. Must check for active scans separately.
*
* Ideally we would lock the new bucket too before proceeding, but if we are
* about to cross a splitpoint then the BUCKET_TO_BLKNO mapping isn't
* Ideally we would lock the new bucket too before proceeding, but if we
* are about to cross a splitpoint then the BUCKET_TO_BLKNO mapping isn't
* correct yet. For simplicity we update the metapage first and then
* lock. This should be okay because no one else should be trying to lock
* the new bucket yet...
@ -422,11 +422,11 @@ _hash_expandtable(Relation rel, Buffer metabuf)
/*
* Okay to proceed with split. Update the metapage bucket mapping info.
*
* Since we are scribbling on the metapage data right in the shared buffer,
* any failure in this next little bit leaves us with a big problem: the
* metapage is effectively corrupt but could get written back to disk. We
* don't really expect any failure, but just to be sure, establish a
* critical section.
* Since we are scribbling on the metapage data right in the shared
* buffer, any failure in this next little bit leaves us with a big
* problem: the metapage is effectively corrupt but could get written back
* to disk. We don't really expect any failure, but just to be sure,
* establish a critical section.
*/
START_CRIT_SECTION();

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/hash/hashutil.c,v 1.43 2005/11/06 19:29:00 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/hash/hashutil.c,v 1.44 2005/11/22 18:17:05 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -115,23 +115,24 @@ _hash_checkpage(Relation rel, Buffer buf, int flags)
Page page = BufferGetPage(buf);
/*
* ReadBuffer verifies that every newly-read page passes PageHeaderIsValid,
* which means it either contains a reasonably sane page header or is
* all-zero. We have to defend against the all-zero case, however.
* ReadBuffer verifies that every newly-read page passes
* PageHeaderIsValid, which means it either contains a reasonably sane
* page header or is all-zero. We have to defend against the all-zero
* case, however.
*/
if (PageIsNew(page))
ereport(ERROR,
(errcode(ERRCODE_INDEX_CORRUPTED),
errmsg("index \"%s\" contains unexpected zero page at block %u",
RelationGetRelationName(rel),
BufferGetBlockNumber(buf)),
errmsg("index \"%s\" contains unexpected zero page at block %u",
RelationGetRelationName(rel),
BufferGetBlockNumber(buf)),
errhint("Please REINDEX it.")));
/*
* Additionally check that the special area looks sane.
*/
if (((PageHeader) (page))->pd_special !=
(BLCKSZ - MAXALIGN(sizeof(HashPageOpaqueData))))
(BLCKSZ - MAXALIGN(sizeof(HashPageOpaqueData))))
ereport(ERROR,
(errcode(ERRCODE_INDEX_CORRUPTED),
errmsg("index \"%s\" contains corrupted page at block %u",
@ -146,9 +147,9 @@ _hash_checkpage(Relation rel, Buffer buf, int flags)
if ((opaque->hasho_flag & flags) == 0)
ereport(ERROR,
(errcode(ERRCODE_INDEX_CORRUPTED),
errmsg("index \"%s\" contains corrupted page at block %u",
RelationGetRelationName(rel),
BufferGetBlockNumber(buf)),
errmsg("index \"%s\" contains corrupted page at block %u",
RelationGetRelationName(rel),
BufferGetBlockNumber(buf)),
errhint("Please REINDEX it.")));
}

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.202 2005/11/20 19:49:07 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.203 2005/11/22 18:17:06 momjian Exp $
*
*
* INTERFACE ROUTINES
@ -1080,7 +1080,7 @@ heap_get_latest_tid(Relation relation,
* The return value is the OID assigned to the tuple (either here or by the
* caller), or InvalidOid if no OID. The header fields of *tup are updated
* to match the stored tuple; in particular tup->t_self receives the actual
* TID where the tuple was stored. But note that any toasting of fields
* TID where the tuple was stored. But note that any toasting of fields
* within the tuple data is NOT reflected into *tup.
*/
Oid
@ -1127,8 +1127,8 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
* If the new tuple is too big for storage or contains already toasted
* out-of-line attributes from some other relation, invoke the toaster.
*
* Note: below this point, heaptup is the data we actually intend to
* store into the relation; tup is the caller's original untoasted data.
* Note: below this point, heaptup is the data we actually intend to store
* into the relation; tup is the caller's original untoasted data.
*/
if (HeapTupleHasExternal(tup) ||
(MAXALIGN(tup->t_len) > TOAST_TUPLE_THRESHOLD))
@ -1215,8 +1215,8 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
/*
* If tuple is cachable, mark it for invalidation from the caches in case
* we abort. Note it is OK to do this after WriteBuffer releases the
* buffer, because the heaptup data structure is all in local memory,
* not in the shared buffer.
* buffer, because the heaptup data structure is all in local memory, not
* in the shared buffer.
*/
CacheInvalidateHeapTuple(relation, heaptup);
@ -1323,8 +1323,8 @@ l1:
* heap_lock_tuple). LockTuple will release us when we are
* next-in-line for the tuple.
*
* If we are forced to "start over" below, we keep the tuple lock; this
* arranges that we stay at the head of the line while rechecking
* If we are forced to "start over" below, we keep the tuple lock;
* this arranges that we stay at the head of the line while rechecking
* tuple state.
*/
if (!have_tuple_lock)
@ -1567,7 +1567,7 @@ simple_heap_delete(Relation relation, ItemPointer tid)
*
* On success, the header fields of *newtup are updated to match the new
* stored tuple; in particular, newtup->t_self is set to the TID where the
* new tuple was inserted. However, any TOAST changes in the new tuple's
* new tuple was inserted. However, any TOAST changes in the new tuple's
* data are not reflected into *newtup.
*
* In the failure cases, the routine returns the tuple's t_ctid and t_xmax.
@ -1638,8 +1638,8 @@ l2:
* heap_lock_tuple). LockTuple will release us when we are
* next-in-line for the tuple.
*
* If we are forced to "start over" below, we keep the tuple lock; this
* arranges that we stay at the head of the line while rechecking
* If we are forced to "start over" below, we keep the tuple lock;
* this arranges that we stay at the head of the line while rechecking
* tuple state.
*/
if (!have_tuple_lock)
@ -1771,8 +1771,8 @@ l2:
* show that it's already being updated, else other processes may try to
* update it themselves.
*
* We need to invoke the toaster if there are already any out-of-line toasted
* values present, or if the new tuple is over-threshold.
* We need to invoke the toaster if there are already any out-of-line
* toasted values present, or if the new tuple is over-threshold.
*/
newtupsize = MAXALIGN(newtup->t_len);
@ -1875,7 +1875,7 @@ l2:
/* NO EREPORT(ERROR) from here till changes are logged */
START_CRIT_SECTION();
RelationPutHeapTuple(relation, newbuf, heaptup); /* insert new tuple */
RelationPutHeapTuple(relation, newbuf, heaptup); /* insert new tuple */
if (!already_marked)
{
@ -2111,8 +2111,8 @@ l3:
* LockTuple will release us when we are next-in-line for the tuple.
* We must do this even if we are share-locking.
*
* If we are forced to "start over" below, we keep the tuple lock; this
* arranges that we stay at the head of the line while rechecking
* If we are forced to "start over" below, we keep the tuple lock;
* this arranges that we stay at the head of the line while rechecking
* tuple state.
*/
if (!have_tuple_lock)

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/heap/hio.c,v 1.58 2005/10/15 02:49:08 momjian Exp $
* $PostgreSQL: pgsql/src/backend/access/heap/hio.c,v 1.59 2005/11/22 18:17:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -296,11 +296,11 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* Remember the new page as our target for future insertions.
*
* XXX should we enter the new page into the free space map immediately, or
* just keep it for this backend's exclusive use in the short run (until
* VACUUM sees it)? Seems to depend on whether you expect the current
* backend to make more insertions or not, which is probably a good bet
* most of the time. So for now, don't add it to FSM yet.
* XXX should we enter the new page into the free space map immediately,
* or just keep it for this backend's exclusive use in the short run
* (until VACUUM sees it)? Seems to depend on whether you expect the
* current backend to make more insertions or not, which is probably a
* good bet most of the time. So for now, don't add it to FSM yet.
*/
relation->rd_targblock = BufferGetBlockNumber(buffer);

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.55 2005/11/20 19:49:07 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.56 2005/11/22 18:17:06 momjian Exp $
*
*
* INTERFACE ROUTINES
@ -1074,8 +1074,8 @@ toast_save_datum(Relation rel, Datum value)
* FormIndexDatum: this relies on the knowledge that the index columns
* are the same as the initial columns of the table.
*
* Note also that there had better not be any user-created index on the
* TOAST table, since we don't bother to update anything else.
* Note also that there had better not be any user-created index on
* the TOAST table, since we don't bother to update anything else.
*/
index_insert(toastidx, t_values, t_isnull,
&(toasttup->t_self),
@ -1213,9 +1213,9 @@ toast_fetch_datum(varattrib *attr)
/*
* Read the chunks by index
*
* Note that because the index is actually on (valueid, chunkidx) we will see
* the chunks in chunkidx order, even though we didn't explicitly ask for
* it.
* Note that because the index is actually on (valueid, chunkidx) we will
* see the chunks in chunkidx order, even though we didn't explicitly ask
* for it.
*/
nextidx = 0;

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/index/genam.c,v 1.50 2005/11/20 19:49:07 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/index/genam.c,v 1.51 2005/11/22 18:17:06 momjian Exp $
*
* NOTES
* many of the old access method routines have been turned into
@ -202,8 +202,8 @@ systable_beginscan(Relation heapRelation,
/*
* Change attribute numbers to be index column numbers.
*
* This code could be generalized to search for the index key numbers to
* substitute, but for now there's no need.
* This code could be generalized to search for the index key numbers
* to substitute, but for now there's no need.
*/
for (i = 0; i < nkeys; i++)
{

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.128 2005/11/06 19:29:00 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.129 2005/11/22 18:17:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -104,8 +104,8 @@ top:
* If we're not allowing duplicates, make sure the key isn't already in
* the index.
*
* NOTE: obviously, _bt_check_unique can only detect keys that are already in
* the index; so it cannot defend against concurrent insertions of the
* NOTE: obviously, _bt_check_unique can only detect keys that are already
* in the index; so it cannot defend against concurrent insertions of the
* same key. We protect against that by means of holding a write lock on
* the target page. Any other would-be inserter of the same key must
* acquire a write lock on the same target page, so only one would-be
@ -114,8 +114,8 @@ top:
* our insertion, so no later inserter can fail to see our insertion.
* (This requires some care in _bt_insertonpg.)
*
* If we must wait for another xact, we release the lock while waiting, and
* then must start over completely.
* If we must wait for another xact, we release the lock while waiting,
* and then must start over completely.
*/
if (index_is_unique)
{
@ -193,8 +193,8 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
/*
* We can skip items that are marked killed.
*
* Formerly, we applied _bt_isequal() before checking the kill flag,
* so as to fall out of the item loop as soon as possible.
* Formerly, we applied _bt_isequal() before checking the kill
* flag, so as to fall out of the item loop as soon as possible.
* However, in the presence of heavy update activity an index may
* contain many killed items with the same key; running
* _bt_isequal() on each killed item gets expensive. Furthermore
@ -431,11 +431,11 @@ _bt_insertonpg(Relation rel,
/*
* step right to next non-dead page
*
* must write-lock that page before releasing write lock on current
* page; else someone else's _bt_check_unique scan could fail to
* see our insertion. write locks on intermediate dead pages
* won't do because we don't know when they will get de-linked
* from the tree.
* must write-lock that page before releasing write lock on
* current page; else someone else's _bt_check_unique scan could
* fail to see our insertion. write locks on intermediate dead
* pages won't do because we don't know when they will get
* de-linked from the tree.
*/
Buffer rbuf = InvalidBuffer;
@ -471,9 +471,9 @@ _bt_insertonpg(Relation rel,
/*
* Do we need to split the page to fit the item on it?
*
* Note: PageGetFreeSpace() subtracts sizeof(ItemIdData) from its result, so
* this comparison is correct even though we appear to be accounting only
* for the item and not for its line pointer.
* Note: PageGetFreeSpace() subtracts sizeof(ItemIdData) from its result,
* so this comparison is correct even though we appear to be accounting
* only for the item and not for its line pointer.
*/
if (PageGetFreeSpace(page) < itemsz)
{
@ -1158,10 +1158,10 @@ _bt_insert_parent(Relation rel,
* the next higher level that someone constructed meanwhile, and find the
* right place to insert as for the normal case.
*
* If we have to search for the parent level, we do so by re-descending from
* the root. This is not super-efficient, but it's rare enough not to
* matter. (This path is also taken when called from WAL recovery --- we
* have no stack in that case.)
* If we have to search for the parent level, we do so by re-descending
* from the root. This is not super-efficient, but it's rare enough not
* to matter. (This path is also taken when called from WAL recovery ---
* we have no stack in that case.)
*/
if (is_root)
{

View File

@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.89 2005/11/06 19:29:00 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.90 2005/11/22 18:17:06 momjian Exp $
*
* NOTES
* Postgres btree pages look like ordinary relation pages. The opaque
@ -412,16 +412,17 @@ _bt_checkpage(Relation rel, Buffer buf)
Page page = BufferGetPage(buf);
/*
* ReadBuffer verifies that every newly-read page passes PageHeaderIsValid,
* which means it either contains a reasonably sane page header or is
* all-zero. We have to defend against the all-zero case, however.
* ReadBuffer verifies that every newly-read page passes
* PageHeaderIsValid, which means it either contains a reasonably sane
* page header or is all-zero. We have to defend against the all-zero
* case, however.
*/
if (PageIsNew(page))
ereport(ERROR,
(errcode(ERRCODE_INDEX_CORRUPTED),
errmsg("index \"%s\" contains unexpected zero page at block %u",
RelationGetRelationName(rel),
BufferGetBlockNumber(buf)),
errmsg("index \"%s\" contains unexpected zero page at block %u",
RelationGetRelationName(rel),
BufferGetBlockNumber(buf)),
errhint("Please REINDEX it.")));
/*
@ -440,7 +441,7 @@ _bt_checkpage(Relation rel, Buffer buf)
/*
* _bt_getbuf() -- Get a buffer by block number for read or write.
*
* blkno == P_NEW means to get an unallocated index page. The page
* blkno == P_NEW means to get an unallocated index page. The page
* will be initialized before returning it.
*
* When this routine returns, the appropriate lock is set on the
@ -475,21 +476,21 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access)
* have been re-used between the time the last VACUUM scanned it and
* the time the VACUUM made its FSM updates.)
*
* In fact, it's worse than that: we can't even assume that it's safe to
* take a lock on the reported page. If somebody else has a lock on
* it, or even worse our own caller does, we could deadlock. (The
* In fact, it's worse than that: we can't even assume that it's safe
* to take a lock on the reported page. If somebody else has a lock
* on it, or even worse our own caller does, we could deadlock. (The
* own-caller scenario is actually not improbable. Consider an index
* on a serial or timestamp column. Nearly all splits will be at the
* rightmost page, so it's entirely likely that _bt_split will call us
* while holding a lock on the page most recently acquired from FSM.
* A VACUUM running concurrently with the previous split could well
* have placed that page back in FSM.)
* while holding a lock on the page most recently acquired from FSM. A
* VACUUM running concurrently with the previous split could well have
* placed that page back in FSM.)
*
* To get around that, we ask for only a conditional lock on the reported
* page. If we fail, then someone else is using the page, and we may
* reasonably assume it's not free. (If we happen to be wrong, the
* worst consequence is the page will be lost to use till the next
* VACUUM, which is no big problem.)
* To get around that, we ask for only a conditional lock on the
* reported page. If we fail, then someone else is using the page,
* and we may reasonably assume it's not free. (If we happen to be
* wrong, the worst consequence is the page will be lost to use till
* the next VACUUM, which is no big problem.)
*/
for (;;)
{
@ -839,12 +840,12 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
* We have to lock the pages we need to modify in the standard order:
* moving right, then up. Else we will deadlock against other writers.
*
* So, we need to find and write-lock the current left sibling of the target
* page. The sibling that was current a moment ago could have split, so
* we may have to move right. This search could fail if either the
* sibling or the target page was deleted by someone else meanwhile; if
* so, give up. (Right now, that should never happen, since page deletion
* is only done in VACUUM and there shouldn't be multiple VACUUMs
* So, we need to find and write-lock the current left sibling of the
* target page. The sibling that was current a moment ago could have
* split, so we may have to move right. This search could fail if either
* the sibling or the target page was deleted by someone else meanwhile;
* if so, give up. (Right now, that should never happen, since page
* deletion is only done in VACUUM and there shouldn't be multiple VACUUMs
* concurrently on the same table.)
*/
if (leftsib != P_NONE)

View File

@ -12,7 +12,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtree.c,v 1.133 2005/11/06 19:29:00 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtree.c,v 1.134 2005/11/22 18:17:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -307,8 +307,8 @@ btgettuple(PG_FUNCTION_ARGS)
* Save heap TID to use it in _bt_restscan. Then release the read lock on
* the buffer so that we aren't blocking other backends.
*
* NOTE: we do keep the pin on the buffer! This is essential to ensure that
* someone else doesn't delete the index entry we are stopped on.
* NOTE: we do keep the pin on the buffer! This is essential to ensure
* that someone else doesn't delete the index entry we are stopped on.
*/
if (res)
{
@ -774,8 +774,8 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
/*
* We can't use _bt_getbuf() here because it always applies
* _bt_checkpage(), which will barf on an all-zero page.
* We want to recycle all-zero pages, not fail.
* _bt_checkpage(), which will barf on an all-zero page. We want to
* recycle all-zero pages, not fail.
*/
buf = ReadBuffer(rel, blkno);
LockBuffer(buf, BT_READ);

View File

@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.96 2005/10/18 01:06:23 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.97 2005/11/22 18:17:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -164,10 +164,11 @@ _bt_moveright(Relation rel,
*
* When nextkey = true: move right if the scan key is >= page's high key.
*
* The page could even have split more than once, so scan as far as needed.
* The page could even have split more than once, so scan as far as
* needed.
*
* We also have to move right if we followed a link that brought us to a dead
* page.
* We also have to move right if we followed a link that brought us to a
* dead page.
*/
cmpval = nextkey ? 0 : 1;
@ -255,8 +256,8 @@ _bt_binsrch(Relation rel,
* For nextkey=false (cmpval=1), the loop invariant is: all slots before
* 'low' are < scan key, all slots at or after 'high' are >= scan key.
*
* For nextkey=true (cmpval=0), the loop invariant is: all slots before 'low'
* are <= scan key, all slots at or after 'high' are > scan key.
* For nextkey=true (cmpval=0), the loop invariant is: all slots before
* 'low' are <= scan key, all slots at or after 'high' are > scan key.
*
* We can fall out when high == low.
*/
@ -282,8 +283,8 @@ _bt_binsrch(Relation rel,
* At this point we have high == low, but be careful: they could point
* past the last slot on the page.
*
* On a leaf page, we always return the first key >= scan key (resp. > scan
* key), which could be the last slot + 1.
* On a leaf page, we always return the first key >= scan key (resp. >
* scan key), which could be the last slot + 1.
*/
if (P_ISLEAF(opaque))
return low;
@ -350,8 +351,8 @@ _bt_compare(Relation rel,
* you think about how multi-key ordering works, you'll understand why
* this is.
*
* We don't test for violation of this condition here, however. The initial
* setup for the index scan had better have gotten it right (see
* We don't test for violation of this condition here, however. The
* initial setup for the index scan had better have gotten it right (see
* _bt_first).
*/
@ -692,9 +693,9 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
* where we need to start the scan, and set flag variables to control the
* code below.
*
* If nextkey = false, _bt_search and _bt_binsrch will locate the first item
* >= scan key. If nextkey = true, they will locate the first item > scan
* key.
* If nextkey = false, _bt_search and _bt_binsrch will locate the first
* item >= scan key. If nextkey = true, they will locate the first item >
* scan key.
*
* If goback = true, we will then step back one item, while if goback =
* false, we will start the scan on the located item.
@ -819,9 +820,9 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
* than or equal to the scan key and we know that everything on later
* pages is greater than scan key.
*
* The actually desired starting point is either this item or the prior one,
* or in the end-of-page case it's the first item on the next page or the
* last item on this page. We apply _bt_step if needed to get to the
* The actually desired starting point is either this item or the prior
* one, or in the end-of-page case it's the first item on the next page or
* the last item on this page. We apply _bt_step if needed to get to the
* right place.
*
* If _bt_step fails (meaning we fell off the end of the index in one
@ -1044,9 +1045,9 @@ _bt_walk_left(Relation rel, Buffer buf)
* the original page got deleted and isn't in the sibling chain at all
* anymore, not that its left sibling got split more than four times.
*
* Note that it is correct to test P_ISDELETED not P_IGNORE here, because
* half-dead pages are still in the sibling chain. Caller must reject
* half-dead pages if wanted.
* Note that it is correct to test P_ISDELETED not P_IGNORE here,
* because half-dead pages are still in the sibling chain. Caller
* must reject half-dead pages if wanted.
*/
tries = 0;
for (;;)

View File

@ -56,7 +56,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsort.c,v 1.95 2005/10/15 02:49:09 momjian Exp $
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsort.c,v 1.96 2005/11/22 18:17:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -487,9 +487,9 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, BTItem bti)
* the per-page available space. Note that at this point, btisz doesn't
* include the ItemId.
*
* NOTE: similar code appears in _bt_insertonpg() to defend against oversize
* items being inserted into an already-existing index. But during
* creation of an index, we don't go through there.
* NOTE: similar code appears in _bt_insertonpg() to defend against
* oversize items being inserted into an already-existing index. But
* during creation of an index, we don't go through there.
*/
if (btisz > BTMaxItemSize(npage))
ereport(ERROR,

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.65 2005/10/18 01:06:23 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.66 2005/11/22 18:17:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -549,8 +549,8 @@ _bt_checkkeys(IndexScanDesc scan, IndexTuple tuple,
* able to conclude no further tuples will pass, either. We have
* to look at the scan direction and the qual type.
*
* Note: the only case in which we would keep going after failing a
* required qual is if there are partially-redundant quals that
* Note: the only case in which we would keep going after failing
* a required qual is if there are partially-redundant quals that
* _bt_preprocess_keys() was unable to eliminate. For example,
* given "x > 4 AND x > 10" where both are cross-type comparisons
* and so not removable, we might start the scan at the x = 4

View File

@ -42,7 +42,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $PostgreSQL: pgsql/src/backend/access/transam/multixact.c,v 1.12 2005/11/05 21:19:47 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/transam/multixact.c,v 1.13 2005/11/22 18:17:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -129,22 +129,23 @@ typedef struct MultiXactStateData
* member of a MultiXact, and that MultiXact would have to be created
* during or after the lock acquisition.)
*
* OldestVisibleMXactId[k] is the oldest MultiXactId each backend's current
* transaction(s) think is potentially live, or InvalidMultiXactId when
* not in a transaction or not in a transaction that's paid any attention
* to MultiXacts yet. This is computed when first needed in a given
* transaction, and cleared at transaction end. We can compute it as the
* minimum of the valid OldestMemberMXactId[] entries at the time we
* compute it (using nextMXact if none are valid). Each backend is
* OldestVisibleMXactId[k] is the oldest MultiXactId each backend's
* current transaction(s) think is potentially live, or InvalidMultiXactId
* when not in a transaction or not in a transaction that's paid any
* attention to MultiXacts yet. This is computed when first needed in a
* given transaction, and cleared at transaction end. We can compute it
* as the minimum of the valid OldestMemberMXactId[] entries at the time
* we compute it (using nextMXact if none are valid). Each backend is
* required not to attempt to access any SLRU data for MultiXactIds older
* than its own OldestVisibleMXactId[] setting; this is necessary because
* the checkpointer could truncate away such data at any instant.
*
* The checkpointer can compute the safe truncation point as the oldest valid
* value among all the OldestMemberMXactId[] and OldestVisibleMXactId[]
* entries, or nextMXact if none are valid. Clearly, it is not possible
* for any later-computed OldestVisibleMXactId value to be older than
* this, and so there is no risk of truncating data that is still needed.
* The checkpointer can compute the safe truncation point as the oldest
* valid value among all the OldestMemberMXactId[] and
* OldestVisibleMXactId[] entries, or nextMXact if none are valid.
* Clearly, it is not possible for any later-computed OldestVisibleMXactId
* value to be older than this, and so there is no risk of truncating data
* that is still needed.
*/
MultiXactId perBackendXactIds[1]; /* VARIABLE LENGTH ARRAY */
} MultiXactStateData;
@ -631,8 +632,8 @@ CreateMultiXactId(int nxids, TransactionId *xids)
}
/*
* Assign the MXID and offsets range to use, and make sure there is
* space in the OFFSETs and MEMBERs files. NB: this routine does
* Assign the MXID and offsets range to use, and make sure there is space
* in the OFFSETs and MEMBERs files. NB: this routine does
* START_CRIT_SECTION().
*/
multi = GetNewMultiXactId(nxids, &offset);
@ -788,9 +789,9 @@ GetNewMultiXactId(int nxids, MultiXactOffset *offset)
ExtendMultiXactOffset(result);
/*
* Reserve the members space, similarly to above. Also, be
* careful not to return zero as the starting offset for any multixact.
* See GetMultiXactIdMembers() for motivation.
* Reserve the members space, similarly to above. Also, be careful not to
* return zero as the starting offset for any multixact. See
* GetMultiXactIdMembers() for motivation.
*/
nextOffset = MultiXactState->nextOffset;
if (nextOffset == 0)
@ -804,8 +805,8 @@ GetNewMultiXactId(int nxids, MultiXactOffset *offset)
ExtendMultiXactMember(nextOffset, nxids);
/*
* Critical section from here until caller has written the data into
* the just-reserved SLRU space; we don't want to error out with a partly
* Critical section from here until caller has written the data into the
* just-reserved SLRU space; we don't want to error out with a partly
* written MultiXact structure. (In particular, failing to write our
* start offset after advancing nextMXact would effectively corrupt the
* previous MultiXact.)
@ -819,8 +820,8 @@ GetNewMultiXactId(int nxids, MultiXactOffset *offset)
* We don't care about MultiXactId wraparound here; it will be handled by
* the next iteration. But note that nextMXact may be InvalidMultiXactId
* after this routine exits, so anyone else looking at the variable must
* be prepared to deal with that. Similarly, nextOffset may be zero,
* but we won't use that as the actual start offset of the next multixact.
* be prepared to deal with that. Similarly, nextOffset may be zero, but
* we won't use that as the actual start offset of the next multixact.
*/
(MultiXactState->nextMXact)++;
@ -881,7 +882,7 @@ GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids)
* SLRU data if we did try to examine it.
*
* Conversely, an ID >= nextMXact shouldn't ever be seen here; if it is
* seen, it implies undetected ID wraparound has occurred. We just
* seen, it implies undetected ID wraparound has occurred. We just
* silently assume that such an ID is no longer running.
*
* Shared lock is enough here since we aren't modifying any global state.
@ -897,7 +898,7 @@ GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids)
/*
* Acquire the shared lock just long enough to grab the current counter
* values. We may need both nextMXact and nextOffset; see below.
* values. We may need both nextMXact and nextOffset; see below.
*/
LWLockAcquire(MultiXactGenLock, LW_SHARED);
@ -915,27 +916,27 @@ GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids)
/*
* Find out the offset at which we need to start reading MultiXactMembers
* and the number of members in the multixact. We determine the latter
* as the difference between this multixact's starting offset and the
* next one's. However, there are some corner cases to worry about:
* and the number of members in the multixact. We determine the latter as
* the difference between this multixact's starting offset and the next
* one's. However, there are some corner cases to worry about:
*
* 1. This multixact may be the latest one created, in which case there
* is no next one to look at. In this case the nextOffset value we just
* 1. This multixact may be the latest one created, in which case there is
* no next one to look at. In this case the nextOffset value we just
* saved is the correct endpoint.
*
* 2. The next multixact may still be in process of being filled in:
* that is, another process may have done GetNewMultiXactId but not yet
* written the offset entry for that ID. In that scenario, it is
* guaranteed that the offset entry for that multixact exists (because
* GetNewMultiXactId won't release MultiXactGenLock until it does)
* but contains zero (because we are careful to pre-zero offset pages).
* Because GetNewMultiXactId will never return zero as the starting offset
* for a multixact, when we read zero as the next multixact's offset, we
* know we have this case. We sleep for a bit and try again.
* 2. The next multixact may still be in process of being filled in: that
* is, another process may have done GetNewMultiXactId but not yet written
* the offset entry for that ID. In that scenario, it is guaranteed that
* the offset entry for that multixact exists (because GetNewMultiXactId
* won't release MultiXactGenLock until it does) but contains zero
* (because we are careful to pre-zero offset pages). Because
* GetNewMultiXactId will never return zero as the starting offset for a
* multixact, when we read zero as the next multixact's offset, we know we
* have this case. We sleep for a bit and try again.
*
* 3. Because GetNewMultiXactId increments offset zero to offset one
* to handle case #2, there is an ambiguity near the point of offset
* wraparound. If we see next multixact's offset is one, is that our
* 3. Because GetNewMultiXactId increments offset zero to offset one to
* handle case #2, there is an ambiguity near the point of offset
* wraparound. If we see next multixact's offset is one, is that our
* multixact's actual endpoint, or did it end at zero with a subsequent
* increment? We handle this using the knowledge that if the zero'th
* member slot wasn't filled, it'll contain zero, and zero isn't a valid

View File

@ -15,7 +15,7 @@
*
* We use a control LWLock to protect the shared data structures, plus
* per-buffer LWLocks that synchronize I/O for each buffer. The control lock
* must be held to examine or modify any shared state. A process that is
* must be held to examine or modify any shared state. A process that is
* reading in or writing out a page buffer does not hold the control lock,
* only the per-buffer lock for the buffer it is working on.
*
@ -37,7 +37,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $PostgreSQL: pgsql/src/backend/access/transam/slru.c,v 1.30 2005/11/05 21:19:47 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/transam/slru.c,v 1.31 2005/11/22 18:17:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -236,13 +236,14 @@ SimpleLruWaitIO(SlruCtl ctl, int slotno)
LWLockAcquire(shared->buffer_locks[slotno], LW_SHARED);
LWLockRelease(shared->buffer_locks[slotno]);
LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE);
/*
* If the slot is still in an io-in-progress state, then either someone
* already started a new I/O on the slot, or a previous I/O failed and
* neglected to reset the page state. That shouldn't happen, really,
* but it seems worth a few extra cycles to check and recover from it.
* We can cheaply test for failure by seeing if the buffer lock is still
* held (we assume that transaction abort would release the lock).
* neglected to reset the page state. That shouldn't happen, really, but
* it seems worth a few extra cycles to check and recover from it. We can
* cheaply test for failure by seeing if the buffer lock is still held (we
* assume that transaction abort would release the lock).
*/
if (shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS ||
shared->page_status[slotno] == SLRU_PAGE_WRITE_IN_PROGRESS)
@ -252,7 +253,8 @@ SimpleLruWaitIO(SlruCtl ctl, int slotno)
/* indeed, the I/O must have failed */
if (shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS)
shared->page_status[slotno] = SLRU_PAGE_EMPTY;
else /* write_in_progress */
else
/* write_in_progress */
{
shared->page_status[slotno] = SLRU_PAGE_VALID;
shared->page_dirty[slotno] = true;
@ -375,8 +377,8 @@ SimpleLruWritePage(SlruCtl ctl, int slotno, SlruFlush fdata)
}
/*
* Do nothing if page is not dirty, or if buffer no longer contains
* the same page we were called for.
* Do nothing if page is not dirty, or if buffer no longer contains the
* same page we were called for.
*/
if (!shared->page_dirty[slotno] ||
shared->page_status[slotno] != SLRU_PAGE_VALID ||
@ -384,8 +386,8 @@ SimpleLruWritePage(SlruCtl ctl, int slotno, SlruFlush fdata)
return;
/*
* Mark the slot write-busy, and clear the dirtybit. After this point,
* a transaction status update on this page will mark it dirty again.
* Mark the slot write-busy, and clear the dirtybit. After this point, a
* transaction status update on this page will mark it dirty again.
*/
shared->page_status[slotno] = SLRU_PAGE_WRITE_IN_PROGRESS;
shared->page_dirty[slotno] = false;
@ -902,7 +904,7 @@ restart:;
/*
* Hmm, we have (or may have) I/O operations acting on the page, so
* we've got to wait for them to finish and then start again. This is
* the same logic as in SlruSelectLRUPage. (XXX if page is dirty,
* the same logic as in SlruSelectLRUPage. (XXX if page is dirty,
* wouldn't it be OK to just discard it without writing it? For now,
* keep the logic the same as it was.)
*/

View File

@ -22,7 +22,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $PostgreSQL: pgsql/src/backend/access/transam/subtrans.c,v 1.12 2005/11/05 21:19:47 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/transam/subtrans.c,v 1.13 2005/11/22 18:17:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -261,8 +261,8 @@ ShutdownSUBTRANS(void)
/*
* Flush dirty SUBTRANS pages to disk
*
* This is not actually necessary from a correctness point of view. We do it
* merely as a debugging aid.
* This is not actually necessary from a correctness point of view. We do
* it merely as a debugging aid.
*/
SimpleLruFlush(SubTransCtl, false);
}
@ -276,9 +276,9 @@ CheckPointSUBTRANS(void)
/*
* Flush dirty SUBTRANS pages to disk
*
* This is not actually necessary from a correctness point of view. We do it
* merely to improve the odds that writing of dirty pages is done by the
* checkpoint process and not by backends.
* This is not actually necessary from a correctness point of view. We do
* it merely to improve the odds that writing of dirty pages is done by
* the checkpoint process and not by backends.
*/
SimpleLruFlush(SubTransCtl, true);
}

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/transam/transam.c,v 1.66 2005/10/15 02:49:09 momjian Exp $
* $PostgreSQL: pgsql/src/backend/access/transam/transam.c,v 1.67 2005/11/22 18:17:07 momjian Exp $
*
* NOTES
* This file contains the high level access-method interface to the
@ -174,9 +174,9 @@ TransactionIdDidCommit(TransactionId transactionId)
* pg_subtrans; instead assume that the parent crashed without cleaning up
* its children.
*
* Originally we Assert'ed that the result of SubTransGetParent was not zero.
* However with the introduction of prepared transactions, there can be a
* window just after database startup where we do not have complete
* Originally we Assert'ed that the result of SubTransGetParent was not
* zero. However with the introduction of prepared transactions, there can
* be a window just after database startup where we do not have complete
* knowledge in pg_subtrans of the transactions after TransactionXmin.
* StartupSUBTRANS() has ensured that any missing information will be
* zeroed. Since this case should not happen under normal conditions, it

View File

@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/transam/twophase.c,v 1.16 2005/10/29 00:31:50 petere Exp $
* $PostgreSQL: pgsql/src/backend/access/transam/twophase.c,v 1.17 2005/11/22 18:17:07 momjian Exp $
*
* NOTES
* Each global transaction is associated with a global transaction
@ -851,10 +851,10 @@ EndPrepare(GlobalTransaction gxact)
/*
* Create the 2PC state file.
*
* Note: because we use BasicOpenFile(), we are responsible for ensuring the
* FD gets closed in any error exit path. Once we get into the critical
* section, though, it doesn't matter since any failure causes PANIC
* anyway.
* Note: because we use BasicOpenFile(), we are responsible for ensuring
* the FD gets closed in any error exit path. Once we get into the
* critical section, though, it doesn't matter since any failure causes
* PANIC anyway.
*/
TwoPhaseFilePath(path, xid);
@ -911,8 +911,8 @@ EndPrepare(GlobalTransaction gxact)
* The state file isn't valid yet, because we haven't written the correct
* CRC yet. Before we do that, insert entry in WAL and flush it to disk.
*
* Between the time we have written the WAL entry and the time we write out
* the correct state file CRC, we have an inconsistency: the xact is
* Between the time we have written the WAL entry and the time we write
* out the correct state file CRC, we have an inconsistency: the xact is
* prepared according to WAL but not according to our on-disk state. We
* use a critical section to force a PANIC if we are unable to complete
* the write --- then, WAL replay should repair the inconsistency. The
@ -1344,11 +1344,11 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
* it just long enough to make a list of the XIDs that require fsyncing,
* and then do the I/O afterwards.
*
* This approach creates a race condition: someone else could delete a GXACT
* between the time we release TwoPhaseStateLock and the time we try to
* open its state file. We handle this by special-casing ENOENT failures:
* if we see that, we verify that the GXACT is no longer valid, and if so
* ignore the failure.
* This approach creates a race condition: someone else could delete a
* GXACT between the time we release TwoPhaseStateLock and the time we try
* to open its state file. We handle this by special-casing ENOENT
* failures: if we see that, we verify that the GXACT is no longer valid,
* and if so ignore the failure.
*/
if (max_prepared_xacts <= 0)
return; /* nothing to do */

View File

@ -6,7 +6,7 @@
* Copyright (c) 2000-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/transam/varsup.c,v 1.68 2005/10/29 00:31:50 petere Exp $
* $PostgreSQL: pgsql/src/backend/access/transam/varsup.c,v 1.69 2005/11/22 18:17:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -56,8 +56,8 @@ GetNewTransactionId(bool isSubXact)
* (which gives an escape hatch to the DBA who ignored all those
* warnings).
*
* Test is coded to fall out as fast as possible during normal operation, ie,
* when the warn limit is set and we haven't violated it.
* Test is coded to fall out as fast as possible during normal operation,
* ie, when the warn limit is set and we haven't violated it.
*/
if (TransactionIdFollowsOrEquals(xid, ShmemVariableCache->xidWarnLimit) &&
TransactionIdIsValid(ShmemVariableCache->xidWarnLimit))
@ -268,8 +268,8 @@ GetNewObjectId(void)
* right after a wrap occurs, so as to avoid a possibly large number of
* iterations in GetNewOid.) Note we are relying on unsigned comparison.
*
* During initdb, we start the OID generator at FirstBootstrapObjectId, so we
* only enforce wrapping to that point when in bootstrap or standalone
* During initdb, we start the OID generator at FirstBootstrapObjectId, so
* we only enforce wrapping to that point when in bootstrap or standalone
* mode. The first time through this routine after normal postmaster
* start, the counter will be forced up to FirstNormalObjectId. This
* mechanism leaves the OIDs between FirstBootstrapObjectId and

View File

@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/transam/xact.c,v 1.215 2005/10/15 02:49:09 momjian Exp $
* $PostgreSQL: pgsql/src/backend/access/transam/xact.c,v 1.216 2005/11/22 18:17:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -750,8 +750,8 @@ RecordTransactionCommit(void)
* XLOG record generated by nextval will hit the disk before we report
* the transaction committed.
*
* Note: if we generated a commit record above, MyXactMadeXLogEntry will
* certainly be set now.
* Note: if we generated a commit record above, MyXactMadeXLogEntry
* will certainly be set now.
*/
if (MyXactMadeXLogEntry)
{
@ -762,8 +762,8 @@ RecordTransactionCommit(void)
* because on most Unixen, the minimum select() delay is 10msec or
* more, which is way too long.)
*
* We do not sleep if enableFsync is not turned on, nor if there are
* fewer than CommitSiblings other backends with active
* We do not sleep if enableFsync is not turned on, nor if there
* are fewer than CommitSiblings other backends with active
* transactions.
*/
if (CommitDelay > 0 && enableFsync &&
@ -993,10 +993,10 @@ RecordTransactionAbort(void)
* nowhere in permanent storage, so no one else will ever care if it
* committed.)
*
* We do not flush XLOG to disk unless deleting files, since the default
* assumption after a crash would be that we aborted, anyway. For the
* same reason, we don't need to worry about interlocking against
* checkpoint start.
* We do not flush XLOG to disk unless deleting files, since the
* default assumption after a crash would be that we aborted, anyway.
* For the same reason, we don't need to worry about interlocking
* against checkpoint start.
*/
if (MyLastRecPtr.xrecoff != 0 || nrels > 0)
{
@ -1042,8 +1042,8 @@ RecordTransactionAbort(void)
* Mark the transaction aborted in clog. This is not absolutely
* necessary but we may as well do it while we are here.
*
* The ordering here isn't critical but it seems best to mark the parent
* first. This assures an atomic transition of all the
* The ordering here isn't critical but it seems best to mark the
* parent first. This assures an atomic transition of all the
* subtransactions to aborted state from the point of view of
* concurrent TransactionIdDidAbort calls.
*/
@ -1520,11 +1520,11 @@ CommitTransaction(void)
* it's too late to abort the transaction. This should be just
* noncritical resource releasing.
*
* The ordering of operations is not entirely random. The idea is: release
* resources visible to other backends (eg, files, buffer pins); then
* release locks; then release backend-local resources. We want to release
* locks at the point where any backend waiting for us will see our
* transaction as being fully cleaned up.
* The ordering of operations is not entirely random. The idea is:
* release resources visible to other backends (eg, files, buffer pins);
* then release locks; then release backend-local resources. We want to
* release locks at the point where any backend waiting for us will see
* our transaction as being fully cleaned up.
*
* Resources that can be associated with individual queries are handled by
* the ResourceOwner mechanism. The other calls here are for backend-wide
@ -1630,9 +1630,9 @@ PrepareTransaction(void)
* Do pre-commit processing (most of this stuff requires database access,
* and in fact could still cause an error...)
*
* It is possible for PrepareHoldablePortals to invoke functions that queue
* deferred triggers, and it's also possible that triggers create holdable
* cursors. So we have to loop until there's nothing left to do.
* It is possible for PrepareHoldablePortals to invoke functions that
* queue deferred triggers, and it's also possible that triggers create
* holdable cursors. So we have to loop until there's nothing left to do.
*/
for (;;)
{
@ -1715,9 +1715,9 @@ PrepareTransaction(void)
/*
* Here is where we really truly prepare.
*
* We have to record transaction prepares even if we didn't make any updates,
* because the transaction manager might get confused if we lose a global
* transaction.
* We have to record transaction prepares even if we didn't make any
* updates, because the transaction manager might get confused if we lose
* a global transaction.
*/
EndPrepare(gxact);
@ -1868,10 +1868,11 @@ AbortTransaction(void)
* s->currentUser, since it may not be set yet; instead rely on internal
* state of miscinit.c.
*
* (Note: it is not necessary to restore session authorization here because
* that can only be changed via GUC, and GUC will take care of rolling it
* back if need be. However, an error within a SECURITY DEFINER function
* could send control here with the wrong current userid.)
* (Note: it is not necessary to restore session authorization here
* because that can only be changed via GUC, and GUC will take care of
* rolling it back if need be. However, an error within a SECURITY
* DEFINER function could send control here with the wrong current
* userid.)
*/
AtAbort_UserId();
@ -2353,8 +2354,8 @@ AbortCurrentTransaction(void)
/*
* Here, we are already in an aborted transaction state and are
* waiting for a ROLLBACK, but for some reason we failed again!
* So we just remain in the abort state.
* waiting for a ROLLBACK, but for some reason we failed again! So
* we just remain in the abort state.
*/
case TBLOCK_ABORT:
case TBLOCK_SUBABORT:

View File

@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.222 2005/10/29 00:31:50 petere Exp $
* $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.223 2005/11/22 18:17:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -571,11 +571,11 @@ XLogInsert(RmgrId rmid, uint8 info, XLogRecData *rdata)
* the whole record in the order "rdata, then backup blocks, then record
* header".
*
* We may have to loop back to here if a race condition is detected below. We
* could prevent the race by doing all this work while holding the insert
* lock, but it seems better to avoid doing CRC calculations while holding
* the lock. This means we have to be careful about modifying the rdata
* chain until we know we aren't going to loop back again. The only
* We may have to loop back to here if a race condition is detected below.
* We could prevent the race by doing all this work while holding the
* insert lock, but it seems better to avoid doing CRC calculations while
* holding the lock. This means we have to be careful about modifying the
* rdata chain until we know we aren't going to loop back again. The only
* change we allow ourselves to make earlier is to set rdt->data = NULL in
* chain items we have decided we will have to back up the whole buffer
* for. This is OK because we will certainly decide the same thing again
@ -763,9 +763,9 @@ begin:;
* now irrevocably changed the input rdata chain. At the exit of this
* loop, write_len includes the backup block data.
*
* Also set the appropriate info bits to show which buffers were backed up.
* The i'th XLR_SET_BKP_BLOCK bit corresponds to the i'th distinct buffer
* value (ignoring InvalidBuffer) appearing in the rdata chain.
* Also set the appropriate info bits to show which buffers were backed
* up. The i'th XLR_SET_BKP_BLOCK bit corresponds to the i'th distinct
* buffer value (ignoring InvalidBuffer) appearing in the rdata chain.
*/
write_len = len;
for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
@ -1666,20 +1666,20 @@ XLogFlush(XLogRecPtr record)
* problem; most likely, the requested flush point is past end of XLOG.
* This has been seen to occur when a disk page has a corrupted LSN.
*
* Formerly we treated this as a PANIC condition, but that hurts the system's
* robustness rather than helping it: we do not want to take down the
* whole system due to corruption on one data page. In particular, if the
* bad page is encountered again during recovery then we would be unable
* to restart the database at all! (This scenario has actually happened
* in the field several times with 7.1 releases. Note that we cannot get
* here while InRedo is true, but if the bad page is brought in and marked
* dirty during recovery then CreateCheckPoint will try to flush it at the
* end of recovery.)
* Formerly we treated this as a PANIC condition, but that hurts the
* system's robustness rather than helping it: we do not want to take down
* the whole system due to corruption on one data page. In particular, if
* the bad page is encountered again during recovery then we would be
* unable to restart the database at all! (This scenario has actually
* happened in the field several times with 7.1 releases. Note that we
* cannot get here while InRedo is true, but if the bad page is brought in
* and marked dirty during recovery then CreateCheckPoint will try to
* flush it at the end of recovery.)
*
* The current approach is to ERROR under normal conditions, but only WARNING
* during recovery, so that the system can be brought up even if there's a
* corrupt LSN. Note that for calls from xact.c, the ERROR will be
* promoted to PANIC since xact.c calls this routine inside a critical
* The current approach is to ERROR under normal conditions, but only
* WARNING during recovery, so that the system can be brought up even if
* there's a corrupt LSN. Note that for calls from xact.c, the ERROR will
* be promoted to PANIC since xact.c calls this routine inside a critical
* section. However, calls from bufmgr.c are not within critical sections
* and so we will not force a restart for a bad LSN on a data page.
*/
@ -2152,14 +2152,14 @@ RestoreArchivedFile(char *path, const char *xlogfname,
* preserved correctly when we copied to archive. Our aim is robustness,
* so we elect not to do this.
*
* If we cannot obtain the log file from the archive, however, we will try to
* use the XLOGDIR file if it exists. This is so that we can make use of
* log segments that weren't yet transferred to the archive.
* If we cannot obtain the log file from the archive, however, we will try
* to use the XLOGDIR file if it exists. This is so that we can make use
* of log segments that weren't yet transferred to the archive.
*
* Notice that we don't actually overwrite any files when we copy back from
* archive because the recoveryRestoreCommand may inadvertently restore
* inappropriate xlogs, or they may be corrupt, so we may wish to fallback
* to the segments remaining in current XLOGDIR later. The
* Notice that we don't actually overwrite any files when we copy back
* from archive because the recoveryRestoreCommand may inadvertently
* restore inappropriate xlogs, or they may be corrupt, so we may wish to
* fallback to the segments remaining in current XLOGDIR later. The
* copy-from-archive filename is always the same, ensuring that we don't
* run out of disk space on long recoveries.
*/
@ -2246,11 +2246,11 @@ RestoreArchivedFile(char *path, const char *xlogfname,
* command apparently succeeded, but let's make sure the file is
* really there now and has the correct size.
*
* XXX I made wrong-size a fatal error to ensure the DBA would notice it,
* but is that too strong? We could try to plow ahead with a local
* copy of the file ... but the problem is that there probably isn't
* one, and we'd incorrectly conclude we've reached the end of WAL and
* we're done recovering ...
* XXX I made wrong-size a fatal error to ensure the DBA would notice
* it, but is that too strong? We could try to plow ahead with a
* local copy of the file ... but the problem is that there probably
* isn't one, and we'd incorrectly conclude we've reached the end of
* WAL and we're done recovering ...
*/
if (stat(xlogpath, &stat_buf) == 0)
{
@ -3533,8 +3533,8 @@ ReadControlFile(void)
/*
* Do compatibility checking immediately. We do this here for 2 reasons:
*
* (1) if the database isn't compatible with the backend executable, we want
* to abort before we can possibly do any damage;
* (1) if the database isn't compatible with the backend executable, we
* want to abort before we can possibly do any damage;
*
* (2) this code is executed in the postmaster, so the setlocale() will
* propagate to forked backends, which aren't going to read this file for
@ -4148,9 +4148,9 @@ exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg)
* descriptive of what our current database state is, because that is what
* we replayed from.
*
* Note that if we are establishing a new timeline, ThisTimeLineID is already
* set to the new value, and so we will create a new file instead of
* overwriting any existing file.
* Note that if we are establishing a new timeline, ThisTimeLineID is
* already set to the new value, and so we will create a new file instead
* of overwriting any existing file.
*/
snprintf(recoveryPath, MAXPGPATH, XLOGDIR "/RECOVERYXLOG");
XLogFilePath(xlogpath, ThisTimeLineID, endLogId, endLogSeg);
@ -4341,8 +4341,8 @@ StartupXLOG(void)
/*
* Read control file and check XLOG status looks valid.
*
* Note: in most control paths, *ControlFile is already valid and we need not
* do ReadControlFile() here, but might as well do it to be sure.
* Note: in most control paths, *ControlFile is already valid and we need
* not do ReadControlFile() here, but might as well do it to be sure.
*/
ReadControlFile();
@ -4766,14 +4766,14 @@ StartupXLOG(void)
/*
* Perform a new checkpoint to update our recovery activity to disk.
*
* Note that we write a shutdown checkpoint rather than an on-line one.
* This is not particularly critical, but since we may be assigning a
* new TLI, using a shutdown checkpoint allows us to have the rule
* that TLI only changes in shutdown checkpoints, which allows some
* extra error checking in xlog_redo.
* Note that we write a shutdown checkpoint rather than an on-line
* one. This is not particularly critical, but since we may be
* assigning a new TLI, using a shutdown checkpoint allows us to have
* the rule that TLI only changes in shutdown checkpoints, which
* allows some extra error checking in xlog_redo.
*
* In case we had to use the secondary checkpoint, make sure that it will
* still be shown as the secondary checkpoint after this
* In case we had to use the secondary checkpoint, make sure that it
* will still be shown as the secondary checkpoint after this
* CreateCheckPoint operation; we don't want the broken primary
* checkpoint to become prevCheckPoint...
*/
@ -5106,10 +5106,10 @@ CreateCheckPoint(bool shutdown, bool force)
* (Perhaps it'd make even more sense to checkpoint only when the previous
* checkpoint record is in a different xlog page?)
*
* We have to make two tests to determine that nothing has happened since the
* start of the last checkpoint: current insertion point must match the
* end of the last checkpoint record, and its redo pointer must point to
* itself.
* We have to make two tests to determine that nothing has happened since
* the start of the last checkpoint: current insertion point must match
* the end of the last checkpoint record, and its redo pointer must point
* to itself.
*/
if (!shutdown && !force)
{
@ -5198,11 +5198,11 @@ CreateCheckPoint(bool shutdown, bool force)
* Having constructed the checkpoint record, ensure all shmem disk buffers
* and commit-log buffers are flushed to disk.
*
* This I/O could fail for various reasons. If so, we will fail to complete
* the checkpoint, but there is no reason to force a system panic.
* Accordingly, exit critical section while doing it. (If we are doing a
* shutdown checkpoint, we probably *should* panic --- but that will
* happen anyway because we'll still be inside the critical section
* This I/O could fail for various reasons. If so, we will fail to
* complete the checkpoint, but there is no reason to force a system
* panic. Accordingly, exit critical section while doing it. (If we are
* doing a shutdown checkpoint, we probably *should* panic --- but that
* will happen anyway because we'll still be inside the critical section
* established by ShutdownXLOG.)
*/
END_CRIT_SECTION();

View File

@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/bootstrap/bootstrap.c,v 1.208 2005/10/20 20:05:44 tgl Exp $
* $PostgreSQL: pgsql/src/backend/bootstrap/bootstrap.c,v 1.209 2005/11/22 18:17:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -466,8 +466,8 @@ BootstrapMain(int argc, char *argv[])
/*
* Process bootstrap input.
*
* the sed script boot.sed renamed yyparse to Int_yyparse for the bootstrap
* parser to avoid conflicts with the normal SQL parser
* the sed script boot.sed renamed yyparse to Int_yyparse for the
* bootstrap parser to avoid conflicts with the normal SQL parser
*/
Int_yyparse();

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/catalog/aclchk.c,v 1.121 2005/11/21 12:49:30 alvherre Exp $
* $PostgreSQL: pgsql/src/backend/catalog/aclchk.c,v 1.122 2005/11/22 18:17:07 momjian Exp $
*
* NOTES
* See acl.h.
@ -110,10 +110,10 @@ merge_acl_with_grant(Acl *old_acl, bool is_grant,
foreach(j, grantees)
{
AclItem aclitem;
AclItem aclitem;
Acl *newer_acl;
aclitem.ai_grantee = lfirst_oid(j);
aclitem. ai_grantee = lfirst_oid(j);
/*
* Grant options can only be granted to individual roles, not PUBLIC.
@ -165,15 +165,15 @@ ExecuteGrantStmt(GrantStmt *stmt)
AclMode privileges;
ListCell *cell;
bool all_privs;
AclMode all_privileges = (AclMode) 0;
char *errormsg = NULL;
AclMode all_privileges = (AclMode) 0;
char *errormsg = NULL;
/*
* Convert the PrivGrantee list into an Oid list. Note that at this point
* we insert an ACL_ID_PUBLIC into the list if an empty role name is
* detected (which is what the grammar uses if PUBLIC is found), so
* downstream there shouldn't be any additional work needed to support this
* case.
* downstream there shouldn't be any additional work needed to support
* this case.
*/
foreach(cell, stmt->grantees)
{
@ -256,7 +256,7 @@ ExecuteGrantStmt(GrantStmt *stmt)
/*
* ExecGrantStmt_oids
*
* "Internal" entrypoint for granting and revoking privileges. The arguments
* "Internal" entrypoint for granting and revoking privileges. The arguments
* it receives are lists of Oids or have been otherwise converted from text
* format to internal format.
*/
@ -307,8 +307,8 @@ ExecGrantStmt_oids(bool is_grant, GrantObjectType objtype, List *objects,
static List *
objectNamesToOids(GrantObjectType objtype, List *objnames)
{
List *objects = NIL;
ListCell *cell;
List *objects = NIL;
ListCell *cell;
Assert(objnames != NIL);
@ -328,7 +328,7 @@ objectNamesToOids(GrantObjectType objtype, List *objnames)
foreach(cell, objnames)
{
char *dbname = strVal(lfirst(cell));
ScanKeyData entry[1];
ScanKeyData entry[1];
HeapScanDesc scan;
HeapTuple tuple;
Relation relation;
@ -336,8 +336,8 @@ objectNamesToOids(GrantObjectType objtype, List *objnames)
relation = heap_open(DatabaseRelationId, AccessShareLock);
/*
* There's no syscache for pg_database, so we must
* look the hard way.
* There's no syscache for pg_database, so we must look the
* hard way.
*/
ScanKeyInit(&entry[0],
Anum_pg_database_datname,
@ -348,7 +348,7 @@ objectNamesToOids(GrantObjectType objtype, List *objnames)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_DATABASE),
errmsg("database \"%s\" does not exist", dbname)));
errmsg("database \"%s\" does not exist", dbname)));
objects = lappend_oid(objects, HeapTupleGetOid(tuple));
heap_close(relation, AccessShareLock);
@ -370,8 +370,8 @@ objectNamesToOids(GrantObjectType objtype, List *objnames)
case ACL_OBJECT_LANGUAGE:
foreach(cell, objnames)
{
char *langname = strVal(lfirst(cell));
HeapTuple tuple;
char *langname = strVal(lfirst(cell));
HeapTuple tuple;
tuple = SearchSysCache(LANGNAME,
PointerGetDatum(langname),
@ -379,7 +379,7 @@ objectNamesToOids(GrantObjectType objtype, List *objnames)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("language \"%s\" does not exist", langname)));
errmsg("language \"%s\" does not exist", langname)));
objects = lappend_oid(objects, HeapTupleGetOid(tuple));
@ -387,7 +387,7 @@ objectNamesToOids(GrantObjectType objtype, List *objnames)
}
break;
case ACL_OBJECT_NAMESPACE:
foreach (cell, objnames)
foreach(cell, objnames)
{
char *nspname = strVal(lfirst(cell));
HeapTuple tuple;
@ -398,7 +398,7 @@ objectNamesToOids(GrantObjectType objtype, List *objnames)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_SCHEMA),
errmsg("schema \"%s\" does not exist", nspname)));
errmsg("schema \"%s\" does not exist", nspname)));
objects = lappend_oid(objects, HeapTupleGetOid(tuple));
@ -406,13 +406,13 @@ objectNamesToOids(GrantObjectType objtype, List *objnames)
}
break;
case ACL_OBJECT_TABLESPACE:
foreach (cell, objnames)
foreach(cell, objnames)
{
char *spcname = strVal(lfirst(cell));
ScanKeyData entry[1];
HeapScanDesc scan;
HeapTuple tuple;
Relation relation;
char *spcname = strVal(lfirst(cell));
ScanKeyData entry[1];
HeapScanDesc scan;
HeapTuple tuple;
Relation relation;
relation = heap_open(TableSpaceRelationId, AccessShareLock);
@ -426,7 +426,7 @@ objectNamesToOids(GrantObjectType objtype, List *objnames)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("tablespace \"%s\" does not exist", spcname)));
errmsg("tablespace \"%s\" does not exist", spcname)));
objects = lappend_oid(objects, HeapTupleGetOid(tuple));
@ -456,7 +456,7 @@ ExecGrant_Relation(bool is_grant, List *objects, bool all_privs,
relation = heap_open(RelationRelationId, RowExclusiveLock);
foreach (cell, objects)
foreach(cell, objects)
{
Oid relOid = lfirst_oid(cell);
Datum aclDatum;
@ -498,6 +498,7 @@ ExecGrant_Relation(bool is_grant, List *objects, bool all_privs,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is a composite type",
NameStr(pg_class_tuple->relname))));
/*
* Get owner ID and working copy of existing ACL. If there's no ACL,
* substitute the proper default.
@ -622,7 +623,7 @@ ExecGrant_Database(bool is_grant, List *objects, bool all_privs,
relation = heap_open(DatabaseRelationId, RowExclusiveLock);
foreach (cell, objects)
foreach(cell, objects)
{
Oid datId = lfirst_oid(cell);
Form_pg_database pg_database_tuple;
@ -786,7 +787,7 @@ ExecGrant_Function(bool is_grant, List *objects, bool all_privs,
relation = heap_open(ProcedureRelationId, RowExclusiveLock);
foreach (cell, objects)
foreach(cell, objects)
{
Oid funcId = lfirst_oid(cell);
Form_pg_proc pg_proc_tuple;
@ -912,7 +913,7 @@ ExecGrant_Function(bool is_grant, List *objects, bool all_privs,
CatalogUpdateIndexes(relation, newtuple);
/* Update the shared dependency ACL info */
updateAclDependencies(ProcedureRelationId, funcId,
updateAclDependencies(ProcedureRelationId, funcId,
ownerId, is_grant,
noldmembers, oldmembers,
nnewmembers, newmembers);
@ -941,7 +942,7 @@ ExecGrant_Language(bool is_grant, List *objects, bool all_privs,
relation = heap_open(LanguageRelationId, RowExclusiveLock);
foreach (cell, objects)
foreach(cell, objects)
{
Oid langid = lfirst_oid(cell);
Form_pg_language pg_language_tuple;
@ -976,14 +977,14 @@ ExecGrant_Language(bool is_grant, List *objects, bool all_privs,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("language \"%s\" is not trusted",
NameStr(pg_language_tuple->lanname)),
errhint("Only superusers may use untrusted languages.")));
errhint("Only superusers may use untrusted languages.")));
/*
* Get owner ID and working copy of existing ACL. If there's no ACL,
* substitute the proper default.
*
* Note: for now, languages are treated as owned by the bootstrap user.
* We should add an owner column to pg_language instead.
* Note: for now, languages are treated as owned by the bootstrap
* user. We should add an owner column to pg_language instead.
*/
ownerId = BOOTSTRAP_SUPERUSERID;
aclDatum = SysCacheGetAttr(LANGNAME, tuple, Anum_pg_language_lanacl,
@ -1095,8 +1096,8 @@ ExecGrant_Language(bool is_grant, List *objects, bool all_privs,
static void
ExecGrant_Namespace(bool is_grant, List *objects, bool all_privs,
AclMode privileges, List *grantees, bool grant_option,
DropBehavior behavior)
AclMode privileges, List *grantees, bool grant_option,
DropBehavior behavior)
{
Relation relation;
ListCell *cell;
@ -1282,7 +1283,7 @@ ExecGrant_Tablespace(bool is_grant, List *objects, bool all_privs,
int nnewmembers;
Oid *oldmembers;
Oid *newmembers;
ScanKeyData entry[1];
ScanKeyData entry[1];
SysScanDesc scan;
HeapTuple tuple;
@ -1691,7 +1692,7 @@ pg_database_aclmask(Oid db_oid, Oid roleid,
AclMode result;
Relation pg_database;
ScanKeyData entry[1];
SysScanDesc scan;
SysScanDesc scan;
HeapTuple tuple;
Datum aclDatum;
bool isNull;
@ -1887,8 +1888,8 @@ pg_namespace_aclmask(Oid nsp_oid, Oid roleid,
* the namespace. If we don't have CREATE TEMP, act as though we have
* only USAGE (and not CREATE) rights.
*
* This may seem redundant given the check in InitTempTableNamespace, but it
* really isn't since current user ID may have changed since then. The
* This may seem redundant given the check in InitTempTableNamespace, but
* it really isn't since current user ID may have changed since then. The
* upshot of this behavior is that a SECURITY DEFINER function can create
* temp tables that can then be accessed (if permission is granted) by
* code in the same session that doesn't have permissions to create temp
@ -1956,7 +1957,7 @@ pg_tablespace_aclmask(Oid spc_oid, Oid roleid,
AclMode result;
Relation pg_tablespace;
ScanKeyData entry[1];
SysScanDesc scan;
SysScanDesc scan;
HeapTuple tuple;
Datum aclDatum;
bool isNull;
@ -2247,7 +2248,7 @@ pg_tablespace_ownercheck(Oid spc_oid, Oid roleid)
{
Relation pg_tablespace;
ScanKeyData entry[1];
SysScanDesc scan;
SysScanDesc scan;
HeapTuple spctuple;
Oid spcowner;
@ -2316,7 +2317,7 @@ pg_database_ownercheck(Oid db_oid, Oid roleid)
{
Relation pg_database;
ScanKeyData entry[1];
SysScanDesc scan;
SysScanDesc scan;
HeapTuple dbtuple;
Oid dba;

View File

@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/catalog/dependency.c,v 1.47 2005/10/15 02:49:12 momjian Exp $
* $PostgreSQL: pgsql/src/backend/catalog/dependency.c,v 1.48 2005/11/22 18:17:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -276,8 +276,8 @@ findAutoDeletableObjects(const ObjectAddress *object,
* that depend on it. For each one that is AUTO or INTERNAL, visit the
* referencing object.
*
* When dropping a whole object (subId = 0), find pg_depend records for its
* sub-objects too.
* When dropping a whole object (subId = 0), find pg_depend records for
* its sub-objects too.
*/
ScanKeyInit(&key[0],
Anum_pg_depend_refclassid,
@ -411,8 +411,8 @@ recursiveDeletion(const ObjectAddress *object,
* avoid infinite recursion in the case of cycles. Also, some dependency
* types require extra processing here.
*
* When dropping a whole object (subId = 0), remove all pg_depend records for
* its sub-objects too.
* When dropping a whole object (subId = 0), remove all pg_depend records
* for its sub-objects too.
*/
ScanKeyInit(&key[0],
Anum_pg_depend_classid,

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/catalog/heap.c,v 1.292 2005/10/18 01:06:23 tgl Exp $
* $PostgreSQL: pgsql/src/backend/catalog/heap.c,v 1.293 2005/11/22 18:17:08 momjian Exp $
*
*
* INTERFACE ROUTINES
@ -697,8 +697,8 @@ heap_create_with_catalog(const char *relname,
/*
* Allocate an OID for the relation, unless we were told what to use.
*
* The OID will be the relfilenode as well, so make sure it doesn't collide
* with either pg_class OIDs or existing physical files.
* The OID will be the relfilenode as well, so make sure it doesn't
* collide with either pg_class OIDs or existing physical files.
*/
if (!OidIsValid(relid))
relid = GetNewRelFileNode(reltablespace, shared_relation,
@ -724,8 +724,8 @@ heap_create_with_catalog(const char *relname,
* since defining a relation also defines a complex type, we add a new
* system type corresponding to the new relation.
*
* NOTE: we could get a unique-index failure here, in case the same name has
* already been used for a type.
* NOTE: we could get a unique-index failure here, in case the same name
* has already been used for a type.
*/
new_type_oid = AddNewRelationType(relname,
relnamespace,
@ -778,9 +778,9 @@ heap_create_with_catalog(const char *relname,
/*
* store constraints and defaults passed in the tupdesc, if any.
*
* NB: this may do a CommandCounterIncrement and rebuild the relcache entry,
* so the relation must be valid and self-consistent at this point. In
* particular, there are not yet constraints and defaults anywhere.
* NB: this may do a CommandCounterIncrement and rebuild the relcache
* entry, so the relation must be valid and self-consistent at this point.
* In particular, there are not yet constraints and defaults anywhere.
*/
StoreConstraints(new_rel_desc, tupdesc);
@ -1329,8 +1329,9 @@ StoreRelCheck(Relation rel, char *ccname, char *ccbin)
/*
* Find columns of rel that are used in ccbin
*
* NB: pull_var_clause is okay here only because we don't allow subselects in
* check constraints; it would fail to examine the contents of subselects.
* NB: pull_var_clause is okay here only because we don't allow subselects
* in check constraints; it would fail to examine the contents of
* subselects.
*/
varList = pull_var_clause(expr, false);
keycount = list_length(varList);

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/catalog/index.c,v 1.261 2005/10/15 02:49:12 momjian Exp $
* $PostgreSQL: pgsql/src/backend/catalog/index.c,v 1.262 2005/11/22 18:17:08 momjian Exp $
*
*
* INTERFACE ROUTINES
@ -524,8 +524,8 @@ index_create(Oid heapRelationId,
/*
* Allocate an OID for the index, unless we were told what to use.
*
* The OID will be the relfilenode as well, so make sure it doesn't collide
* with either pg_class OIDs or existing physical files.
* The OID will be the relfilenode as well, so make sure it doesn't
* collide with either pg_class OIDs or existing physical files.
*/
if (!OidIsValid(indexRelationId))
indexRelationId = GetNewRelFileNode(tableSpaceId, shared_relation,
@ -600,16 +600,16 @@ index_create(Oid heapRelationId,
/*
* Register constraint and dependencies for the index.
*
* If the index is from a CONSTRAINT clause, construct a pg_constraint entry.
* The index is then linked to the constraint, which in turn is linked to
* the table. If it's not a CONSTRAINT, make the dependency directly on
* the table.
* If the index is from a CONSTRAINT clause, construct a pg_constraint
* entry. The index is then linked to the constraint, which in turn is
* linked to the table. If it's not a CONSTRAINT, make the dependency
* directly on the table.
*
* We don't need a dependency on the namespace, because there'll be an
* indirect dependency via our parent table.
*
* During bootstrap we can't register any dependencies, and we don't try to
* make a constraint either.
* During bootstrap we can't register any dependencies, and we don't try
* to make a constraint either.
*/
if (!IsBootstrapProcessingMode())
{
@ -737,8 +737,8 @@ index_create(Oid heapRelationId,
* delayed till later (ALTER TABLE can save work in some cases with this).
* Otherwise, we call the AM routine that constructs the index.
*
* In normal processing mode, the heap and index relations are closed, but we
* continue to hold the ShareLock on the heap and the exclusive lock on
* In normal processing mode, the heap and index relations are closed, but
* we continue to hold the ShareLock on the heap and the exclusive lock on
* the index that we acquired above, until end of transaction.
*/
if (IsBootstrapProcessingMode())
@ -1243,8 +1243,8 @@ UpdateStats(Oid relid, double reltuples)
* tuple in-place. (Note: as of PG 8.0 this isn't called during
* bootstrap, but leave the code here for possible future use.)
*
* We also must cheat if reindexing pg_class itself, because the target index
* may presently not be part of the set of indexes that
* We also must cheat if reindexing pg_class itself, because the target
* index may presently not be part of the set of indexes that
* CatalogUpdateIndexes would update (see reindex_relation). In this case
* the stats updates will not be WAL-logged and so could be lost in a
* crash. This seems OK considering VACUUM does the same thing.
@ -1745,9 +1745,10 @@ reindex_relation(Oid relid, bool toast_too)
* entry for its own pg_class row because we do setNewRelfilenode() before
* we do index_build().
*
* Note that we also clear pg_class's rd_oidindex until the loop is done, so
* that that index can't be accessed either. This means we cannot safely
* generate new relation OIDs while in the loop; shouldn't be a problem.
* Note that we also clear pg_class's rd_oidindex until the loop is done,
* so that that index can't be accessed either. This means we cannot
* safely generate new relation OIDs while in the loop; shouldn't be a
* problem.
*/
is_pg_class = (RelationGetRelid(rel) == RelationRelationId);
doneIndexes = NIL;

View File

@ -13,7 +13,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/catalog/namespace.c,v 1.79 2005/10/15 02:49:14 momjian Exp $
* $PostgreSQL: pgsql/src/backend/catalog/namespace.c,v 1.80 2005/11/22 18:17:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -958,10 +958,11 @@ OpclassGetCandidates(Oid amid)
* something we already accepted? If so, keep only the one that
* appears earlier in the search path.
*
* If we have an ordered list from SearchSysCacheList (the normal case),
* then any conflicting opclass must immediately adjoin this one in
* the list, so we only need to look at the newest result item. If we
* have an unordered list, we have to scan the whole result list.
* If we have an ordered list from SearchSysCacheList (the normal
* case), then any conflicting opclass must immediately adjoin this
* one in the list, so we only need to look at the newest result item.
* If we have an unordered list, we have to scan the whole result
* list.
*/
if (resultList)
{

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/catalog/pg_aggregate.c,v 1.76 2005/10/15 02:49:14 momjian Exp $
* $PostgreSQL: pgsql/src/backend/catalog/pg_aggregate.c,v 1.77 2005/11/22 18:17:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -104,10 +104,10 @@ AggregateCreate(const char *aggName,
* enforce_generic_type_consistency, if transtype isn't polymorphic) must
* exactly match declared transtype.
*
* In the non-polymorphic-transtype case, it might be okay to allow a rettype
* that's binary-coercible to transtype, but I'm not quite convinced that
* it's either safe or useful. When transtype is polymorphic we *must*
* demand exact equality.
* In the non-polymorphic-transtype case, it might be okay to allow a
* rettype that's binary-coercible to transtype, but I'm not quite
* convinced that it's either safe or useful. When transtype is
* polymorphic we *must* demand exact equality.
*/
if (rettype != aggTransType)
ereport(ERROR,

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/catalog/pg_constraint.c,v 1.27 2005/10/15 02:49:14 momjian Exp $
* $PostgreSQL: pgsql/src/backend/catalog/pg_constraint.c,v 1.28 2005/11/22 18:17:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -497,8 +497,8 @@ RemoveConstraintById(Oid conId)
/*
* XXX for now, do nothing special when dropping a domain constraint
*
* Probably there should be some form of locking on the domain type, but
* we have no such concept at the moment.
* Probably there should be some form of locking on the domain type,
* but we have no such concept at the moment.
*/
}
else

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/catalog/pg_depend.c,v 1.16 2005/11/21 12:49:30 alvherre Exp $
* $PostgreSQL: pgsql/src/backend/catalog/pg_depend.c,v 1.17 2005/11/22 18:17:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -176,8 +176,8 @@ bool
objectIsInternalDependency(Oid classId, Oid objectId)
{
Relation depRel;
ScanKeyData key[2];
SysScanDesc scan;
ScanKeyData key[2];
SysScanDesc scan;
HeapTuple tup;
bool isdep = false;

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/catalog/pg_proc.c,v 1.136 2005/11/17 22:14:51 tgl Exp $
* $PostgreSQL: pgsql/src/backend/catalog/pg_proc.c,v 1.137 2005/11/22 18:17:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -119,7 +119,7 @@ ProcedureCreate(const char *procedureName,
* need to use deconstruct_array() since the array data is just going
* to look like a C array of OID values.
*/
ArrayType *allParamArray = (ArrayType *) DatumGetPointer(allParameterTypes);
ArrayType *allParamArray = (ArrayType *) DatumGetPointer(allParameterTypes);
allParamCount = ARR_DIMS(allParamArray)[0];
if (ARR_NDIM(allParamArray) != 1 ||
@ -584,8 +584,8 @@ fmgr_sql_validator(PG_FUNCTION_ARGS)
* expression results will be unresolvable. The check will be done at
* runtime instead.
*
* We can run the text through the raw parser though; this will at least
* catch silly syntactic errors.
* We can run the text through the raw parser though; this will at
* least catch silly syntactic errors.
*/
if (!haspolyarg)
{
@ -654,8 +654,8 @@ function_parse_error_transpose(const char *prosrc)
* Nothing to do unless we are dealing with a syntax error that has a
* cursor position.
*
* Some PLs may prefer to report the error position as an internal error to
* begin with, so check that too.
* Some PLs may prefer to report the error position as an internal error
* to begin with, so check that too.
*/
origerrposition = geterrposition();
if (origerrposition <= 0)
@ -770,8 +770,8 @@ match_prosrc_to_literal(const char *prosrc, const char *literal,
* string literal. It does not handle the SQL syntax for literals
* continued across line boundaries.
*
* We do the comparison a character at a time, not a byte at a time, so that
* we can do the correct cursorpos math.
* We do the comparison a character at a time, not a byte at a time, so
* that we can do the correct cursorpos math.
*/
while (*prosrc)
{

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/catalog/pg_shdepend.c,v 1.4 2005/11/21 12:49:30 alvherre Exp $
* $PostgreSQL: pgsql/src/backend/catalog/pg_shdepend.c,v 1.5 2005/11/22 18:17:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -415,8 +415,8 @@ updateAclDependencies(Oid classId, Oid objectId, Oid ownerId, bool isGrant,
/*
* Skip the owner: he has an OWNER shdep entry instead. (This is
* not just a space optimization; it makes ALTER OWNER easier.
* See notes in changeDependencyOnOwner.)
* not just a space optimization; it makes ALTER OWNER easier. See
* notes in changeDependencyOnOwner.)
*/
if (roleid == ownerId)
continue;
@ -585,8 +585,8 @@ checkSharedDependencies(Oid classId, Oid objectId)
/*
* Report seems unreasonably long, so reduce it to per-database info
*
* Note: we don't ever suppress per-database totals, which should be OK
* as long as there aren't too many databases ...
* Note: we don't ever suppress per-database totals, which should be
* OK as long as there aren't too many databases ...
*/
descs.len = 0; /* reset to empty */
descs.data[0] = '\0';
@ -1059,7 +1059,7 @@ isSharedObjectPinned(Oid classId, Oid objectId, Relation sdepRel)
/*
* shdepDropOwned
*
* Drop the objects owned by any one of the given RoleIds. If a role has
* Drop the objects owned by any one of the given RoleIds. If a role has
* access to an object, the grant will be removed as well (but the object
* will not, of course.)
*/
@ -1078,8 +1078,8 @@ shdepDropOwned(List *roleids, DropBehavior behavior)
foreach(cell, roleids)
{
Oid roleid = lfirst_oid(cell);
ScanKeyData key[2];
SysScanDesc scan;
ScanKeyData key[2];
SysScanDesc scan;
HeapTuple tuple;
/* Doesn't work for pinned objects */
@ -1093,9 +1093,9 @@ shdepDropOwned(List *roleids, DropBehavior behavior)
ereport(ERROR,
(errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST),
errmsg("cannot drop objects owned by %s because they are "
"required by the database system",
getObjectDescription(&obj))));
errmsg("cannot drop objects owned by %s because they are "
"required by the database system",
getObjectDescription(&obj))));
}
ScanKeyInit(&key[0],
@ -1120,10 +1120,10 @@ shdepDropOwned(List *roleids, DropBehavior behavior)
switch (sdepForm->deptype)
{
ObjectAddress obj;
GrantObjectType objtype;
ObjectAddress obj;
GrantObjectType objtype;
/* Shouldn't happen */
/* Shouldn't happen */
case SHARED_DEPENDENCY_PIN:
case SHARED_DEPENDENCY_INVALID:
elog(ERROR, "unexpected dependency type");
@ -1163,10 +1163,11 @@ shdepDropOwned(List *roleids, DropBehavior behavior)
false, DROP_CASCADE);
break;
case SHARED_DEPENDENCY_OWNER:
/*
* If there's a regular (non-shared) dependency on this
* object marked with DEPENDENCY_INTERNAL, skip this
* object. We will drop the referencer object instead.
* object. We will drop the referencer object instead.
*/
if (objectIsInternalDependency(sdepForm->classid, sdepForm->objid))
continue;
@ -1195,8 +1196,8 @@ shdepDropOwned(List *roleids, DropBehavior behavior)
void
shdepReassignOwned(List *roleids, Oid newrole)
{
Relation sdepRel;
ListCell *cell;
Relation sdepRel;
ListCell *cell;
sdepRel = heap_open(SharedDependRelationId, AccessShareLock);
@ -1218,9 +1219,10 @@ shdepReassignOwned(List *roleids, Oid newrole)
ereport(ERROR,
(errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST),
errmsg("cannot drop objects owned by %s because they are "
"required by the database system",
getObjectDescription(&obj))));
errmsg("cannot drop objects owned by %s because they are "
"required by the database system",
getObjectDescription(&obj))));
/*
* There's no need to tell the whole truth, which is that we
* didn't track these dependencies at all ...
@ -1235,7 +1237,7 @@ shdepReassignOwned(List *roleids, Oid newrole)
Anum_pg_shdepend_refobjid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(roleid));
scan = systable_beginscan(sdepRel, SharedDependReferenceIndexId, true,
SnapshotNow, 2, key);
@ -1256,9 +1258,9 @@ shdepReassignOwned(List *roleids, Oid newrole)
continue;
/*
* If there's a regular (non-shared) dependency on this
* object marked with DEPENDENCY_INTERNAL, skip this
* object. We will alter the referencer object instead.
* If there's a regular (non-shared) dependency on this object
* marked with DEPENDENCY_INTERNAL, skip this object. We will
* alter the referencer object instead.
*/
if (objectIsInternalDependency(sdepForm->classid, sdepForm->objid))
continue;

View File

@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/aggregatecmds.c,v 1.30 2005/10/15 02:49:14 momjian Exp $
* $PostgreSQL: pgsql/src/backend/commands/aggregatecmds.c,v 1.31 2005/11/22 18:17:08 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@ -119,8 +119,8 @@ DefineAggregate(List *names, List *parameters)
/*
* look up the aggregate's base type (input datatype) and transtype.
*
* We have historically allowed the command to look like basetype = 'ANY' so
* we must do a case-insensitive comparison for the name ANY. Ugh.
* We have historically allowed the command to look like basetype = 'ANY'
* so we must do a case-insensitive comparison for the name ANY. Ugh.
*
* basetype can be a pseudo-type, but transtype can't, since we need to be
* able to store values of the transtype. However, we can allow

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.89 2005/10/15 02:49:15 momjian Exp $
* $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.90 2005/11/22 18:17:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -891,9 +891,9 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
* If we didn't find as many tuples as we wanted then we're done. No sort
* is needed, since they're already in order.
*
* Otherwise we need to sort the collected tuples by position (itempointer).
* It's not worth worrying about corner cases where the tuples are already
* sorted.
* Otherwise we need to sort the collected tuples by position
* (itempointer). It's not worth worrying about corner cases where the
* tuples are already sorted.
*/
if (numrows == targrows)
qsort((void *) rows, numrows, sizeof(HeapTuple), compare_rows);
@ -1849,9 +1849,9 @@ compute_scalar_stats(VacAttrStatsP stats,
* Now scan the values in order, find the most common ones, and also
* accumulate ordering-correlation statistics.
*
* To determine which are most common, we first have to count the number
* of duplicates of each value. The duplicates are adjacent in the
* sorted list, so a brute-force approach is to compare successive
* To determine which are most common, we first have to count the
* number of duplicates of each value. The duplicates are adjacent in
* the sorted list, so a brute-force approach is to compare successive
* datum values until we find two that are not equal. However, that
* requires N-1 invocations of the datum comparison routine, which are
* completely redundant with work that was done during the sort. (The

View File

@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/async.c,v 1.127 2005/11/03 17:11:34 alvherre Exp $
* $PostgreSQL: pgsql/src/backend/commands/async.c,v 1.128 2005/11/22 18:17:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -820,18 +820,18 @@ EnableNotifyInterrupt(void)
* steps. (A very small time window, perhaps, but Murphy's Law says you
* can hit it...) Instead, we first set the enable flag, then test the
* occurred flag. If we see an unserviced interrupt has occurred, we
* re-clear the enable flag before going off to do the service work.
* (That prevents re-entrant invocation of ProcessIncomingNotify() if
* another interrupt occurs.) If an interrupt comes in between the setting
* and clearing of notifyInterruptEnabled, then it will have done the
* service work and left notifyInterruptOccurred zero, so we have to check
* again after clearing enable. The whole thing has to be in a loop in
* case another interrupt occurs while we're servicing the first. Once we
* get out of the loop, enable is set and we know there is no unserviced
* re-clear the enable flag before going off to do the service work. (That
* prevents re-entrant invocation of ProcessIncomingNotify() if another
* interrupt occurs.) If an interrupt comes in between the setting and
* clearing of notifyInterruptEnabled, then it will have done the service
* work and left notifyInterruptOccurred zero, so we have to check again
* after clearing enable. The whole thing has to be in a loop in case
* another interrupt occurs while we're servicing the first. Once we get
* out of the loop, enable is set and we know there is no unserviced
* interrupt.
*
* NB: an overenthusiastic optimizing compiler could easily break this code.
* Hopefully, they all understand what "volatile" means these days.
* NB: an overenthusiastic optimizing compiler could easily break this
* code. Hopefully, they all understand what "volatile" means these days.
*/
for (;;)
{

View File

@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.141 2005/10/29 00:31:51 petere Exp $
* $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.142 2005/11/22 18:17:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -177,8 +177,8 @@ cluster(ClusterStmt *stmt)
/*
* Create special memory context for cross-transaction storage.
*
* Since it is a child of PortalContext, it will go away even in case of
* error.
* Since it is a child of PortalContext, it will go away even in case
* of error.
*/
cluster_context = AllocSetContextCreate(PortalContext,
"Cluster",
@ -242,9 +242,9 @@ cluster_rel(RelToCluster *rvtc, bool recheck)
* Since we may open a new transaction for each relation, we have to check
* that the relation still is what we think it is.
*
* If this is a single-transaction CLUSTER, we can skip these tests. We *must*
* skip the one on indisclustered since it would reject an attempt to
* cluster a not-previously-clustered index.
* If this is a single-transaction CLUSTER, we can skip these tests. We
* *must* skip the one on indisclustered since it would reject an attempt
* to cluster a not-previously-clustered index.
*/
if (recheck)
{
@ -360,9 +360,9 @@ check_index_is_clusterable(Relation OldHeap, Oid indexOid, bool recheck)
RelationGetRelationName(OldIndex)),
recheck
? errhint("You may be able to work around this by marking column \"%s\" NOT NULL, or use ALTER TABLE ... SET WITHOUT CLUSTER to remove the cluster specification from the table.",
NameStr(OldHeap->rd_att->attrs[colno - 1]->attname))
NameStr(OldHeap->rd_att->attrs[colno - 1]->attname))
: errhint("You may be able to work around this by marking column \"%s\" NOT NULL.",
NameStr(OldHeap->rd_att->attrs[colno - 1]->attname))));
NameStr(OldHeap->rd_att->attrs[colno - 1]->attname))));
}
else if (colno < 0)
{
@ -651,12 +651,13 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex)
* We cannot simply pass the tuple to heap_insert(), for several
* reasons:
*
* 1. heap_insert() will overwrite the commit-status fields of the tuple
* it's handed. This would trash the source relation, which is bad
* news if we abort later on. (This was a bug in releases thru 7.0)
* 1. heap_insert() will overwrite the commit-status fields of the
* tuple it's handed. This would trash the source relation, which is
* bad news if we abort later on. (This was a bug in releases thru
* 7.0)
*
* 2. We'd like to squeeze out the values of any dropped columns, both to
* save space and to ensure we have no corner-case failures. (It's
* 2. We'd like to squeeze out the values of any dropped columns, both
* to save space and to ensure we have no corner-case failures. (It's
* possible for example that the new table hasn't got a TOAST table
* and so is unable to store any large values of dropped cols.)
*
@ -788,10 +789,10 @@ swap_relation_files(Oid r1, Oid r2)
* happen in CLUSTER if there were dropped columns in the old table, and
* in ALTER TABLE when adding or changing type of columns.
*
* NOTE: at present, a TOAST table's only dependency is the one on its owning
* table. If more are ever created, we'd need to use something more
* selective than deleteDependencyRecordsFor() to get rid of only the link
* we want.
* NOTE: at present, a TOAST table's only dependency is the one on its
* owning table. If more are ever created, we'd need to use something
* more selective than deleteDependencyRecordsFor() to get rid of only the
* link we want.
*/
if (relform1->reltoastrelid || relform2->reltoastrelid)
{

View File

@ -7,7 +7,7 @@
* Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/comment.c,v 1.84 2005/10/15 02:49:15 momjian Exp $
* $PostgreSQL: pgsql/src/backend/commands/comment.c,v 1.85 2005/11/22 18:17:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -445,8 +445,8 @@ CommentDatabase(List *qualname, char *comment)
* comment on a database other than the current one. Someday this might be
* improved, but it would take a redesigned infrastructure.
*
* When loading a dump, we may see a COMMENT ON DATABASE for the old name of
* the database. Erroring out would prevent pg_restore from completing
* When loading a dump, we may see a COMMENT ON DATABASE for the old name
* of the database. Erroring out would prevent pg_restore from completing
* (which is really pg_restore's fault, but for now we will work around
* the problem here). Consensus is that the best fix is to treat wrong
* database name as a WARNING not an ERROR.

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/conversioncmds.c,v 1.25 2005/11/21 12:49:30 alvherre Exp $
* $PostgreSQL: pgsql/src/backend/commands/conversioncmds.c,v 1.26 2005/11/22 18:17:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -31,7 +31,7 @@
#include "utils/syscache.h"
static void AlterConversionOwner_internal(Relation rel, Oid conversionOid,
Oid newOwnerId);
Oid newOwnerId);
/*
* CREATE CONVERSION
@ -107,7 +107,7 @@ DropConversionCommand(List *name, DropBehavior behavior, bool missing_ok)
conversionOid = FindConversionByName(name);
if (!OidIsValid(conversionOid))
{
if (! missing_ok)
if (!missing_ok)
{
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
@ -117,7 +117,7 @@ DropConversionCommand(List *name, DropBehavior behavior, bool missing_ok)
else
{
ereport(NOTICE,
(errmsg("conversion \"%s\" does not exist, skipping",
(errmsg("conversion \"%s\" does not exist, skipping",
NameListToString(name))));
}
@ -218,7 +218,7 @@ AlterConversionOwner_oid(Oid conversionOid, Oid newOwnerId)
Relation rel;
rel = heap_open(ConversionRelationId, RowExclusiveLock);
AlterConversionOwner_internal(rel, conversionOid, newOwnerId);
heap_close(rel, NoLock);
@ -234,7 +234,7 @@ static void
AlterConversionOwner_internal(Relation rel, Oid conversionOid, Oid newOwnerId)
{
Form_pg_conversion convForm;
HeapTuple tup;
HeapTuple tup;
Assert(RelationGetRelid(rel) == ConversionRelationId);

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/copy.c,v 1.254 2005/11/03 17:11:34 alvherre Exp $
* $PostgreSQL: pgsql/src/backend/commands/copy.c,v 1.255 2005/11/22 18:17:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -127,8 +127,8 @@ typedef struct CopyStateData
/*
* These variables are used to reduce overhead in textual COPY FROM.
*
* attribute_buf holds the separated, de-escaped text for each field of the
* current line. The CopyReadAttributes functions return arrays of
* attribute_buf holds the separated, de-escaped text for each field of
* the current line. The CopyReadAttributes functions return arrays of
* pointers into this buffer. We avoid palloc/pfree overhead by re-using
* the buffer on each cycle.
*/
@ -2085,8 +2085,8 @@ CopyReadLineText(CopyState cstate)
* examine; any characters from raw_buf_index to raw_buf_ptr have been
* determined to be part of the line, but not yet transferred to line_buf.
*
* For a little extra speed within the loop, we copy raw_buf and raw_buf_len
* into local variables.
* For a little extra speed within the loop, we copy raw_buf and
* raw_buf_len into local variables.
*/
copy_raw_buf = cstate->raw_buf;
raw_buf_ptr = cstate->raw_buf_index;
@ -2148,8 +2148,8 @@ CopyReadLineText(CopyState cstate)
/*
* If need more data, go back to loop top to load it.
*
* Note that if we are at EOF, c will wind up as '\0' because of
* the guaranteed pad of raw_buf.
* Note that if we are at EOF, c will wind up as '\0' because
* of the guaranteed pad of raw_buf.
*/
if (raw_buf_ptr >= copy_buf_len && !hit_eof)
{
@ -2283,8 +2283,8 @@ CopyReadLineText(CopyState cstate)
* Do we need to be careful about trailing bytes of multibyte
* characters? (See note above about client_only_encoding)
*
* We assume here that pg_encoding_mblen only looks at the first byte of
* the character!
* We assume here that pg_encoding_mblen only looks at the first byte
* of the character!
*/
if (cstate->client_only_encoding)
{
@ -2369,8 +2369,8 @@ CopyReadLineCSV(CopyState cstate)
* examine; any characters from raw_buf_index to raw_buf_ptr have been
* determined to be part of the line, but not yet transferred to line_buf.
*
* For a little extra speed within the loop, we copy raw_buf and raw_buf_len
* into local variables.
* For a little extra speed within the loop, we copy raw_buf and
* raw_buf_len into local variables.
*/
copy_raw_buf = cstate->raw_buf;
raw_buf_ptr = cstate->raw_buf_index;
@ -2475,8 +2475,8 @@ CopyReadLineCSV(CopyState cstate)
/*
* If need more data, go back to loop top to load it.
*
* Note that if we are at EOF, c will wind up as '\0' because of
* the guaranteed pad of raw_buf.
* Note that if we are at EOF, c will wind up as '\0' because
* of the guaranteed pad of raw_buf.
*/
if (raw_buf_ptr >= copy_buf_len && !hit_eof)
{
@ -2621,8 +2621,8 @@ CopyReadLineCSV(CopyState cstate)
* Do we need to be careful about trailing bytes of multibyte
* characters? (See note above about client_only_encoding)
*
* We assume here that pg_encoding_mblen only looks at the first byte of
* the character!
* We assume here that pg_encoding_mblen only looks at the first byte
* of the character!
*/
if (cstate->client_only_encoding)
{

View File

@ -15,7 +15,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/dbcommands.c,v 1.174 2005/11/22 15:24:17 adunstan Exp $
* $PostgreSQL: pgsql/src/backend/commands/dbcommands.c,v 1.175 2005/11/22 18:17:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -346,8 +346,8 @@ createdb(const CreatedbStmt *stmt)
src_vacuumxid = src_frozenxid = GetCurrentTransactionId();
/*
* Preassign OID for pg_database tuple, so that we can compute db path.
* We have to open pg_database to do this, but we don't want to take
* Preassign OID for pg_database tuple, so that we can compute db path. We
* have to open pg_database to do this, but we don't want to take
* ExclusiveLock yet, so just do it and close again.
*/
pg_database_rel = heap_open(DatabaseRelationId, AccessShareLock);
@ -512,14 +512,14 @@ createdb(const CreatedbStmt *stmt)
*
* (Both of these were real bugs in releases 8.0 through 8.0.3.)
*
* In PITR replay, the first of these isn't an issue, and the second is
* only a risk if the CREATE DATABASE and subsequent template database
* change both occur while a base backup is being taken. There doesn't
* seem to be much we can do about that except document it as a
* limitation.
* In PITR replay, the first of these isn't an issue, and the second
* is only a risk if the CREATE DATABASE and subsequent template
* database change both occur while a base backup is being taken.
* There doesn't seem to be much we can do about that except document
* it as a limitation.
*
* Perhaps if we ever implement CREATE DATABASE in a less cheesy way, we
* can avoid this.
* Perhaps if we ever implement CREATE DATABASE in a less cheesy way,
* we can avoid this.
*/
RequestCheckpoint(true, false);
@ -586,19 +586,19 @@ dropdb(const char *dbname, bool missing_ok)
if (!get_db_info(dbname, &db_id, NULL, NULL,
&db_istemplate, NULL, NULL, NULL, NULL, NULL))
{
if (! missing_ok)
if (!missing_ok)
{
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_DATABASE),
errmsg("database \"%s\" does not exist", dbname)));
(errcode(ERRCODE_UNDEFINED_DATABASE),
errmsg("database \"%s\" does not exist", dbname)));
}
else
{
/* Close pg_database, release the lock, since we changed nothing */
heap_close(pgdbrel, ExclusiveLock);
ereport(NOTICE,
(errmsg("database \"%s\" does not exist, skipping",
ereport(NOTICE,
(errmsg("database \"%s\" does not exist, skipping",
dbname)));
return;
@ -658,8 +658,8 @@ dropdb(const char *dbname, bool missing_ok)
/*
* Delete any comments associated with the database
*
* NOTE: this is probably dead code since any such comments should have been
* in that database, not mine.
* NOTE: this is probably dead code since any such comments should have
* been in that database, not mine.
*/
DeleteComments(db_id, DatabaseRelationId, 0);

View File

@ -7,7 +7,7 @@
* Portions Copyright (c) 1994-5, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/explain.c,v 1.139 2005/10/21 16:43:33 tgl Exp $
* $PostgreSQL: pgsql/src/backend/commands/explain.c,v 1.140 2005/11/22 18:17:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -199,9 +199,9 @@ ExplainOneQuery(Query *query, ExplainStmt *stmt, TupOutputState *tstate)
/*
* Update snapshot command ID to ensure this query sees results of any
* previously executed queries. (It's a bit cheesy to modify
* ActiveSnapshot without making a copy, but for the limited ways in
* which EXPLAIN can be invoked, I think it's OK, because the active
* snapshot shouldn't be shared with anything else anyway.)
* ActiveSnapshot without making a copy, but for the limited ways in which
* EXPLAIN can be invoked, I think it's OK, because the active snapshot
* shouldn't be shared with anything else anyway.)
*/
ActiveSnapshot->curcid = GetCurrentCommandId();

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.135 2005/11/07 17:36:45 tgl Exp $
* $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.136 2005/11/22 18:17:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -358,10 +358,10 @@ DefineIndex(RangeVar *heapRelation,
* we don't cascade the notnull constraint(s) either; but this is
* pretty debatable.
*
* XXX: possible future improvement: when being called from ALTER TABLE,
* it would be more efficient to merge this with the outer ALTER
* TABLE, so as to avoid two scans. But that seems to complicate
* DefineIndex's API unduly.
* XXX: possible future improvement: when being called from ALTER
* TABLE, it would be more efficient to merge this with the outer
* ALTER TABLE, so as to avoid two scans. But that seems to
* complicate DefineIndex's API unduly.
*/
if (cmds)
AlterTableInternal(relationId, cmds, false);
@ -568,8 +568,8 @@ GetIndexOpClass(List *opclass, Oid attrType,
* Release 7.1 removes lztext_ops, so suppress that too for a while. tgl
* 2000/07/30
*
* Release 7.2 renames timestamp_ops to timestamptz_ops, so suppress that too
* for awhile. I'm starting to think we need a better approach. tgl
* Release 7.2 renames timestamp_ops to timestamptz_ops, so suppress that
* too for awhile. I'm starting to think we need a better approach. tgl
* 2000/10/01
*
* Release 8.0 removes bigbox_ops (which was dead code for a long while

View File

@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/opclasscmds.c,v 1.39 2005/11/21 12:49:31 alvherre Exp $
* $PostgreSQL: pgsql/src/backend/commands/opclasscmds.c,v 1.40 2005/11/22 18:17:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -59,7 +59,7 @@ static void addClassMember(List **list, OpClassMember *member, bool isProc);
static void storeOperators(Oid opclassoid, List *operators);
static void storeProcedures(Oid opclassoid, List *procedures);
static void AlterOpClassOwner_internal(Relation rel, HeapTuple tuple,
Oid newOwnerId);
Oid newOwnerId);
/*
@ -894,7 +894,7 @@ AlterOpClassOwner_oid(Oid opcOid, Oid newOwnerId)
tup = SearchSysCacheCopy(CLAOID,
ObjectIdGetDatum(opcOid),
0, 0, 0);
if (!HeapTupleIsValid(tup)) /* shouldn't happen */
if (!HeapTupleIsValid(tup)) /* shouldn't happen */
elog(ERROR, "cache lookup failed for opclass %u", opcOid);
AlterOpClassOwner_internal(rel, tup, newOwnerId);
@ -933,7 +933,7 @@ AlterOpClassOwner(List *name, const char *access_method, Oid newOwnerId)
if (schemaname)
{
Oid namespaceOid;
Oid namespaceOid;
namespaceOid = LookupExplicitNamespace(schemaname);
@ -950,7 +950,7 @@ AlterOpClassOwner(List *name, const char *access_method, Oid newOwnerId)
}
else
{
Oid opcOid;
Oid opcOid;
opcOid = OpclassnameGetOpcid(amOid, opcname);
if (!OidIsValid(opcOid))

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/schemacmds.c,v 1.37 2005/11/21 12:49:31 alvherre Exp $
* $PostgreSQL: pgsql/src/backend/commands/schemacmds.c,v 1.38 2005/11/22 18:17:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -175,7 +175,7 @@ RemoveSchema(List *names, DropBehavior behavior, bool missing_ok)
else
{
ereport(NOTICE,
(errmsg("schema \"%s\" does not exist, skipping",
(errmsg("schema \"%s\" does not exist, skipping",
namespaceName)));
}

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/sequence.c,v 1.125 2005/10/15 02:49:15 momjian Exp $
* $PostgreSQL: pgsql/src/backend/commands/sequence.c,v 1.126 2005/11/22 18:17:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -219,8 +219,8 @@ DefineSequence(CreateSeqStmt *seq)
/*
* Two special hacks here:
*
* 1. Since VACUUM does not process sequences, we have to force the tuple to
* have xmin = FrozenTransactionId now. Otherwise it would become
* 1. Since VACUUM does not process sequences, we have to force the tuple
* to have xmin = FrozenTransactionId now. Otherwise it would become
* invisible to SELECTs after 2G transactions. It is okay to do this
* because if the current transaction aborts, no other xact will ever
* examine the sequence tuple anyway.
@ -459,10 +459,10 @@ nextval_internal(Oid relid)
* fetch count to grab SEQ_LOG_VALS more values than we actually need to
* cache. (These will then be usable without logging.)
*
* If this is the first nextval after a checkpoint, we must force a new WAL
* record to be written anyway, else replay starting from the checkpoint
* would fail to advance the sequence past the logged values. In this
* case we may as well fetch extra values.
* If this is the first nextval after a checkpoint, we must force a new
* WAL record to be written anyway, else replay starting from the
* checkpoint would fail to advance the sequence past the logged values.
* In this case we may as well fetch extra values.
*/
if (log < fetch)
{

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.175 2005/11/21 12:49:31 alvherre Exp $
* $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.176 2005/11/22 18:17:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -907,9 +907,9 @@ MergeAttributes(List *schema, List *supers, bool istemp,
* If default expr could contain any vars, we'd need to fix
* 'em, but it can't; so default is ready to apply to child.
*
* If we already had a default from some prior parent, check to
* see if they are the same. If so, no problem; if not, mark
* the column as having a bogus default. Below, we will
* If we already had a default from some prior parent, check
* to see if they are the same. If so, no problem; if not,
* mark the column as having a bogus default. Below, we will
* complain if the bogus default isn't overridden by the child
* schema.
*/
@ -1124,9 +1124,9 @@ StoreCatalogInheritance(Oid relationId, List *supers)
* Also enter dependencies on the direct ancestors, and make sure they are
* marked with relhassubclass = true.
*
* (Once upon a time, both direct and indirect ancestors were found here and
* then entered into pg_ipl. Since that catalog doesn't exist anymore,
* there's no need to look for indirect ancestors.)
* (Once upon a time, both direct and indirect ancestors were found here
* and then entered into pg_ipl. Since that catalog doesn't exist
* anymore, there's no need to look for indirect ancestors.)
*/
relation = heap_open(InheritsRelationId, RowExclusiveLock);
desc = RelationGetDescr(relation);
@ -1216,8 +1216,8 @@ setRelhassubclassInRelation(Oid relationId, bool relhassubclass)
/*
* Fetch a modifiable copy of the tuple, modify it, update pg_class.
*
* If the tuple already has the right relhassubclass setting, we don't need
* to update it, but we still need to issue an SI inval message.
* If the tuple already has the right relhassubclass setting, we don't
* need to update it, but we still need to issue an SI inval message.
*/
relationRelation = heap_open(RelationRelationId, RowExclusiveLock);
tuple = SearchSysCacheCopy(RELOID,
@ -1301,8 +1301,8 @@ renameatt(Oid myrelid,
* attribute in all classes that inherit from 'relname' (as well as in
* 'relname').
*
* any permissions or problems with duplicate attributes will cause the whole
* transaction to abort, which is what we want -- all or nothing.
* any permissions or problems with duplicate attributes will cause the
* whole transaction to abort, which is what we want -- all or nothing.
*/
if (recurse)
{
@ -1632,8 +1632,8 @@ update_ri_trigger_args(Oid relid,
/*
* It is an RI trigger, so parse the tgargs bytea.
*
* NB: we assume the field will never be compressed or moved out of line;
* so does trigger.c ...
* NB: we assume the field will never be compressed or moved out of
* line; so does trigger.c ...
*/
tgnargs = pg_trigger->tgnargs;
val = (bytea *)
@ -2392,9 +2392,9 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap)
* If we need to rewrite the table, the operation has to be propagated to
* tables that use this table's rowtype as a column type.
*
* (Eventually this will probably become true for scans as well, but at the
* moment a composite type does not enforce any constraints, so it's not
* necessary/appropriate to enforce them just during ALTER.)
* (Eventually this will probably become true for scans as well, but at
* the moment a composite type does not enforce any constraints, so it's
* not necessary/appropriate to enforce them just during ALTER.)
*/
if (newrel)
find_composite_type_dependencies(oldrel->rd_rel->reltype,
@ -2836,9 +2836,9 @@ ATPrepAddColumn(List **wqueue, Relation rel, bool recurse,
/*
* Recurse to add the column to child classes, if requested.
*
* We must recurse one level at a time, so that multiply-inheriting children
* are visited the right number of times and end up with the right
* attinhcount.
* We must recurse one level at a time, so that multiply-inheriting
* children are visited the right number of times and end up with the
* right attinhcount.
*/
if (recurse)
{
@ -3038,8 +3038,8 @@ ATExecAddColumn(AlteredTableInfo *tab, Relation rel,
/*
* Tell Phase 3 to fill in the default expression, if there is one.
*
* If there is no default, Phase 3 doesn't have to do anything, because that
* effectively means that the default is NULL. The heap tuple access
* If there is no default, Phase 3 doesn't have to do anything, because
* that effectively means that the default is NULL. The heap tuple access
* routines always check for attnum > # of attributes in tuple, and return
* NULL if so, so without any modification of the tuple data we will get
* the effect of NULL values in the new column.
@ -3832,8 +3832,8 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
/*
* Validity and permissions checks
*
* Note: REFERENCES permissions checks are redundant with CREATE TRIGGER, but
* we may as well error out sooner instead of later.
* Note: REFERENCES permissions checks are redundant with CREATE TRIGGER,
* but we may as well error out sooner instead of later.
*/
if (pkrel->rd_rel->relkind != RELKIND_RELATION)
ereport(ERROR,
@ -3931,9 +3931,9 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
* pktypoid[i] is the primary key table's i'th key's type fktypoid[i]
* is the foreign key table's i'th key's type
*
* Note that we look for an operator with the PK type on the left; when
* the types are different this is critical because the PK index will
* need operators with the indexkey on the left. (Ordinarily both
* Note that we look for an operator with the PK type on the left;
* when the types are different this is critical because the PK index
* will need operators with the indexkey on the left. (Ordinarily both
* commutator operators will exist if either does, but we won't get
* the right answer from the test below on opclass membership unless
* we select the proper operator.)
@ -4861,10 +4861,10 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
* the column type, because build_column_default itself will try to
* coerce, and will not issue the error message we want if it fails.)
*
* We remove any implicit coercion steps at the top level of the old default
* expression; this has been agreed to satisfy the principle of least
* surprise. (The conversion to the new column type should act like it
* started from what the user sees as the stored expression, and the
* We remove any implicit coercion steps at the top level of the old
* default expression; this has been agreed to satisfy the principle of
* least surprise. (The conversion to the new column type should act like
* it started from what the user sees as the stored expression, and the
* implicit coercions aren't going to be shown.)
*/
if (attTup->atthasdef)
@ -4895,8 +4895,8 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
* the info before executing ALTER TYPE, though, else the deparser will
* get confused.
*
* There could be multiple entries for the same object, so we must check to
* ensure we process each one only once. Note: we assume that an index
* There could be multiple entries for the same object, so we must check
* to ensure we process each one only once. Note: we assume that an index
* that implements a constraint will not show a direct dependency on the
* column.
*/
@ -5781,9 +5781,9 @@ copy_relation_data(Relation rel, SMgrRelation dst)
* to commit the transaction. (For a temp rel we don't care since the rel
* will be uninteresting after a crash anyway.)
*
* It's obvious that we must do this when not WAL-logging the copy. It's less
* obvious that we have to do it even if we did WAL-log the copied pages.
* The reason is that since we're copying outside shared buffers, a
* It's obvious that we must do this when not WAL-logging the copy. It's
* less obvious that we have to do it even if we did WAL-log the copied
* pages. The reason is that since we're copying outside shared buffers, a
* CHECKPOINT occurring during the copy has no way to flush the previously
* written data to disk (indeed it won't know the new rel even exists). A
* crash later on would replay WAL from the checkpoint, therefore it
@ -5841,12 +5841,12 @@ AlterTableCreateToastTable(Oid relOid, bool silent)
/*
* Toast table is shared if and only if its parent is.
*
* We cannot allow toasting a shared relation after initdb (because there's
* no way to mark it toasted in other databases' pg_class). Unfortunately
* we can't distinguish initdb from a manually started standalone backend
* (toasting happens after the bootstrap phase, so checking
* IsBootstrapProcessingMode() won't work). However, we can at least
* prevent this mistake under normal multi-user operation.
* We cannot allow toasting a shared relation after initdb (because
* there's no way to mark it toasted in other databases' pg_class).
* Unfortunately we can't distinguish initdb from a manually started
* standalone backend (toasting happens after the bootstrap phase, so
* checking IsBootstrapProcessingMode() won't work). However, we can at
* least prevent this mistake under normal multi-user operation.
*/
shared_relation = rel->rd_rel->relisshared;
if (shared_relation && IsUnderPostmaster)

View File

@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/trigger.c,v 1.196 2005/11/20 19:49:07 tgl Exp $
* $PostgreSQL: pgsql/src/backend/commands/trigger.c,v 1.197 2005/11/22 18:17:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -566,8 +566,8 @@ RemoveTriggerById(Oid trigOid)
* (and this one too!) are sent SI message to make them rebuild relcache
* entries.
*
* Note this is OK only because we have AccessExclusiveLock on the rel, so no
* one else is creating/deleting triggers on this rel at the same time.
* Note this is OK only because we have AccessExclusiveLock on the rel, so
* no one else is creating/deleting triggers on this rel at the same time.
*/
pgrel = heap_open(RelationRelationId, RowExclusiveLock);
tuple = SearchSysCacheCopy(RELOID,
@ -1182,8 +1182,8 @@ equalTriggerDescs(TriggerDesc *trigdesc1, TriggerDesc *trigdesc2)
* we have the same triggers with the same types, the derived index data
* should match.
*
* As of 7.3 we assume trigger set ordering is significant in the comparison;
* so we just compare corresponding slots of the two sets.
* As of 7.3 we assume trigger set ordering is significant in the
* comparison; so we just compare corresponding slots of the two sets.
*/
if (trigdesc1 != NULL)
{
@ -2533,13 +2533,14 @@ AfterTriggerEndQuery(EState *estate)
* Process all immediate-mode triggers queued by the query, and move the
* deferred ones to the main list of deferred events.
*
* Notice that we decide which ones will be fired, and put the deferred ones
* on the main list, before anything is actually fired. This ensures
* Notice that we decide which ones will be fired, and put the deferred
* ones on the main list, before anything is actually fired. This ensures
* reasonably sane behavior if a trigger function does SET CONSTRAINTS ...
* IMMEDIATE: all events we have decided to defer will be available for it
* to fire.
*
* If we find no firable events, we don't have to increment firing_counter.
* If we find no firable events, we don't have to increment
* firing_counter.
*/
events = &afterTriggers->query_stack[afterTriggers->query_depth];
if (afterTriggerMarkEvents(events, &afterTriggers->events, true))
@ -3026,8 +3027,8 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt)
* list of previously deferred events to fire any that have now become
* immediate.
*
* Obviously, if this was SET ... DEFERRED then it can't have converted any
* unfired events to immediate, so we need do nothing in that case.
* Obviously, if this was SET ... DEFERRED then it can't have converted
* any unfired events to immediate, so we need do nothing in that case.
*/
if (!stmt->deferred)
{

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/typecmds.c,v 1.84 2005/11/21 12:49:31 alvherre Exp $
* $PostgreSQL: pgsql/src/backend/commands/typecmds.c,v 1.85 2005/11/22 18:17:09 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@ -425,7 +425,7 @@ RemoveType(List *names, DropBehavior behavior, bool missing_ok)
else
{
ereport(NOTICE,
(errmsg("type \"%s\" does not exist, skipping",
(errmsg("type \"%s\" does not exist, skipping",
TypeNameToString(typename))));
}
@ -820,7 +820,7 @@ RemoveDomain(List *names, DropBehavior behavior, bool missing_ok)
else
{
ereport(NOTICE,
(errmsg("type \"%s\" does not exist, skipping",
(errmsg("type \"%s\" does not exist, skipping",
TypeNameToString(typename))));
}
@ -879,8 +879,8 @@ findTypeInputFunction(List *procname, Oid typeOid)
* Input functions can take a single argument of type CSTRING, or three
* arguments (string, typioparam OID, typmod).
*
* For backwards compatibility we allow OPAQUE in place of CSTRING; if we see
* this, we issue a warning and fix up the pg_proc entry.
* For backwards compatibility we allow OPAQUE in place of CSTRING; if we
* see this, we issue a warning and fix up the pg_proc entry.
*/
argList[0] = CSTRINGOID;
@ -1864,8 +1864,8 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
/*
* Deparse it to produce text for consrc.
*
* Since VARNOs aren't allowed in domain constraints, relation context isn't
* required as anything other than a shell.
* Since VARNOs aren't allowed in domain constraints, relation context
* isn't required as anything other than a shell.
*/
ccsrc = deparse_expression(expr,
deparse_context_for(domainName,

View File

@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $PostgreSQL: pgsql/src/backend/commands/user.c,v 1.165 2005/11/21 12:49:31 alvherre Exp $
* $PostgreSQL: pgsql/src/backend/commands/user.c,v 1.166 2005/11/22 18:17:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -1124,15 +1124,15 @@ GrantRole(GrantRoleStmt *stmt)
* Drop the objects owned by a given list of roles.
*/
void
DropOwnedObjects(DropOwnedStmt *stmt)
DropOwnedObjects(DropOwnedStmt * stmt)
{
List *role_ids = roleNamesToIds(stmt->roles);
ListCell *cell;
List *role_ids = roleNamesToIds(stmt->roles);
ListCell *cell;
/* Check privileges */
foreach (cell, role_ids)
foreach(cell, role_ids)
{
Oid roleid = lfirst_oid(cell);
Oid roleid = lfirst_oid(cell);
if (!has_privs_of_role(GetUserId(), roleid))
ereport(ERROR,
@ -1150,16 +1150,16 @@ DropOwnedObjects(DropOwnedStmt *stmt)
* Give the objects owned by a given list of roles away to another user.
*/
void
ReassignOwnedObjects(ReassignOwnedStmt *stmt)
ReassignOwnedObjects(ReassignOwnedStmt * stmt)
{
List *role_ids = roleNamesToIds(stmt->roles);
ListCell *cell;
Oid newrole;
/* Check privileges */
foreach (cell, role_ids)
foreach(cell, role_ids)
{
Oid roleid = lfirst_oid(cell);
Oid roleid = lfirst_oid(cell);
if (!has_privs_of_role(GetUserId(), roleid))
ereport(ERROR,
@ -1171,9 +1171,9 @@ ReassignOwnedObjects(ReassignOwnedStmt *stmt)
newrole = get_roleid_checked(stmt->newrole);
if (!has_privs_of_role(GetUserId(), newrole))
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("permission denied to reassign objects")));
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("permission denied to reassign objects")));
/* Ok, do it */
shdepReassignOwned(role_ids, newrole);

View File

@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.318 2005/11/20 19:49:07 tgl Exp $
* $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.319 2005/11/22 18:17:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -313,8 +313,8 @@ vacuum(VacuumStmt *vacstmt, List *relids)
* compared to telling people to use two operations. See pgsql-hackers
* discussion of 27-Nov-2004, and comments below for update_hint_bits().
*
* Note: this is enforced here, and not in the grammar, since (a) we can give
* a better error message, and (b) we might want to allow it again
* Note: this is enforced here, and not in the grammar, since (a) we can
* give a better error message, and (b) we might want to allow it again
* someday.
*/
if (vacstmt->vacuum && vacstmt->full && vacstmt->freeze)
@ -332,8 +332,8 @@ vacuum(VacuumStmt *vacstmt, List *relids)
/*
* Create special memory context for cross-transaction storage.
*
* Since it is a child of PortalContext, it will go away eventually even if
* we suffer an error; there's no need for special abort cleanup logic.
* Since it is a child of PortalContext, it will go away eventually even
* if we suffer an error; there's no need for special abort cleanup logic.
*/
vac_context = AllocSetContextCreate(PortalContext,
"Vacuum",
@ -355,14 +355,14 @@ vacuum(VacuumStmt *vacstmt, List *relids)
/*
* It's a database-wide VACUUM.
*
* Compute the initially applicable OldestXmin and FreezeLimit XIDs, so
* that we can record these values at the end of the VACUUM. Note that
* individual tables may well be processed with newer values, but we
* can guarantee that no (non-shared) relations are processed with
* Compute the initially applicable OldestXmin and FreezeLimit XIDs,
* so that we can record these values at the end of the VACUUM. Note
* that individual tables may well be processed with newer values, but
* we can guarantee that no (non-shared) relations are processed with
* older ones.
*
* It is okay to record non-shared values in pg_database, even though we
* may vacuum shared relations with older cutoffs, because only the
* It is okay to record non-shared values in pg_database, even though
* we may vacuum shared relations with older cutoffs, because only the
* minimum of the values present in pg_database matters. We can be
* sure that shared relations have at some time been vacuumed with
* cutoffs no worse than the global minimum; for, if there is a
@ -379,8 +379,8 @@ vacuum(VacuumStmt *vacstmt, List *relids)
/*
* Decide whether we need to start/commit our own transactions.
*
* For VACUUM (with or without ANALYZE): always do so, so that we can release
* locks as soon as possible. (We could possibly use the outer
* For VACUUM (with or without ANALYZE): always do so, so that we can
* release locks as soon as possible. (We could possibly use the outer
* transaction for a one-table VACUUM, but handling TOAST tables would be
* problematic.)
*
@ -981,21 +981,20 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
/*
* Determine the type of lock we want --- hard exclusive lock for a FULL
* vacuum, but just ShareUpdateExclusiveLock for concurrent vacuum.
* Either way, we can be sure that no other backend is vacuuming the same
* table.
* vacuum, but just ShareUpdateExclusiveLock for concurrent vacuum. Either
* way, we can be sure that no other backend is vacuuming the same table.
*/
lmode = vacstmt->full ? AccessExclusiveLock : ShareUpdateExclusiveLock;
/*
* Open the class, get an appropriate lock on it, and check permissions.
*
* We allow the user to vacuum a table if he is superuser, the table owner,
* or the database owner (but in the latter case, only if it's not a
* shared relation). pg_class_ownercheck includes the superuser case.
* We allow the user to vacuum a table if he is superuser, the table
* owner, or the database owner (but in the latter case, only if it's not
* a shared relation). pg_class_ownercheck includes the superuser case.
*
* Note we choose to treat permissions failure as a WARNING and keep trying
* to vacuum the rest of the DB --- is this appropriate?
* Note we choose to treat permissions failure as a WARNING and keep
* trying to vacuum the rest of the DB --- is this appropriate?
*/
onerel = relation_open(relid, lmode);
@ -1660,8 +1659,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
* find a page we cannot completely empty (this last condition is handled
* by "break" statements within the loop).
*
* NB: this code depends on the vacuum_pages and fraged_pages lists being in
* order by blkno.
* NB: this code depends on the vacuum_pages and fraged_pages lists being
* in order by blkno.
*/
nblocks = vacrelstats->rel_pages;
for (blkno = nblocks - vacuum_pages->empty_end_pages - 1;
@ -1684,9 +1683,9 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
* since we stop the outer loop at last_move_dest_block, pages removed
* here cannot have had anything moved onto them already.
*
* Also note that we don't change the stored fraged_pages list, only our
* local variable num_fraged_pages; so the forgotten pages are still
* available to be loaded into the free space map later.
* Also note that we don't change the stored fraged_pages list, only
* our local variable num_fraged_pages; so the forgotten pages are
* still available to be loaded into the free space map later.
*/
while (num_fraged_pages > 0 &&
fraged_pages->pagedesc[num_fraged_pages - 1]->blkno >= blkno)
@ -1839,17 +1838,17 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
* --- it must be recently obsoleted, else scan_heap would have
* deemed it removable.)
*
* NOTE: this test is not 100% accurate: it is possible for a tuple
* to be an updated one with recent xmin, and yet not match any
* new_tid entry in the vtlinks list. Presumably there was once a
* parent tuple with xmax matching the xmin, but it's possible
* that that tuple has been removed --- for example, if it had
* xmin = xmax and wasn't itself an updated version, then
* NOTE: this test is not 100% accurate: it is possible for a
* tuple to be an updated one with recent xmin, and yet not match
* any new_tid entry in the vtlinks list. Presumably there was
* once a parent tuple with xmax matching the xmin, but it's
* possible that that tuple has been removed --- for example, if
* it had xmin = xmax and wasn't itself an updated version, then
* HeapTupleSatisfiesVacuum would deem it removable as soon as the
* xmin xact completes.
*
* To be on the safe side, we abandon the repair_frag process if we
* cannot find the parent tuple in vtlinks. This may be overly
* To be on the safe side, we abandon the repair_frag process if
* we cannot find the parent tuple in vtlinks. This may be overly
* conservative; AFAICS it would be safe to move the chain.
*/
if (((tuple.t_data->t_infomask & HEAP_UPDATED) &&
@ -2388,8 +2387,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/*
* Clean moved-off tuples from last page in Nvacpagelist list.
*
* We need only do this in this one page, because higher-numbered pages
* are going to be truncated from the relation entirely. But see
* We need only do this in this one page, because higher-numbered
* pages are going to be truncated from the relation entirely. But see
* comments for update_hint_bits().
*/
if (vacpage->blkno == (blkno - 1) &&
@ -2544,8 +2543,8 @@ move_chain_tuple(Relation rel,
* Therefore we must do everything that uses old_tup->t_data BEFORE this
* step!!
*
* This path is different from the other callers of vacuum_page, because we
* have already incremented the vacpage's offsets_used field to account
* This path is different from the other callers of vacuum_page, because
* we have already incremented the vacpage's offsets_used field to account
* for the tuple(s) we expect to move onto the page. Therefore
* vacuum_page's check for offsets_used == 0 is wrong. But since that's a
* good debugging check for all other callers, we work around it here

View File

@ -31,7 +31,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.62 2005/11/20 19:49:07 tgl Exp $
* $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.63 2005/11/22 18:17:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -286,21 +286,21 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
* relation but crashes before initializing the page. Reclaim such
* pages for use.
*
* We have to be careful here because we could be looking at a page
* that someone has just added to the relation and not yet been
* able to initialize (see RelationGetBufferForTuple). To
* We have to be careful here because we could be looking at a
* page that someone has just added to the relation and not yet
* been able to initialize (see RelationGetBufferForTuple). To
* interlock against that, release the buffer read lock (which we
* must do anyway) and grab the relation extension lock before
* re-locking in exclusive mode. If the page is still
* uninitialized by then, it must be left over from a crashed
* backend, and we can initialize it.
*
* We don't really need the relation lock when this is a new or temp
* relation, but it's probably not worth the code space to check
* that, since this surely isn't a critical path.
* We don't really need the relation lock when this is a new or
* temp relation, but it's probably not worth the code space to
* check that, since this surely isn't a critical path.
*
* Note: the comparable code in vacuum.c need not worry because it's
* got exclusive lock on the whole relation.
* Note: the comparable code in vacuum.c need not worry because
* it's got exclusive lock on the whole relation.
*/
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
LockRelationForExtension(onerel, ExclusiveLock);
@ -366,12 +366,12 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
* Tuple is good. Consider whether to replace its xmin
* value with FrozenTransactionId.
*
* NB: Since we hold only a shared buffer lock here, we are
* assuming that TransactionId read/write is atomic. This
* is not the only place that makes such an assumption.
* It'd be possible to avoid the assumption by momentarily
* acquiring exclusive lock, but for the moment I see no
* need to.
* NB: Since we hold only a shared buffer lock here, we
* are assuming that TransactionId read/write is atomic.
* This is not the only place that makes such an
* assumption. It'd be possible to avoid the assumption by
* momentarily acquiring exclusive lock, but for the
* moment I see no need to.
*/
if (TransactionIdIsNormal(HeapTupleHeaderGetXmin(tuple.t_data)) &&
TransactionIdPrecedes(HeapTupleHeaderGetXmin(tuple.t_data),

View File

@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/variable.c,v 1.114 2005/10/15 02:49:16 momjian Exp $
* $PostgreSQL: pgsql/src/backend/commands/variable.c,v 1.115 2005/11/22 18:17:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -134,8 +134,8 @@ assign_datestyle(const char *value, bool doit, GucSource source)
* Easiest way to get the current DEFAULT state is to fetch the
* DEFAULT string from guc.c and recursively parse it.
*
* We can't simply "return assign_datestyle(...)" because we need to
* handle constructs like "DEFAULT, ISO".
* We can't simply "return assign_datestyle(...)" because we need
* to handle constructs like "DEFAULT, ISO".
*/
int saveDateStyle = DateStyle;
int saveDateOrder = DateOrder;
@ -339,8 +339,8 @@ assign_timezone(const char *value, bool doit, GucSource source)
* timezone setting, we will return that name rather than UNKNOWN
* as the canonical spelling.
*
* During GUC initialization, since the timezone library isn't set up
* yet, pg_get_timezone_name will return NULL and we will leave
* During GUC initialization, since the timezone library isn't set
* up yet, pg_get_timezone_name will return NULL and we will leave
* the setting as UNKNOWN. If this isn't overridden from the
* config file then pg_timezone_initialize() will eventually
* select a default value from the environment.

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/view.c,v 1.91 2005/10/15 02:49:16 momjian Exp $
* $PostgreSQL: pgsql/src/backend/commands/view.c,v 1.92 2005/11/22 18:17:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -410,7 +410,8 @@ DefineView(RangeVar *view, Query *viewParse, bool replace)
/*
* Create the view relation
*
* NOTE: if it already exists and replace is false, the xact will be aborted.
* NOTE: if it already exists and replace is false, the xact will be
* aborted.
*/
viewOid = DefineVirtualRelation(view, viewParse->targetList, replace);

View File

@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $PostgreSQL: pgsql/src/backend/executor/execAmi.c,v 1.85 2005/10/15 02:49:16 momjian Exp $
* $PostgreSQL: pgsql/src/backend/executor/execAmi.c,v 1.86 2005/11/22 18:17:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -403,9 +403,9 @@ ExecMayReturnRawTuples(PlanState *node)
* but just pass up input tuples, we have to recursively examine the input
* plan node.
*
* Note: Hash and Material are listed here because they sometimes return an
* original input tuple, not a copy. But Sort and SetOp never return an
* original tuple, so they can be treated like projecting nodes.
* Note: Hash and Material are listed here because they sometimes return
* an original input tuple, not a copy. But Sort and SetOp never return
* an original tuple, so they can be treated like projecting nodes.
*/
switch (nodeTag(node))
{

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/execGrouping.c,v 1.16 2005/10/15 02:49:16 momjian Exp $
* $PostgreSQL: pgsql/src/backend/executor/execGrouping.c,v 1.17 2005/11/22 18:17:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -381,9 +381,9 @@ LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot,
/*
* created new entry
*
* Zero any caller-requested space in the entry. (This zaps the "key
* data" dynahash.c copied into the new entry, but we don't care
* since we're about to overwrite it anyway.)
* Zero any caller-requested space in the entry. (This zaps the
* "key data" dynahash.c copied into the new entry, but we don't
* care since we're about to overwrite it anyway.)
*/
MemSet(entry, 0, hashtable->entrysize);

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/execJunk.c,v 1.50 2005/10/15 02:49:16 momjian Exp $
* $PostgreSQL: pgsql/src/backend/executor/execJunk.c,v 1.51 2005/11/22 18:17:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -87,11 +87,11 @@ ExecInitJunkFilter(List *targetList, bool hasoid, TupleTableSlot *slot)
* Now calculate the mapping between the original tuple's attributes and
* the "clean" tuple's attributes.
*
* The "map" is an array of "cleanLength" attribute numbers, i.e. one entry
* for every attribute of the "clean" tuple. The value of this entry is
* the attribute number of the corresponding attribute of the "original"
* tuple. (Zero indicates a NULL output attribute, but we do not use that
* feature in this routine.)
* The "map" is an array of "cleanLength" attribute numbers, i.e. one
* entry for every attribute of the "clean" tuple. The value of this entry
* is the attribute number of the corresponding attribute of the
* "original" tuple. (Zero indicates a NULL output attribute, but we do
* not use that feature in this routine.)
*/
cleanLength = cleanTupType->natts;
if (cleanLength > 0)
@ -158,11 +158,11 @@ ExecInitJunkFilterConversion(List *targetList,
* Calculate the mapping between the original tuple's attributes and the
* "clean" tuple's attributes.
*
* The "map" is an array of "cleanLength" attribute numbers, i.e. one entry
* for every attribute of the "clean" tuple. The value of this entry is
* the attribute number of the corresponding attribute of the "original"
* tuple. We store zero for any deleted attributes, marking that a NULL
* is needed in the output tuple.
* The "map" is an array of "cleanLength" attribute numbers, i.e. one
* entry for every attribute of the "clean" tuple. The value of this entry
* is the attribute number of the corresponding attribute of the
* "original" tuple. We store zero for any deleted attributes, marking
* that a NULL is needed in the output tuple.
*/
cleanLength = cleanTupType->natts;
if (cleanLength > 0)

View File

@ -26,7 +26,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.260 2005/11/20 18:38:20 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.261 2005/11/22 18:17:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -376,10 +376,10 @@ ExecCheckRTEPerms(RangeTblEntry *rte)
/*
* userid to check as: current user unless we have a setuid indication.
*
* Note: GetUserId() is presently fast enough that there's no harm in calling
* it separately for each RTE. If that stops being true, we could call it
* once in ExecCheckRTPerms and pass the userid down from there. But for
* now, no need for the extra clutter.
* Note: GetUserId() is presently fast enough that there's no harm in
* calling it separately for each RTE. If that stops being true, we could
* call it once in ExecCheckRTPerms and pass the userid down from there.
* But for now, no need for the extra clutter.
*/
userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
@ -582,8 +582,8 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
* initialize the executor "tuple" table. We need slots for all the plan
* nodes, plus possibly output slots for the junkfilter(s). At this point
* we aren't sure if we need junkfilters, so just add slots for them
* unconditionally. Also, if it's not a SELECT, set up a slot for use
* for trigger output tuples.
* unconditionally. Also, if it's not a SELECT, set up a slot for use for
* trigger output tuples.
*/
{
int nSlots = ExecCountSlotsNode(plan);
@ -797,11 +797,11 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
/*
* We can skip WAL-logging the insertions, unless PITR is in use.
*
* Note that for a non-temp INTO table, this is safe only because we know
* that the catalog changes above will have been WAL-logged, and so
* RecordTransactionCommit will think it needs to WAL-log the eventual
* transaction commit. Else the commit might be lost, even though all
* the data is safely fsync'd ...
* Note that for a non-temp INTO table, this is safe only because we
* know that the catalog changes above will have been WAL-logged, and
* so RecordTransactionCommit will think it needs to WAL-log the
* eventual transaction commit. Else the commit might be lost, even
* though all the data is safely fsync'd ...
*/
estate->es_into_relation_use_wal = XLogArchivingActive();
}
@ -1495,8 +1495,8 @@ ExecDelete(TupleTableSlot *slot,
/*
* delete the tuple
*
* Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that the
* row to be deleted is visible to that snapshot, and throw a can't-
* Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
* the row to be deleted is visible to that snapshot, and throw a can't-
* serialize error if not. This is a special-case behavior needed for
* referential integrity updates in serializable transactions.
*/
@ -1635,8 +1635,8 @@ ExecUpdate(TupleTableSlot *slot,
/*
* Check the constraints of the tuple
*
* If we generate a new candidate tuple after EvalPlanQual testing, we must
* loop back here and recheck constraints. (We don't need to redo
* If we generate a new candidate tuple after EvalPlanQual testing, we
* must loop back here and recheck constraints. (We don't need to redo
* triggers, however. If there are any BEFORE triggers then trigger.c
* will have done heap_lock_tuple to lock the correct tuple, so there's no
* need to do them again.)
@ -1648,8 +1648,8 @@ lreplace:;
/*
* replace the heap tuple
*
* Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that the
* row to be updated is visible to that snapshot, and throw a can't-
* Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
* the row to be updated is visible to that snapshot, and throw a can't-
* serialize error if not. This is a special-case behavior needed for
* referential integrity updates in serializable transactions.
*/
@ -1703,7 +1703,7 @@ lreplace:;
* Note: instead of having to update the old index tuples associated with
* the heap tuple, all we do is form and insert new index tuples. This is
* because UPDATEs are actually DELETEs and INSERTs, and index tuple
* deletion is done later by VACUUM (see notes in ExecDelete). All we do
* deletion is done later by VACUUM (see notes in ExecDelete). All we do
* here is insert new index tuples. -cim 9/27/89
*/

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/execQual.c,v 1.184 2005/11/17 22:14:51 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/execQual.c,v 1.185 2005/11/22 18:17:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -65,7 +65,7 @@ static Datum ExecEvalAggref(AggrefExprState *aggref,
static Datum ExecEvalVar(ExprState *exprstate, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalWholeRowVar(ExprState *exprstate, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalConst(ExprState *exprstate, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalParam(ExprState *exprstate, ExprContext *econtext,
@ -268,7 +268,7 @@ ExecEvalArrayRef(ArrayRefExprState *astate,
if (isAssignment)
ereport(ERROR,
(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
errmsg("array subscript in assignment must not be NULL")));
errmsg("array subscript in assignment must not be NULL")));
*isNull = true;
return (Datum) NULL;
}
@ -333,15 +333,15 @@ ExecEvalArrayRef(ArrayRefExprState *astate,
* array and the value to be assigned into it must be non-NULL, else
* we punt and return the original array.
*/
if (astate->refattrlength > 0) /* fixed-length array? */
if (astate->refattrlength > 0) /* fixed-length array? */
if (eisnull || *isNull)
return PointerGetDatum(array_source);
/*
* For assignment to varlena arrays, we handle a NULL original array
* by substituting an empty (zero-dimensional) array; insertion of
* the new element will result in a singleton array value. It does
* not matter whether the new element is NULL.
* by substituting an empty (zero-dimensional) array; insertion of the
* new element will result in a singleton array value. It does not
* matter whether the new element is NULL.
*/
if (*isNull)
{
@ -516,8 +516,8 @@ ExecEvalWholeRowVar(ExprState *exprstate, ExprContext *econtext,
Assert(variable->varattno == InvalidAttrNumber);
/*
* Whole-row Vars can only appear at the level of a relation scan,
* never in a join.
* Whole-row Vars can only appear at the level of a relation scan, never
* in a join.
*/
Assert(variable->varno != INNER);
Assert(variable->varno != OUTER);
@ -527,8 +527,8 @@ ExecEvalWholeRowVar(ExprState *exprstate, ExprContext *econtext,
tupleDesc = slot->tts_tupleDescriptor;
/*
* We have to make a copy of the tuple so we can safely insert the
* Datum overhead fields, which are not set in on-disk tuples.
* We have to make a copy of the tuple so we can safely insert the Datum
* overhead fields, which are not set in on-disk tuples.
*/
dtuple = (HeapTupleHeader) palloc(tuple->t_len);
memcpy((char *) dtuple, (char *) tuple->t_data, tuple->t_len);
@ -536,12 +536,11 @@ ExecEvalWholeRowVar(ExprState *exprstate, ExprContext *econtext,
HeapTupleHeaderSetDatumLength(dtuple, tuple->t_len);
/*
* If the Var identifies a named composite type, label the tuple
* with that type; otherwise use what is in the tupleDesc.
* If the Var identifies a named composite type, label the tuple with that
* type; otherwise use what is in the tupleDesc.
*
* It's likely that the slot's tupleDesc is a record type; if so,
* make sure it's been "blessed", so that the Datum can be interpreted
* later.
* It's likely that the slot's tupleDesc is a record type; if so, make
* sure it's been "blessed", so that the Datum can be interpreted later.
*/
if (variable->vartype != RECORDOID)
{
@ -1652,8 +1651,8 @@ ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
return BoolGetDatum(!useOr);
/*
* If the scalar is NULL, and the function is strict, return NULL;
* no point in iterating the loop.
* If the scalar is NULL, and the function is strict, return NULL; no
* point in iterating the loop.
*/
if (fcinfo.argnull[0] && sstate->fxprstate.func.fn_strict)
{
@ -2231,7 +2230,7 @@ ExecEvalArray(ArrayExprState *astate, ExprContext *econtext,
}
else
{
dataoffset = 0; /* marker for no null bitmap */
dataoffset = 0; /* marker for no null bitmap */
nbytes += ARR_OVERHEAD_NONULLS(ndims);
}
@ -2943,7 +2942,7 @@ ExecInitExpr(Expr *node, PlanState *parent)
{
case T_Var:
{
Var *var = (Var *) node;
Var *var = (Var *) node;
state = (ExprState *) makeNode(ExprState);
if (var->varattno != InvalidAttrNumber)

View File

@ -15,7 +15,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/execTuples.c,v 1.88 2005/10/15 02:49:16 momjian Exp $
* $PostgreSQL: pgsql/src/backend/executor/execTuples.c,v 1.89 2005/11/22 18:17:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -631,8 +631,9 @@ ExecMaterializeSlot(TupleTableSlot *slot)
* in which this could be optimized but it's probably not worth worrying
* about.)
*
* We may be called in a context that is shorter-lived than the tuple slot,
* but we have to ensure that the materialized tuple will survive anyway.
* We may be called in a context that is shorter-lived than the tuple
* slot, but we have to ensure that the materialized tuple will survive
* anyway.
*/
oldContext = MemoryContextSwitchTo(slot->tts_mcxt);
newTuple = ExecCopySlotTuple(slot);

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.127 2005/11/14 17:42:54 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.128 2005/11/22 18:17:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -769,19 +769,19 @@ ExecOpenIndices(ResultRelInfo *resultRelInfo)
/*
* Open and lock the index relation
*
* If the index AM supports concurrent updates, obtain RowExclusiveLock
* to signify that we are updating the index. This locks out only
* operations that need exclusive access, such as relocating the index
* to a new tablespace.
* If the index AM supports concurrent updates, obtain
* RowExclusiveLock to signify that we are updating the index. This
* locks out only operations that need exclusive access, such as
* relocating the index to a new tablespace.
*
* If the index AM is not safe for concurrent updates, obtain an
* exclusive lock on the index to lock out other updaters as well as
* readers (index_beginscan places AccessShareLock).
*
* If there are multiple not-concurrent-safe indexes, all backends must
* lock the indexes in the same order or we will get deadlocks here.
* This is guaranteed by RelationGetIndexList(), which promises to
* return the index list in OID order.
* If there are multiple not-concurrent-safe indexes, all backends
* must lock the indexes in the same order or we will get deadlocks
* here. This is guaranteed by RelationGetIndexList(), which promises
* to return the index list in OID order.
*
* The locks will be released in ExecCloseIndices.
*/

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/functions.c,v 1.98 2005/10/15 02:49:16 momjian Exp $
* $PostgreSQL: pgsql/src/backend/executor/functions.c,v 1.99 2005/11/22 18:17:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -268,11 +268,11 @@ init_sql_fcache(FmgrInfo *finfo)
* If the function has any arguments declared as polymorphic types, then
* it wasn't type-checked at definition time; must do so now.
*
* Also, force a type-check if the declared return type is a rowtype; we need
* to find out whether we are actually returning the whole tuple result,
* or just regurgitating a rowtype expression result. In the latter case
* we clear returnsTuple because we need not act different from the scalar
* result case.
* Also, force a type-check if the declared return type is a rowtype; we
* need to find out whether we are actually returning the whole tuple
* result, or just regurgitating a rowtype expression result. In the
* latter case we clear returnsTuple because we need not act different
* from the scalar result case.
*
* In the returnsTuple case, check_sql_fn_retval will also construct a
* JunkFilter we can use to coerce the returned rowtype to the desired
@ -498,12 +498,12 @@ postquel_execute(execution_state *es,
* labeling to make it a valid Datum. There are several reasons why
* we do this:
*
* 1. To copy the tuple out of the child execution context and into the
* desired result context.
* 1. To copy the tuple out of the child execution context and into
* the desired result context.
*
* 2. To remove any junk attributes present in the raw subselect result.
* (This is probably not absolutely necessary, but it seems like good
* policy.)
* 2. To remove any junk attributes present in the raw subselect
* result. (This is probably not absolutely necessary, but it seems
* like good policy.)
*
* 3. To insert dummy null columns if the declared result type has any
* attisdropped columns.

View File

@ -61,7 +61,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeAgg.c,v 1.135 2005/10/15 02:49:17 momjian Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeAgg.c,v 1.136 2005/11/22 18:17:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -283,8 +283,8 @@ initialize_aggregates(AggState *aggstate,
/*
* (Re)set transValue to the initial value.
*
* Note that when the initial value is pass-by-ref, we must copy it (into
* the aggcontext) since we will pfree the transValue later.
* Note that when the initial value is pass-by-ref, we must copy it
* (into the aggcontext) since we will pfree the transValue later.
*/
if (peraggstate->initValueIsNull)
pergroupstate->transValue = peraggstate->initValue;
@ -341,8 +341,8 @@ advance_transition_function(AggState *aggstate,
* already checked that the agg's input type is binary-compatible
* with its transtype, so straight copy here is OK.)
*
* We must copy the datum into aggcontext if it is pass-by-ref. We do
* not need to pfree the old transValue, since it's NULL.
* We must copy the datum into aggcontext if it is pass-by-ref. We
* do not need to pfree the old transValue, since it's NULL.
*/
oldContext = MemoryContextSwitchTo(aggstate->aggcontext);
pergroupstate->transValue = datumCopy(newVal,
@ -842,8 +842,8 @@ agg_retrieve_direct(AggState *aggstate)
* aggregate will have a targetlist reference to ctid. We need to
* return a null for ctid in that situation, not coredump.
*
* The values returned for the aggregates will be the initial values of
* the transition functions.
* The values returned for the aggregates will be the initial values
* of the transition functions.
*/
if (TupIsNull(firstSlot))
{

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeBitmapIndexscan.c,v 1.10 2005/10/15 02:49:17 momjian Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeBitmapIndexscan.c,v 1.11 2005/11/22 18:17:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -211,8 +211,8 @@ ExecInitBitmapIndexScan(BitmapIndexScan *node, EState *estate)
/*
* Miscellaneous initialization
*
* We do not need a standard exprcontext for this node, though we may decide
* below to create a runtime-key exprcontext
* We do not need a standard exprcontext for this node, though we may
* decide below to create a runtime-key exprcontext
*/
/*

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeHash.c,v 1.97 2005/11/20 19:49:07 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeHash.c,v 1.98 2005/11/22 18:17:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -237,8 +237,8 @@ ExecHashTableCreate(Hash *node, List *hashOperators)
/*
* Initialize the hash table control block.
*
* The hashtable control block is just palloc'd from the executor's per-query
* memory context.
* The hashtable control block is just palloc'd from the executor's
* per-query memory context.
*/
hashtable = (HashJoinTable) palloc(sizeof(HashJoinTableData));
hashtable->nbuckets = nbuckets;

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeHashjoin.c,v 1.76 2005/11/20 19:49:07 tgl Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeHashjoin.c,v 1.77 2005/11/22 18:17:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -622,13 +622,13 @@ start_over:
* 1. In a LEFT JOIN, we have to process outer batches even if the inner
* batch is empty.
*
* 2. If we have increased nbatch since the initial estimate, we have to scan
* inner batches since they might contain tuples that need to be
* 2. If we have increased nbatch since the initial estimate, we have to
* scan inner batches since they might contain tuples that need to be
* reassigned to later inner batches.
*
* 3. Similarly, if we have increased nbatch since starting the outer scan,
* we have to rescan outer batches in case they contain tuples that need
* to be reassigned.
* 3. Similarly, if we have increased nbatch since starting the outer
* scan, we have to rescan outer batches in case they contain tuples that
* need to be reassigned.
*/
curbatch++;
while (curbatch < nbatch &&

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.104 2005/10/15 02:49:17 momjian Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.105 2005/11/22 18:17:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -578,8 +578,8 @@ ExecIndexBuildScanKeys(PlanState *planstate, List *quals,
* listed in the var node and use the value of the const as comparison
* data.
*
* If we don't have a const node, it means our scan key is a function of
* information obtained during the execution of the plan, in which
* If we don't have a const node, it means our scan key is a function
* of information obtained during the execution of the plan, in which
* case we need to recalculate the index scan key at run time. Hence,
* we set have_runtime_keys to true and place the appropriate
* subexpression in run_keys. The corresponding scan key values are

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeMergejoin.c,v 1.75 2005/10/15 02:49:17 momjian Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeMergejoin.c,v 1.76 2005/11/22 18:17:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -239,8 +239,8 @@ MJExamineQuals(List *qualList, PlanState *parent)
* much like SelectSortFunction except we insist on matching all the
* operators provided, and it can be a cross-type opclass.
*
* XXX for now, insist on forward sort so that NULLs can be counted on to
* be high.
* XXX for now, insist on forward sort so that NULLs can be counted on
* to be high.
*/
catlist = SearchSysCacheList(AMOPOPID, 1,
ObjectIdGetDatum(qual->opno),
@ -1121,13 +1121,13 @@ ExecMergeJoin(MergeJoinState *node)
* scan position to the first mark, and go join that tuple
* (and any following ones) to the new outer.
*
* NOTE: we do not need to worry about the MatchedInner state
* for the rescanned inner tuples. We know all of them
* will match this new outer tuple and therefore won't be
* emitted as fill tuples. This works *only* because we
* require the extra joinquals to be nil when doing a
* right or full join --- otherwise some of the rescanned
* tuples might fail the extra joinquals.
* NOTE: we do not need to worry about the MatchedInner
* state for the rescanned inner tuples. We know all of
* them will match this new outer tuple and therefore
* won't be emitted as fill tuples. This works *only*
* because we require the extra joinquals to be nil when
* doing a right or full join --- otherwise some of the
* rescanned tuples might fail the extra joinquals.
*/
ExecRestrPos(innerPlan);

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeNestloop.c,v 1.39 2005/10/15 02:49:17 momjian Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeNestloop.c,v 1.40 2005/11/22 18:17:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -223,8 +223,8 @@ ExecNestLoop(NestLoopState *node)
* test the inner and outer tuples to see if they satisfy the node's
* qualification.
*
* Only the joinquals determine MatchedOuter status, but all quals must
* pass to actually return the tuple.
* Only the joinquals determine MatchedOuter status, but all quals
* must pass to actually return the tuple.
*/
ENL1_printf("testing qualification");

View File

@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeSubplan.c,v 1.70 2005/10/15 02:49:17 momjian Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeSubplan.c,v 1.71 2005/11/22 18:17:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -128,8 +128,8 @@ ExecHashSubPlan(SubPlanState *node,
* unequal to the LHS; if so, the result is UNKNOWN. (We skip that part
* if we don't care about UNKNOWN.) Otherwise, the result is FALSE.
*
* Note: the reason we can avoid a full scan of the main hash table is that
* the combining operators are assumed never to yield NULL when both
* Note: the reason we can avoid a full scan of the main hash table is
* that the combining operators are assumed never to yield NULL when both
* inputs are non-null. If they were to do so, we might need to produce
* UNKNOWN instead of FALSE because of an UNKNOWN result in comparing the
* LHS to some main-table entry --- which is a comparison we will not even
@ -255,9 +255,9 @@ ExecScanSubPlan(SubPlanState *node,
* FALSE for ANY_SUBLINK, TRUE for ALL_SUBLINK, NULL for
* MULTIEXPR_SUBLINK.
*
* For EXPR_SUBLINK we require the subplan to produce no more than one tuple,
* else an error is raised. For ARRAY_SUBLINK we allow the subplan to
* produce more than one tuple. In either case, if zero tuples are
* For EXPR_SUBLINK we require the subplan to produce no more than one
* tuple, else an error is raised. For ARRAY_SUBLINK we allow the subplan
* to produce more than one tuple. In either case, if zero tuples are
* produced, we return NULL. Assuming we get a tuple, we just use its
* first column (there can be only one non-junk column in this case).
*/
@ -480,13 +480,13 @@ buildSubPlanHash(SubPlanState *node)
* If we need to distinguish accurately between FALSE and UNKNOWN (i.e.,
* NULL) results of the IN operation, then we have to store subplan output
* rows that are partly or wholly NULL. We store such rows in a separate
* hash table that we expect will be much smaller than the main table.
* (We can use hashing to eliminate partly-null rows that are not
* distinct. We keep them separate to minimize the cost of the inevitable
* full-table searches; see findPartialMatch.)
* hash table that we expect will be much smaller than the main table. (We
* can use hashing to eliminate partly-null rows that are not distinct.
* We keep them separate to minimize the cost of the inevitable full-table
* searches; see findPartialMatch.)
*
* If it's not necessary to distinguish FALSE and UNKNOWN, then we don't need
* to store subplan output rows that contain NULL.
* If it's not necessary to distinguish FALSE and UNKNOWN, then we don't
* need to store subplan output rows that contain NULL.
*/
MemoryContextReset(node->tablecxt);
node->hashtable = NULL;
@ -796,8 +796,8 @@ ExecInitSubPlan(SubPlanState *node, EState *estate)
* righthand sides. We need both the ExprState list (for ExecProject)
* and the underlying parse Exprs (for ExecTypeFromTL).
*
* We also extract the combining operators themselves to initialize the
* equality and hashing functions for the hash tables.
* We also extract the combining operators themselves to initialize
* the equality and hashing functions for the hash tables.
*/
lefttlist = righttlist = NIL;
leftptlist = rightptlist = NIL;

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeUnique.c,v 1.48 2005/10/15 02:49:17 momjian Exp $
* $PostgreSQL: pgsql/src/backend/executor/nodeUnique.c,v 1.49 2005/11/22 18:17:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -56,10 +56,10 @@ ExecUnique(UniqueState *node)
* now loop, returning only non-duplicate tuples. We assume that the
* tuples arrive in sorted order so we can detect duplicates easily.
*
* We return the first tuple from each group of duplicates (or the last tuple
* of each group, when moving backwards). At either end of the subplan,
* clear the result slot so that we correctly return the first/last tuple
* when reversing direction.
* We return the first tuple from each group of duplicates (or the last
* tuple of each group, when moving backwards). At either end of the
* subplan, clear the result slot so that we correctly return the
* first/last tuple when reversing direction.
*/
for (;;)
{

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/spi.c,v 1.144 2005/11/03 17:11:36 alvherre Exp $
* $PostgreSQL: pgsql/src/backend/executor/spi.c,v 1.145 2005/11/22 18:17:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -113,8 +113,8 @@ SPI_connect(void)
/*
* Create memory contexts for this procedure
*
* XXX it would be better to use PortalContext as the parent context, but we
* may not be inside a portal (consider deferred-trigger execution).
* XXX it would be better to use PortalContext as the parent context, but
* we may not be inside a portal (consider deferred-trigger execution).
* Perhaps CurTransactionContext would do? For now it doesn't matter
* because we clean up explicitly in AtEOSubXact_SPI().
*/

Some files were not shown because too many files have changed in this diff Show More