postgresql/src/backend/access/common/heaptuple.c

/*-------------------------------------------------------------------------
*
* heaptuple.c
* This file contains heap tuple accessor and mutator routines, as well
* as various tuple utilities.
*
* Portions Copyright (c) 1996-2003, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/common/heaptuple.c,v 1.88 2003/11/29 19:51:39 pgsql Exp $
*
* NOTES
* The old interface functions have been converted to macros
* and moved to heapam.h
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "access/heapam.h"
#include "catalog/pg_type.h"
/* ----------------------------------------------------------------
* misc support routines
* ----------------------------------------------------------------
*/
/* ----------------
* ComputeDataSize
* ----------------
*/
Size
ComputeDataSize(TupleDesc tupleDesc,
Datum *value,
char *nulls)
{
uint32 data_length = 0;
int i;
int numberOfAttributes = tupleDesc->natts;
Form_pg_attribute *att = tupleDesc->attrs;
for (i = 0; i < numberOfAttributes; i++)
{
if (nulls[i] != ' ')
continue;
data_length = att_align(data_length, att[i]->attalign);
data_length = att_addlength(data_length, att[i]->attlen, value[i]);
}
return data_length;
}
/* ----------------
* DataFill
* ----------------
*/
void
DataFill(char *data,
TupleDesc tupleDesc,
Datum *value,
char *nulls,
uint16 *infomask,
bits8 *bit)
{
bits8 *bitP = 0;
int bitmask = 0;
Size data_length;
int i;
int numberOfAttributes = tupleDesc->natts;
Form_pg_attribute *att = tupleDesc->attrs;
if (bit != NULL)
{
bitP = &bit[-1];
bitmask = CSIGNBIT;
}
*infomask &= ~(HEAP_HASNULL | HEAP_HASVARWIDTH | HEAP_HASEXTENDED);
for (i = 0; i < numberOfAttributes; i++)
{
if (bit != NULL)
{
if (bitmask != CSIGNBIT)
bitmask <<= 1;
else
{
bitP += 1;
*bitP = 0x0;
bitmask = 1;
}
if (nulls[i] == 'n')
{
*infomask |= HEAP_HASNULL;
continue;
}
*bitP |= bitmask;
}
/* XXX we are aligning the pointer itself, not the offset */
data = (char *) att_align((long) data, att[i]->attalign);
if (att[i]->attbyval)
{
/* pass-by-value */
store_att_byval(data, value[i], att[i]->attlen);
data_length = att[i]->attlen;
}
else if (att[i]->attlen == -1)
{
/* varlena */
*infomask |= HEAP_HASVARWIDTH;
if (VARATT_IS_EXTERNAL(value[i]))
*infomask |= HEAP_HASEXTERNAL;
if (VARATT_IS_COMPRESSED(value[i]))
*infomask |= HEAP_HASCOMPRESSED;
data_length = VARATT_SIZE(DatumGetPointer(value[i]));
memcpy(data, DatumGetPointer(value[i]), data_length);
}
else if (att[i]->attlen == -2)
{
/* cstring */
*infomask |= HEAP_HASVARWIDTH;
data_length = strlen(DatumGetCString(value[i])) + 1;
memcpy(data, DatumGetPointer(value[i]), data_length);
}
else
{
/* fixed-length pass-by-reference */
Assert(att[i]->attlen > 0);
data_length = att[i]->attlen;
memcpy(data, DatumGetPointer(value[i]), data_length);
}
data += data_length;
}
}
/* ----------------------------------------------------------------
* heap tuple interface
* ----------------------------------------------------------------
*/
/* ----------------
* heap_attisnull - returns 1 iff tuple attribute is not present
* ----------------
*/
int
heap_attisnull(HeapTuple tup, int attnum)
{
if (attnum > (int) tup->t_data->t_natts)
return 1;
if (HeapTupleNoNulls(tup))
return 0;
if (attnum > 0)
return att_isnull(attnum - 1, tup->t_data->t_bits);
else
switch (attnum)
{
case TableOidAttributeNumber:
case SelfItemPointerAttributeNumber:
case ObjectIdAttributeNumber:
case MinTransactionIdAttributeNumber:
case MinCommandIdAttributeNumber:
case MaxTransactionIdAttributeNumber:
case MaxCommandIdAttributeNumber:
/* these are never null */
break;
default:
elog(ERROR, "invalid attnum: %d", attnum);
}
return 0;
}
/* ----------------
* nocachegetattr
*
* This only gets called from fastgetattr() macro, in cases where
* we can't use a cacheoffset and the value is not null.
*
* This caches attribute offsets in the attribute descriptor.
*
* An alternate way to speed things up would be to cache offsets
* with the tuple, but that seems more difficult unless you take
* the storage hit of actually putting those offsets into the
* tuple you send to disk. Yuck.
*
* This scheme will be slightly slower than that, but should
* perform well for queries which hit large #'s of tuples. After
* you cache the offsets once, examining all the other tuples using
* the same attribute descriptor will go much quicker. -cim 5/4/91
* ----------------
*/
Datum
nocachegetattr(HeapTuple tuple,
int attnum,
TupleDesc tupleDesc,
bool *isnull)
{
HeapTupleHeader tup = tuple->t_data;
Form_pg_attribute *att = tupleDesc->attrs;
char *tp; /* ptr to att in tuple */
bits8 *bp = tup->t_bits; /* ptr to null bitmask in tuple */
bool slow = false; /* do we have to walk nulls? */
(void) isnull; /* not used */
#ifdef IN_MACRO
/* This is handled in the macro */
Assert(attnum > 0);
if (isnull)
*isnull = false;
#endif
attnum--;
/* ----------------
* Three cases:
*
* 1: No nulls and no variable-width attributes.
* 2: Has a null or a var-width AFTER att.
* 3: Has nulls or var-widths BEFORE att.
* ----------------
*/
if (HeapTupleNoNulls(tuple))
{
#ifdef IN_MACRO
/* This is handled in the macro */
if (att[attnum]->attcacheoff != -1)
{
return fetchatt(att[attnum],
(char *) tup + tup->t_hoff +
att[attnum]->attcacheoff);
}
#endif
}
else
{
/*
* there's a null somewhere in the tuple
*
* check to see if desired att is null
*/
#ifdef IN_MACRO
/* This is handled in the macro */
if (att_isnull(attnum, bp))
{
if (isnull)
*isnull = true;
return (Datum) NULL;
}
#endif
/*
* Now check to see if any preceding bits are null...
*/
{
int byte = attnum >> 3;
int finalbit = attnum & 0x07;
/* check for nulls "before" final bit of last byte */
if ((~bp[byte]) & ((1 << finalbit) - 1))
slow = true;
else
{
/* check for nulls in any "earlier" bytes */
int i;
for (i = 0; i < byte; i++)
{
if (bp[i] != 0xFF)
{
slow = true;
break;
}
}
}
}
}
tp = (char *) tup + tup->t_hoff;
/*
* now check for any non-fixed length attrs before our attribute
*/
if (!slow)
{
if (att[attnum]->attcacheoff != -1)
{
return fetchatt(att[attnum],
tp + att[attnum]->attcacheoff);
}
else if (!HeapTupleAllFixed(tuple))
{
int j;
/*
* In for(), we test <= and not < because we want to see if we
* can go past it in initializing offsets.
*/
for (j = 0; j <= attnum; j++)
{
if (att[j]->attlen <= 0)
{
slow = true;
break;
}
}
}
}
/*
* If slow is false, and we got here, we know that we have a tuple
* with no nulls or var-widths before the target attribute. If
* possible, we also want to initialize the remainder of the attribute
* cached offset values.
*/
if (!slow)
{
int j = 1;
long off;
/*
* need to set cache for some atts
*/
att[0]->attcacheoff = 0;
while (j < attnum && att[j]->attcacheoff > 0)
j++;
off = att[j - 1]->attcacheoff + att[j - 1]->attlen;
for (; j <= attnum ||
/* Can we compute more? We will probably need them */
(j < tup->t_natts &&
att[j]->attcacheoff == -1 &&
(HeapTupleNoNulls(tuple) || !att_isnull(j, bp)) &&
(HeapTupleAllFixed(tuple) || att[j]->attlen > 0)); j++)
{
off = att_align(off, att[j]->attalign);
att[j]->attcacheoff = off;
off = att_addlength(off, att[j]->attlen, tp + off);
}
return fetchatt(att[attnum], tp + att[attnum]->attcacheoff);
}
else
{
bool usecache = true;
int off = 0;
int i;
/*
* Now we know that we have to walk the tuple CAREFULLY.
*
* Note - This loop is a little tricky. On iteration i we first set
* the offset for attribute i and figure out how much the offset
* should be incremented. Finally, we need to align the offset
* based on the size of attribute i+1 (for which the offset has
* been computed). -mer 12 Dec 1991
*/
for (i = 0; i < attnum; i++)
{
if (!HeapTupleNoNulls(tuple))
{
if (att_isnull(i, bp))
{
usecache = false;
continue;
}
}
/* If we know the next offset, we can skip the rest */
if (usecache && att[i]->attcacheoff != -1)
off = att[i]->attcacheoff;
else
{
off = att_align(off, att[i]->attalign);
if (usecache)
att[i]->attcacheoff = off;
}
off = att_addlength(off, att[i]->attlen, tp + off);
if (usecache && att[i]->attlen <= 0)
usecache = false;
}
off = att_align(off, att[attnum]->attalign);
return fetchatt(att[attnum], tp + off);
}
}
/* ----------------
* heap_getsysattr
*
* Fetch the value of a system attribute for a tuple.
*
* This is a support routine for the heap_getattr macro. The macro
* has already determined that the attnum refers to a system attribute.
* ----------------
*/
Datum
heap_getsysattr(HeapTuple tup, int attnum, bool *isnull)
{
Datum result;
Assert(tup);
/* Currently, no sys attribute ever reads as NULL. */
if (isnull)
*isnull = false;
switch (attnum)
{
case SelfItemPointerAttributeNumber:
/* pass-by-reference datatype */
result = PointerGetDatum(&(tup->t_self));
break;
case ObjectIdAttributeNumber:
result = ObjectIdGetDatum(HeapTupleGetOid(tup));
break;
case MinTransactionIdAttributeNumber:
result = TransactionIdGetDatum(HeapTupleHeaderGetXmin(tup->t_data));
break;
case MinCommandIdAttributeNumber:
result = CommandIdGetDatum(HeapTupleHeaderGetCmin(tup->t_data));
break;
case MaxTransactionIdAttributeNumber:
result = TransactionIdGetDatum(HeapTupleHeaderGetXmax(tup->t_data));
break;
case MaxCommandIdAttributeNumber:
result = CommandIdGetDatum(HeapTupleHeaderGetCmax(tup->t_data));
break;
case TableOidAttributeNumber:
result = ObjectIdGetDatum(tup->t_tableOid);
break;
default:
elog(ERROR, "invalid attnum: %d", attnum);
result = 0; /* keep compiler quiet */
break;
}
return result;
}
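#ifdef NOT_USED
/* ----------------
 *		heap_getattr_example
 *
 *		Illustrative sketch only (not referenced anywhere): shows how a
 *		caller normally fetches attributes through the heap_getattr()
 *		macro, which dispatches to heap_getsysattr() for system columns
 *		and to fastgetattr()/nocachegetattr() for user columns.  The
 *		"tuple" and "tupdesc" arguments are assumed to describe the same
 *		relation.
 * ----------------
 */
static Datum
heap_getattr_example(HeapTuple tuple, TupleDesc tupdesc, bool *isnull)
{
	/* system column: attnum <= 0 routes to heap_getsysattr() above */
	(void) heap_getattr(tuple, TableOidAttributeNumber, tupdesc, isnull);

	/* user column 1: sets *isnull if the attribute is NULL */
	return heap_getattr(tuple, 1, tupdesc, isnull);
}
#endif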
/* ----------------
* heap_copytuple
*
* returns a copy of an entire tuple
*
* The HeapTuple struct, tuple header, and tuple data are all allocated
* as a single palloc() block.
* ----------------
*/
HeapTuple
heap_copytuple(HeapTuple tuple)
{
HeapTuple newTuple;
if (!HeapTupleIsValid(tuple) || tuple->t_data == NULL)
return NULL;
newTuple = (HeapTuple) palloc(HEAPTUPLESIZE + tuple->t_len);
newTuple->t_len = tuple->t_len;
newTuple->t_self = tuple->t_self;
newTuple->t_tableOid = tuple->t_tableOid;
newTuple->t_datamcxt = CurrentMemoryContext;
newTuple->t_data = (HeapTupleHeader) ((char *) newTuple + HEAPTUPLESIZE);
memcpy((char *) newTuple->t_data, (char *) tuple->t_data, tuple->t_len);
return newTuple;
}
/* ----------------
* heap_copytuple_with_tuple
*
* copy a tuple into a caller-supplied HeapTuple management struct
* ----------------
*/
void
heap_copytuple_with_tuple(HeapTuple src, HeapTuple dest)
{
if (!HeapTupleIsValid(src) || src->t_data == NULL)
{
dest->t_data = NULL;
return;
}
dest->t_len = src->t_len;
dest->t_self = src->t_self;
dest->t_tableOid = src->t_tableOid;
dest->t_datamcxt = CurrentMemoryContext;
dest->t_data = (HeapTupleHeader) palloc(src->t_len);
memcpy((char *) dest->t_data, (char *) src->t_data, src->t_len);
}
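#ifdef NOT_USED
/* ----------------
 *		heap_copytuple_example
 *
 *		Illustrative sketch only: contrasts the two copying routines
 *		above.  heap_copytuple() palloc's a complete new tuple in one
 *		block, while heap_copytuple_with_tuple() fills a caller-supplied
 *		HeapTupleData and palloc's only the tuple data.
 * ----------------
 */
static void
heap_copytuple_example(HeapTuple src)
{
	HeapTuple	copy;
	HeapTupleData local;

	copy = heap_copytuple(src);			/* independent palloc'd copy */
	heap_copytuple_with_tuple(src, &local);		/* header struct on stack */

	heap_freetuple(copy);
	pfree(local.t_data);
}
#endif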
#ifdef NOT_USED
/* ----------------
* heap_deformtuple
*
* the inverse of heap_formtuple (see below)
* ----------------
*/
void
heap_deformtuple(HeapTuple tuple,
TupleDesc tdesc,
Datum *values,
char *nulls)
{
int i;
int natts;
Assert(HeapTupleIsValid(tuple));
natts = tuple->t_natts;
for (i = 0; i < natts; i++)
{
bool isnull;
values[i] = heap_getattr(tuple,
i + 1,
tdesc,
&isnull);
if (isnull)
nulls[i] = 'n';
else
nulls[i] = ' ';
}
}
#endif
/* ----------------
* heap_formtuple
*
* constructs a tuple from the given *value and *null arrays
*
* old comments
* Handles alignment by aligning 2 byte attributes on short boundaries
* and 3 or 4 byte attributes on long word boundaries on a vax; and
* aligning non-byte attributes on short boundaries on a sun. Does
* not properly align fixed length arrays of 1 or 2 byte types (yet).
*
* Null attributes are indicated by a 'n' in the appropriate byte
* of the *null. Non-null attributes are indicated by a ' ' (space).
*
* Fix me. (Figure that must keep context if debug--allow give oid.)
* Assumes in order.
* ----------------
*/
HeapTuple
heap_formtuple(TupleDesc tupleDescriptor,
Datum *value,
char *nulls)
{
HeapTuple tuple; /* return tuple */
HeapTupleHeader td; /* tuple data */
unsigned long len;
int hoff;
bool hasnull = false;
int i;
int numberOfAttributes = tupleDescriptor->natts;
if (numberOfAttributes > MaxTupleAttributeNumber)
ereport(ERROR,
(errcode(ERRCODE_TOO_MANY_COLUMNS),
errmsg("number of columns (%d) exceeds limit (%d)",
numberOfAttributes, MaxTupleAttributeNumber)));
for (i = 0; i < numberOfAttributes; i++)
{
if (nulls[i] != ' ')
{
hasnull = true;
break;
}
}
len = offsetof(HeapTupleHeaderData, t_bits);
if (hasnull)
len += BITMAPLEN(numberOfAttributes);
if (tupleDescriptor->tdhasoid)
len += sizeof(Oid);
hoff = len = MAXALIGN(len); /* align user data safely */
len += ComputeDataSize(tupleDescriptor, value, nulls);
tuple = (HeapTuple) palloc(HEAPTUPLESIZE + len);
tuple->t_datamcxt = CurrentMemoryContext;
td = tuple->t_data = (HeapTupleHeader) ((char *) tuple + HEAPTUPLESIZE);
MemSet((char *) td, 0, len);
tuple->t_len = len;
ItemPointerSetInvalid(&(tuple->t_self));
tuple->t_tableOid = InvalidOid;
td->t_natts = numberOfAttributes;
td->t_hoff = hoff;
if (tupleDescriptor->tdhasoid) /* else leave infomask = 0 */
td->t_infomask = HEAP_HASOID;
DataFill((char *) td + hoff,
tupleDescriptor,
value,
nulls,
&td->t_infomask,
(hasnull ? td->t_bits : NULL));
return tuple;
}
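#ifdef NOT_USED
/* ----------------
 *		heap_formtuple_example
 *
 *		Illustrative sketch only: the typical calling pattern for
 *		heap_formtuple.  A two-column descriptor "tupdesc" whose first
 *		column is int4 is assumed to be supplied by the caller.
 * ----------------
 */
static HeapTuple
heap_formtuple_example(TupleDesc tupdesc)
{
	Datum		values[2];
	char		nulls[2];

	values[0] = Int32GetDatum(42);	/* first column: non-null */
	nulls[0] = ' ';
	values[1] = (Datum) 0;			/* second column: SQL NULL */
	nulls[1] = 'n';

	return heap_formtuple(tupdesc, values, nulls);
}
#endif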
/* ----------------
* heap_modifytuple
*
* forms a new tuple from an old tuple and a set of replacement values.
* returns a new palloc'ed tuple.
* ----------------
*/
HeapTuple
heap_modifytuple(HeapTuple tuple,
Relation relation,
Datum *replValue,
char *replNull,
char *repl)
{
int attoff;
int numberOfAttributes;
Datum *value;
char *nulls;
bool isNull;
HeapTuple newTuple;
/*
* sanity checks
*/
Assert(HeapTupleIsValid(tuple));
Assert(RelationIsValid(relation));
Assert(PointerIsValid(replValue));
Assert(PointerIsValid(replNull));
Assert(PointerIsValid(repl));
numberOfAttributes = RelationGetForm(relation)->relnatts;
/*
* allocate and fill *value and *nulls arrays from either the tuple or
* the repl information, as appropriate.
*/
value = (Datum *) palloc(numberOfAttributes * sizeof(Datum));
nulls = (char *) palloc(numberOfAttributes * sizeof(char));
for (attoff = 0; attoff < numberOfAttributes; attoff++)
{
if (repl[attoff] == ' ')
{
value[attoff] = heap_getattr(tuple,
AttrOffsetGetAttrNumber(attoff),
RelationGetDescr(relation),
&isNull);
nulls[attoff] = (isNull) ? 'n' : ' ';
}
else if (repl[attoff] == 'r')
{
value[attoff] = replValue[attoff];
nulls[attoff] = replNull[attoff];
}
else
elog(ERROR, "unrecognized replace flag: %d", (int) repl[attoff]);
}
/*
* create a new tuple from the *values and *nulls arrays
*/
newTuple = heap_formtuple(RelationGetDescr(relation),
value,
nulls);
pfree(value);
pfree(nulls);
/*
* copy the identification info of the old tuple: t_ctid, t_self, and
* OID (if any)
*/
newTuple->t_data->t_ctid = tuple->t_data->t_ctid;
newTuple->t_self = tuple->t_self;
newTuple->t_tableOid = tuple->t_tableOid;
if (relation->rd_rel->relhasoids)
HeapTupleSetOid(newTuple, HeapTupleGetOid(tuple));
return newTuple;
}
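#ifdef NOT_USED
/* ----------------
 *		heap_modifytuple_example
 *
 *		Illustrative sketch only: build a new tuple that is a copy of
 *		"oldtup" except that its first column is replaced with a new
 *		non-null value.  "rel" and "oldtup" are assumed to be supplied by
 *		the caller, and the relation is assumed to have exactly three
 *		attributes (the first of type int4).
 * ----------------
 */
static HeapTuple
heap_modifytuple_example(Relation rel, HeapTuple oldtup)
{
	Datum		replValue[3];
	char		replNull[3] = {' ', ' ', ' '};
	char		repl[3] = {'r', ' ', ' '};	/* 'r' marks replaced columns */

	replValue[0] = Int32GetDatum(7);
	replValue[1] = (Datum) 0;
	replValue[2] = (Datum) 0;

	return heap_modifytuple(oldtup, rel, replValue, replNull, repl);
}
#endif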
/* ----------------
* heap_freetuple
* ----------------
*/
void
heap_freetuple(HeapTuple htup)
{
if (htup->t_data != NULL)
if (htup->t_datamcxt != NULL && (char *) (htup->t_data) !=
((char *) htup + HEAPTUPLESIZE))
pfree(htup->t_data);
pfree(htup);
}
/* ----------------
* heap_addheader
*
* This routine forms a HeapTuple by copying the given structure (tuple
* data) and adding a generic header. Note that the tuple data is
* presumed to contain no null fields and no varlena fields.
*
* This routine is really only useful for certain system tables that are
* known to be fixed-width and null-free. It is used in some places for
* pg_class, but that is a gross hack (it only works because relacl can
* be omitted from the tuple entirely in those places).
* ----------------
*/
HeapTuple
heap_addheader(int natts, /* max domain index */
bool withoid, /* reserve space for oid */
Size structlen, /* its length */
void *structure) /* pointer to the struct */
{
1999-05-25 18:15:34 +02:00
HeapTuple tuple;
HeapTupleHeader td;
Size len;
1999-05-25 18:15:34 +02:00
int hoff;
AssertArg(natts > 0);
/* header needs no null bitmap */
hoff = offsetof(HeapTupleHeaderData, t_bits);
if (withoid)
hoff += sizeof(Oid);
hoff = MAXALIGN(hoff);
len = hoff + structlen;
tuple = (HeapTuple) palloc(HEAPTUPLESIZE + len);
tuple->t_len = len;
ItemPointerSetInvalid(&(tuple->t_self));
tuple->t_tableOid = InvalidOid;
tuple->t_datamcxt = CurrentMemoryContext;
tuple->t_data = td = (HeapTupleHeader) ((char *) tuple + HEAPTUPLESIZE);
MemSet((char *) td, 0, hoff);
td->t_natts = natts;
td->t_hoff = hoff;
if (withoid) /* else leave infomask = 0 */
td->t_infomask = HEAP_HASOID;
memcpy((char *) td + hoff, structure, structlen);
return tuple;
}
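#ifdef NOT_USED
/* ----------------
 *		heap_addheader_example
 *
 *		Illustrative sketch only: wrap a fixed-width, null-free C struct
 *		in a tuple header, the way the catalog code builds rows for
 *		fixed-width system tables.  The struct type and attribute count
 *		below are hypothetical.
 * ----------------
 */
typedef struct ExampleCatalogRow
{
	Oid			exrelid;
	int4		exnum;
} ExampleCatalogRow;

static HeapTuple
heap_addheader_example(ExampleCatalogRow *row)
{
	return heap_addheader(2,		/* number of attributes */
						  false,	/* no OID column */
						  sizeof(ExampleCatalogRow),
						  (void *) row);
}
#endif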