Remove redundant IndexTupleDSize macro.

Use IndexTupleSize everywhere, instead.  Also, remove IndexTupleSize's
internal typecast, as that's not really needed and might mask coding
errors.  Change some pointer variable datatypes in the call sites
to compensate for that and make it clearer what we're assuming.

Ildar Musin, Robert Haas, Stephen Frost

Discussion: https://postgr.es/m/0274288e-9e88-13b6-c61c-7b36928bf221@postgrespro.ru
Tom Lane 2018-02-28 19:25:54 -05:00
parent d3b851e9a3
commit d79e7e92bf
9 changed files with 33 additions and 28 deletions
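As a reading aid for the diff below (not part of the commit itself), here is a minimal, self-contained sketch of why the two macros were redundant once the internal typecast was dropped. The struct, the INDEX_SIZE_MASK value, and the main() driver are simplified stand-ins invented for illustration; they are not the real definitions from the PostgreSQL headers.

#include <stdio.h>
#include <stddef.h>

typedef size_t Size;

/* Simplified stand-in; the real value lives in src/include/access/itup.h */
#define INDEX_SIZE_MASK 0x1FFF

typedef struct IndexTupleData
{
    unsigned short t_info;      /* size in the low bits, flag bits above */
} IndexTupleData;

typedef IndexTupleData *IndexTuple;

/* Old pair: one macro took any pointer (hidden cast), the other a struct. */
#define OldIndexTupleSize(itup)  ((Size) (((IndexTuple) (itup))->t_info & INDEX_SIZE_MASK))
#define OldIndexTupleDSize(itup) ((Size) ((itup).t_info & INDEX_SIZE_MASK))

/* New single macro: the caller must hand over a genuine IndexTuple pointer,
 * so passing the wrong type now draws a compiler diagnostic instead of being
 * silently cast away. */
#define IndexTupleSize(itup)     ((Size) ((itup)->t_info & INDEX_SIZE_MASK))

int
main(void)
{
    IndexTupleData tupdata = {.t_info = 24};    /* pretend a 24-byte tuple */
    IndexTuple  itup = &tupdata;

    /* All three spellings compute the same value; only the argument form
     * differs, which is why the commit could drop IndexTupleDSize. */
    printf("%zu %zu %zu\n",
           OldIndexTupleSize(itup),
           OldIndexTupleDSize(*itup),
           IndexTupleSize(itup));
    return 0;
}

With the cast gone, IndexTupleSize only compiles when given a genuine IndexTuple pointer, which is why call sites in the diff that used to hold items as Item (a plain char *) are retyped to IndexTuple or HeapTupleHeader.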


@@ -558,7 +558,7 @@ hash_xlog_move_page_contents(XLogReaderState *record)
 Size itemsz;
 OffsetNumber l;
-itemsz = IndexTupleDSize(*itup);
+itemsz = IndexTupleSize(itup);
 itemsz = MAXALIGN(itemsz);
 data += itemsz;
@@ -686,7 +686,7 @@ hash_xlog_squeeze_page(XLogReaderState *record)
 Size itemsz;
 OffsetNumber l;
-itemsz = IndexTupleDSize(*itup);
+itemsz = IndexTupleSize(itup);
 itemsz = MAXALIGN(itemsz);
 data += itemsz;


@@ -55,7 +55,7 @@ _hash_doinsert(Relation rel, IndexTuple itup, Relation heapRel)
 hashkey = _hash_get_indextuple_hashkey(itup);
 /* compute item size too */
-itemsz = IndexTupleDSize(*itup);
+itemsz = IndexTupleSize(itup);
 itemsz = MAXALIGN(itemsz); /* be safe, PageAddItem will do this but we
 * need to be consistent */
@@ -222,7 +222,7 @@ restart_insert:
 XLogRegisterBuffer(1, metabuf, REGBUF_STANDARD);
 XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
-XLogRegisterBufData(0, (char *) itup, IndexTupleDSize(*itup));
+XLogRegisterBufData(0, (char *) itup, IndexTupleSize(itup));
 recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_INSERT);
@@ -309,7 +309,7 @@ _hash_pgaddmultitup(Relation rel, Buffer buf, IndexTuple *itups,
 {
 Size itemsize;
-itemsize = IndexTupleDSize(*itups[i]);
+itemsize = IndexTupleSize(itups[i]);
 itemsize = MAXALIGN(itemsize);
 /* Find where to insert the tuple (preserving page's hashkey ordering) */


@@ -891,7 +891,7 @@ readpage:
 itup = (IndexTuple) PageGetItem(rpage,
 PageGetItemId(rpage, roffnum));
-itemsz = IndexTupleDSize(*itup);
+itemsz = IndexTupleSize(itup);
 itemsz = MAXALIGN(itemsz);
 /*


@@ -1173,7 +1173,7 @@ _hash_splitbucket(Relation rel,
 * the current page in the new bucket, we must allocate a new
 * overflow page and place the tuple on that page instead.
 */
-itemsz = IndexTupleDSize(*new_itup);
+itemsz = IndexTupleSize(new_itup);
 itemsz = MAXALIGN(itemsz);
 if (PageGetFreeSpaceForMultipleTuples(npage, nitups + 1) < (all_tups_size + itemsz))


@@ -67,9 +67,9 @@ RelationPutHeapTuple(Relation relation,
 if (!token)
 {
 ItemId itemId = PageGetItemId(pageHeader, offnum);
-Item item = PageGetItem(pageHeader, itemId);
+HeapTupleHeader item = (HeapTupleHeader) PageGetItem(pageHeader, itemId);

-((HeapTupleHeader) item)->t_ctid = tuple->t_self;
+item->t_ctid = tuple->t_self;
 }
 }


@@ -558,7 +558,7 @@ _bt_findinsertloc(Relation rel,
 lpageop = (BTPageOpaque) PageGetSpecialPointer(page);
-itemsz = IndexTupleDSize(*newtup);
+itemsz = IndexTupleSize(newtup);
 itemsz = MAXALIGN(itemsz); /* be safe, PageAddItem will do this but we
 * need to be consistent */
@@ -755,7 +755,7 @@ _bt_insertonpg(Relation rel,
 elog(ERROR, "cannot insert to incompletely split page %u",
 BufferGetBlockNumber(buf));
-itemsz = IndexTupleDSize(*itup);
+itemsz = IndexTupleSize(itup);
 itemsz = MAXALIGN(itemsz); /* be safe, PageAddItem will do this but we
 * need to be consistent */
@@ -914,7 +914,7 @@ _bt_insertonpg(Relation rel,
 sizeof(IndexTupleData));
 }
 else
-XLogRegisterBufData(0, (char *) itup, IndexTupleDSize(*itup));
+XLogRegisterBufData(0, (char *) itup, IndexTupleSize(itup));
 recptr = XLogInsert(RM_BTREE_ID, xlinfo);


@@ -813,7 +813,7 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup)
 last_off = state->btps_lastoff;
 pgspc = PageGetFreeSpace(npage);
-itupsz = IndexTupleDSize(*itup);
+itupsz = IndexTupleSize(itup);
 itupsz = MAXALIGN(itupsz);
 /*


@@ -51,9 +51,15 @@ _bt_restore_page(Page page, char *from, int len)
 i = 0;
 while (from < end)
 {
-/* Need to copy tuple header due to alignment considerations */
+/*
+ * As we step through the items, 'from' won't always be properly
+ * aligned, so we need to use memcpy(). Further, we use Item (which
+ * is just a char*) here for our items array for the same reason;
+ * wouldn't want the compiler or anyone thinking that an item is
+ * aligned when it isn't.
+ */
 memcpy(&itupdata, from, sizeof(IndexTupleData));
-itemsz = IndexTupleDSize(itupdata);
+itemsz = IndexTupleSize(&itupdata);
 itemsz = MAXALIGN(itemsz);
 items[i] = (Item) from;
@@ -205,7 +211,7 @@ btree_xlog_split(bool onleft, XLogReaderState *record)
 BTPageOpaque ropaque;
 char *datapos;
 Size datalen;
-Item left_hikey = NULL;
+IndexTuple left_hikey = NULL;
 Size left_hikeysz = 0;
 BlockNumber leftsib;
 BlockNumber rightsib;
@@ -248,7 +254,7 @@ btree_xlog_split(bool onleft, XLogReaderState *record)
 {
 ItemId hiItemId = PageGetItemId(rpage, P_FIRSTDATAKEY(ropaque));
-left_hikey = PageGetItem(rpage, hiItemId);
+left_hikey = (IndexTuple) PageGetItem(rpage, hiItemId);
 left_hikeysz = ItemIdGetLength(hiItemId);
 }
@@ -272,7 +278,7 @@ btree_xlog_split(bool onleft, XLogReaderState *record)
 Page lpage = (Page) BufferGetPage(lbuf);
 BTPageOpaque lopaque = (BTPageOpaque) PageGetSpecialPointer(lpage);
 OffsetNumber off;
-Item newitem = NULL;
+IndexTuple newitem = NULL;
 Size newitemsz = 0;
 Page newlpage;
 OffsetNumber leftoff;
@@ -281,7 +287,7 @@ btree_xlog_split(bool onleft, XLogReaderState *record)
 if (onleft)
 {
-newitem = (Item) datapos;
+newitem = (IndexTuple) datapos;
 newitemsz = MAXALIGN(IndexTupleSize(newitem));
 datapos += newitemsz;
 datalen -= newitemsz;
@@ -290,7 +296,7 @@ btree_xlog_split(bool onleft, XLogReaderState *record)
 /* Extract left hikey and its size (assuming 16-bit alignment) */
 if (!isleaf)
 {
-left_hikey = (Item) datapos;
+left_hikey = (IndexTuple) datapos;
 left_hikeysz = MAXALIGN(IndexTupleSize(left_hikey));
 datapos += left_hikeysz;
 datalen -= left_hikeysz;
@@ -301,7 +307,7 @@ btree_xlog_split(bool onleft, XLogReaderState *record)
 /* Set high key */
 leftoff = P_HIKEY;
-if (PageAddItem(newlpage, left_hikey, left_hikeysz,
+if (PageAddItem(newlpage, (Item) left_hikey, left_hikeysz,
 P_HIKEY, false, false) == InvalidOffsetNumber)
 elog(PANIC, "failed to add high key to left page after split");
 leftoff = OffsetNumberNext(leftoff);
@@ -310,12 +316,12 @@ btree_xlog_split(bool onleft, XLogReaderState *record)
 {
 ItemId itemid;
 Size itemsz;
-Item item;
+IndexTuple item;
 /* add the new item if it was inserted on left page */
 if (onleft && off == xlrec->newitemoff)
 {
-if (PageAddItem(newlpage, newitem, newitemsz, leftoff,
+if (PageAddItem(newlpage, (Item) newitem, newitemsz, leftoff,
 false, false) == InvalidOffsetNumber)
 elog(ERROR, "failed to add new item to left page after split");
 leftoff = OffsetNumberNext(leftoff);
@@ -323,8 +329,8 @@ btree_xlog_split(bool onleft, XLogReaderState *record)
 itemid = PageGetItemId(lpage, off);
 itemsz = ItemIdGetLength(itemid);
-item = PageGetItem(lpage, itemid);
-if (PageAddItem(newlpage, item, itemsz, leftoff,
+item = (IndexTuple) PageGetItem(lpage, itemid);
+if (PageAddItem(newlpage, (Item) item, itemsz, leftoff,
 false, false) == InvalidOffsetNumber)
 elog(ERROR, "failed to add old item to left page after split");
 leftoff = OffsetNumberNext(leftoff);
@@ -333,7 +339,7 @@ btree_xlog_split(bool onleft, XLogReaderState *record)
 /* cope with possibility that newitem goes at the end */
 if (onleft && off == xlrec->newitemoff)
 {
-if (PageAddItem(newlpage, newitem, newitemsz, leftoff,
+if (PageAddItem(newlpage, (Item) newitem, newitemsz, leftoff,
 false, false) == InvalidOffsetNumber)
 elog(ERROR, "failed to add new item to left page after split");
 leftoff = OffsetNumberNext(leftoff);


@@ -67,8 +67,7 @@ typedef IndexAttributeBitMapData * IndexAttributeBitMap;
 #define INDEX_VAR_MASK 0x4000
 #define INDEX_NULL_MASK 0x8000

-#define IndexTupleSize(itup) ((Size) (((IndexTuple) (itup))->t_info & INDEX_SIZE_MASK))
-#define IndexTupleDSize(itup) ((Size) ((itup).t_info & INDEX_SIZE_MASK))
+#define IndexTupleSize(itup) ((Size) ((itup)->t_info & INDEX_SIZE_MASK))
 #define IndexTupleHasNulls(itup) ((((IndexTuple) (itup))->t_info & INDEX_NULL_MASK))
 #define IndexTupleHasVarwidths(itup) ((((IndexTuple) (itup))->t_info & INDEX_VAR_MASK))
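
A closing note on the new _bt_restore_page comment above: 'from' walks a packed byte stream, so each tuple header must be memcpy'd into a properly aligned local before IndexTupleSize can read t_info, and the saved pointers stay typed as Item (a plain char *) so no alignment is implied. The sketch below shows just that pattern; the types, MAXALIGN definition, and driver are simplified stand-ins, not the real nbtxlog.c code.

#include <stdio.h>
#include <string.h>
#include <stddef.h>

typedef size_t Size;
typedef char *Item;             /* Item really is just a char *, so it claims no alignment */

/* Simplified stand-ins for the real PostgreSQL definitions */
#define INDEX_SIZE_MASK 0x1FFF
#define MAXIMUM_ALIGNOF 8
#define MAXALIGN(LEN) \
    (((Size) (LEN) + (MAXIMUM_ALIGNOF - 1)) & ~((Size) (MAXIMUM_ALIGNOF - 1)))

typedef struct IndexTupleData
{
    unsigned short t_info;      /* tuple size lives in the low bits */
} IndexTupleData;

#define IndexTupleSize(itup) ((Size) ((itup)->t_info & INDEX_SIZE_MASK))

/* Walk tuples packed back to back in a byte buffer, the way a WAL record
 * body packs them. */
static void
walk_packed_tuples(char *base, Size len)
{
    char *from = base;
    char *end = base + len;

    while (from < end)
    {
        IndexTupleData itupdata;
        Size itemsz;
        Item item = (Item) from;    /* remember the raw, possibly unaligned start */

        /* 'from' may not be suitably aligned for IndexTupleData, so copy the
         * header into an aligned local before reading t_info. */
        memcpy(&itupdata, from, sizeof(IndexTupleData));
        itemsz = IndexTupleSize(&itupdata);
        itemsz = MAXALIGN(itemsz);

        printf("tuple at offset %td, size %zu\n", item - base, itemsz);
        from += itemsz;
    }
}

int
main(void)
{
    char buf[64];
    IndexTupleData hdr = {.t_info = 20};    /* pretend each tuple is 20 bytes... */

    /* ...which MAXALIGN rounds up to 24, so pack two headers 24 bytes apart */
    memcpy(buf, &hdr, sizeof(hdr));
    memcpy(buf + 24, &hdr, sizeof(hdr));
    walk_packed_tuples(buf, 48);
    return 0;
}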