From a5213adf3d351a31c5f5eae1a756a9d3555dc31c Mon Sep 17 00:00:00 2001 From: Peter Geoghegan Date: Wed, 27 Oct 2021 12:10:47 -0700 Subject: [PATCH] Further harden nbtree posting split code. Add more defensive checks around posting list split code. These should detect corruption involving duplicate table TIDs earlier and more reliably than any existing check. Follow up to commit 8f72bbac. Discussion: https://postgr.es/m/CAH2-WzkrSY_kjyd1_M5xJK1uM0govJXMxPn8JUSvwcUOiHuWVw@mail.gmail.com Backpatch: 13-, where nbtree deduplication was introduced. --- src/backend/access/nbtree/nbtinsert.c | 23 +++++++++++++++++++++-- src/backend/access/nbtree/nbtsearch.c | 17 +++++++++++++++++ 2 files changed, 38 insertions(+), 2 deletions(-) diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c index 7355e1dba1..a755aee55e 100644 --- a/src/backend/access/nbtree/nbtinsert.c +++ b/src/backend/access/nbtree/nbtinsert.c @@ -1164,10 +1164,29 @@ _bt_insertonpg(Relation rel, * its post-split version is treated as an extra step in either the * insert or page split critical section. */ - Assert(isleaf && !ItemIdIsDead(itemid)); - Assert(itup_key->heapkeyspace && itup_key->allequalimage); + Assert(isleaf && itup_key->heapkeyspace && itup_key->allequalimage); oposting = (IndexTuple) PageGetItem(page, itemid); + /* + * postingoff value comes from earlier call to _bt_binsrch_posting(). + * Its binary search might think that a plain tuple must be a posting + * list tuple that needs to be split. This can happen with corruption + * involving an existing plain tuple that is a duplicate of the new + * item, up to and including its table TID. Check for that here in + * passing. + * + * Also verify that our caller has made sure that the existing posting + * list tuple does not have its LP_DEAD bit set. 
+ */
+ if (!BTreeTupleIsPosting(oposting) || ItemIdIsDead(itemid))
+ ereport(ERROR,
+ (errcode(ERRCODE_INDEX_CORRUPTED),
+ errmsg_internal("table tid from new index tuple (%u,%u) overlaps with invalid duplicate tuple at offset %u of block %u in index \"%s\"",
+ ItemPointerGetBlockNumber(&itup->t_tid),
+ ItemPointerGetOffsetNumber(&itup->t_tid),
+ newitemoff, BufferGetBlockNumber(buf),
+ RelationGetRelationName(rel)))); 
+ /* use a mutable copy of itup as our itup from here on */ origitup = itup; itup = CopyIndexTuple(origitup); diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c index d1177d8772..fdf0e5654a 100644 --- a/src/backend/access/nbtree/nbtsearch.c +++ b/src/backend/access/nbtree/nbtsearch.c @@ -526,7 +526,24 @@ _bt_binsrch_insert(Relation rel, BTInsertState insertstate) * infrequently. */ if (unlikely(result == 0 && key->scantid != NULL)) + { + /* + * postingoff should never be set more than once per leaf page + * binary search. That would mean that there are duplicate table + * TIDs in the index, which is never okay. Check for that here. + */ + if (insertstate->postingoff != 0) + ereport(ERROR, + (errcode(ERRCODE_INDEX_CORRUPTED), + errmsg_internal("table tid from new index tuple (%u,%u) cannot find insert offset between offsets %u and %u of block %u in index \"%s\"", + ItemPointerGetBlockNumber(key->scantid), + ItemPointerGetOffsetNumber(key->scantid), + low, stricthigh, + BufferGetBlockNumber(insertstate->buf), + RelationGetRelationName(rel)))); + insertstate->postingoff = _bt_binsrch_posting(key, page, mid); + } } /*