diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c index 2c98405aac..96b7593fc1 100644 --- a/src/backend/access/nbtree/nbtinsert.c +++ b/src/backend/access/nbtree/nbtinsert.c @@ -15,9 +15,9 @@ #include "postgres.h" -#include "access/heapam.h" #include "access/nbtree.h" #include "access/nbtxlog.h" +#include "access/tableam.h" #include "access/transam.h" #include "access/xloginsert.h" #include "miscadmin.h" @@ -431,12 +431,14 @@ _bt_check_unique(Relation rel, BTInsertState insertstate, Relation heapRel, } /* - * We check the whole HOT-chain to see if there is any tuple - * that satisfies SnapshotDirty. This is necessary because we - * have just a single index entry for the entire chain. + * Check if there's any table tuples for this index entry + * satisfying SnapshotDirty. This is necessary because for AMs + * with optimizations like heap's HOT, we have just a single + * index entry for the entire chain. */ - else if (heap_hot_search(&htid, heapRel, &SnapshotDirty, - &all_dead)) + else if (table_index_fetch_tuple_check(heapRel, &htid, + &SnapshotDirty, + &all_dead)) { TransactionId xwait; @@ -489,7 +491,8 @@ _bt_check_unique(Relation rel, BTInsertState insertstate, Relation heapRel, * entry. 
*/ htid = itup->t_tid; - if (heap_hot_search(&htid, heapRel, SnapshotSelf, NULL)) + if (table_index_fetch_tuple_check(heapRel, &htid, + SnapshotSelf, NULL)) { /* Normal case --- it's still live */ } diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c index 2762a2d548..46e0831834 100644 --- a/src/backend/access/nbtree/nbtsort.c +++ b/src/backend/access/nbtree/nbtsort.c @@ -57,10 +57,10 @@ #include "postgres.h" -#include "access/heapam.h" #include "access/nbtree.h" #include "access/parallel.h" #include "access/relscan.h" +#include "access/table.h" #include "access/tableam.h" #include "access/xact.h" #include "access/xlog.h" diff --git a/src/backend/access/table/tableam.c b/src/backend/access/table/tableam.c index b1e3198291..8ad4c62943 100644 --- a/src/backend/access/table/tableam.c +++ b/src/backend/access/table/tableam.c @@ -176,6 +176,40 @@ table_beginscan_parallel(Relation relation, ParallelTableScanDesc parallel_scan) } +/* ---------------------------------------------------------------------------- + * Index scan related functions. + * ---------------------------------------------------------------------------- + */ + +/* + * To perform that check simply start an index scan, create the necessary + * slot, do the heap lookup, and shut everything down again. This could be + * optimized, but is unlikely to matter from a performance POV. If there + * frequently are live index pointers also matching a unique index key, the + * CPU overhead of this routine is unlikely to matter. 
+ */ +bool +table_index_fetch_tuple_check(Relation rel, + ItemPointer tid, + Snapshot snapshot, + bool *all_dead) +{ + IndexFetchTableData *scan; + TupleTableSlot *slot; + bool call_again = false; + bool found; + + slot = table_slot_create(rel, NULL); + scan = table_index_fetch_begin(rel); + found = table_index_fetch_tuple(scan, tid, snapshot, slot, &call_again, + all_dead); + table_index_fetch_end(scan); + ExecDropSingleTupleTableSlot(slot); + + return found; +} + + /* ---------------------------------------------------------------------------- * Functions to make modifications a bit simpler. * ---------------------------------------------------------------------------- diff --git a/src/include/access/tableam.h b/src/include/access/tableam.h index 51c370e6ca..29371f4c47 100644 --- a/src/include/access/tableam.h +++ b/src/include/access/tableam.h @@ -256,9 +256,10 @@ typedef struct TableAmRoutine * needs be set to true by index_fetch_tuple, signalling to the caller * that index_fetch_tuple should be called again for the same tid. * - * *all_dead should be set to true by index_fetch_tuple iff it is - * guaranteed that no backend needs to see that tuple. Index AMs can use - * that do avoid returning that tid in future searches. + * *all_dead, if all_dead is not NULL, should be set to true by + * index_fetch_tuple iff it is guaranteed that no backend needs to see + * that tuple. Index AMs can use that to avoid returning that tid in + * future searches. */ bool (*index_fetch_tuple) (struct IndexFetchTableData *scan, ItemPointer tid, @@ -594,9 +595,10 @@ table_index_fetch_end(struct IndexFetchTableData *scan) * will be set to true, signalling that table_index_fetch_tuple() should be called * again for the same tid. * - * *all_dead will be set to true by table_index_fetch_tuple() iff it is guaranteed - * that no backend needs to see that tuple. Index AMs can use that do avoid - * returning that tid in future searches. 
+ * *all_dead, if all_dead is not NULL, will be set to true by + * table_index_fetch_tuple() iff it is guaranteed that no backend needs to see + * that tuple. Index AMs can use that to avoid returning that tid in future + * searches. * * The difference between this function and table_fetch_row_version is that * this function returns the currently visible version of a row if the AM @@ -618,6 +620,17 @@ table_index_fetch_tuple(struct IndexFetchTableData *scan, all_dead); } +/* + * This is a convenience wrapper around table_index_fetch_tuple() which + * returns whether there are table tuple items corresponding to an index + * entry. This likely is only useful to verify if there's a conflict in a + * unique index. + */ +extern bool table_index_fetch_tuple_check(Relation rel, + ItemPointer tid, + Snapshot snapshot, + bool *all_dead); + /* ------------------------------------------------------------------------ * Functions for non-modifying operations on individual tuples