From 1489b1ce728248e04da72aa32f87e9a634ebf9b8 Mon Sep 17 00:00:00 2001 From: Peter Geoghegan Date: Thu, 17 Nov 2022 14:55:08 -0800 Subject: [PATCH] Standardize rmgrdesc recovery conflict XID output. Standardize on the name snapshotConflictHorizon for all XID fields from WAL records that generate recovery conflicts when in hot standby mode. This supersedes the previous latestRemovedXid naming convention. The new naming convention places emphasis on how the values are actually used by REDO routines. How the values are generated during original execution (details of which vary by record type) is deemphasized. Users of tools like pg_waldump can now grep for snapshotConflictHorizon to see all potential sources of recovery conflicts in a standardized way, without necessarily having to consider which specific record types might be involved. Also bring a couple of WAL record types that didn't follow any kind of naming convention into line. These are heapam's VISIBLE record type and SP-GiST's VACUUM_REDIRECT record type. Now every WAL record whose REDO routine calls ResolveRecoveryConflictWithSnapshot() passes through the snapshotConflictHorizon field from its WAL record. This is follow-up work to the refactoring from commit 9e540599 that made FREEZE_PAGE WAL records use a standard snapshotConflictHorizon style XID cutoff. No bump in XLOG_PAGE_MAGIC, since the underlying format of affected WAL records doesn't change. Author: Peter Geoghegan Reviewed-By: Andres Freund Discussion: https://postgr.es/m/CAH2-Wzm2CQUmViUq7Opgk=McVREHSOorYaAjR1ZpLYkRN7_dPw@mail.gmail.com --- src/backend/access/gist/gist.c | 6 +- src/backend/access/gist/gistxlog.c | 17 +++--- src/backend/access/hash/hash_xlog.c | 3 +- src/backend/access/hash/hashinsert.c | 10 ++-- src/backend/access/heap/heapam.c | 76 +++++++++++++++----------- src/backend/access/heap/pruneheap.c | 14 ++--- src/backend/access/index/genam.c | 21 +++---- src/backend/access/nbtree/README | 8 +-- src/backend/access/nbtree/nbtpage.c | 29 +++++----- src/backend/access/nbtree/nbtxlog.c | 7 ++- src/backend/access/rmgrdesc/gistdesc.c | 10 ++-- src/backend/access/rmgrdesc/hashdesc.c | 4 +- src/backend/access/rmgrdesc/heapdesc.c | 12 ++-- src/backend/access/rmgrdesc/nbtdesc.c | 11 ++-- src/backend/access/rmgrdesc/spgdesc.c | 4 +- src/backend/access/spgist/spgvacuum.c | 8 +-- src/backend/access/spgist/spgxlog.c | 11 ++-- src/backend/storage/ipc/procarray.c | 11 +++- src/backend/storage/ipc/standby.c | 27 ++++++--- src/include/access/gist_private.h | 4 +- src/include/access/gistxlog.h | 6 +- src/include/access/hash_xlog.h | 2 +- src/include/access/heapam_xlog.h | 14 +++-- src/include/access/nbtxlog.h | 6 +- src/include/access/spgxlog.h | 2 +- src/include/access/tableam.h | 2 +- src/include/storage/standby.h | 4 +- 27 files changed, 179 insertions(+), 150 deletions(-) diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c index 30069f139c..3e275e6700 100644 --- a/src/backend/access/gist/gist.c +++ b/src/backend/access/gist/gist.c @@ -1665,10 +1665,10 @@ gistprunepage(Relation rel, Page page, Buffer buffer, Relation heapRel) if (ndeletable > 0) { - TransactionId latestRemovedXid = InvalidTransactionId; + TransactionId snapshotConflictHorizon = InvalidTransactionId; if (XLogStandbyInfoActive() && RelationNeedsWAL(rel)) - latestRemovedXid = + snapshotConflictHorizon = index_compute_xid_horizon_for_tuples(rel, heapRel, buffer, deletable, ndeletable); @@ -1694,7 +1694,7 @@ gistprunepage(Relation rel, Page page, Buffer buffer, Relation heapRel) recptr = 
gistXLogDelete(buffer, deletable, ndeletable, - latestRemovedXid); + snapshotConflictHorizon); PageSetLSN(page, recptr); } diff --git a/src/backend/access/gist/gistxlog.c b/src/backend/access/gist/gistxlog.c index 998befd2cb..cb5affa3d2 100644 --- a/src/backend/access/gist/gistxlog.c +++ b/src/backend/access/gist/gistxlog.c @@ -195,7 +195,7 @@ gistRedoDeleteRecord(XLogReaderState *record) XLogRecGetBlockTag(record, 0, &rlocator, NULL, NULL); - ResolveRecoveryConflictWithSnapshot(xldata->latestRemovedXid, + ResolveRecoveryConflictWithSnapshot(xldata->snapshotConflictHorizon, rlocator); } @@ -388,14 +388,14 @@ gistRedoPageReuse(XLogReaderState *record) * PAGE_REUSE records exist to provide a conflict point when we reuse * pages in the index via the FSM. That's all they do though. * - * latestRemovedXid was the page's deleteXid. The + * snapshotConflictHorizon was the page's deleteXid. The * GlobalVisCheckRemovableFullXid(deleteXid) test in gistPageRecyclable() * conceptually mirrors the PGPROC->xmin > limitXmin test in * GetConflictingVirtualXIDs(). Consequently, one XID value achieves the * same exclusion effect on primary and standby. */ if (InHotStandby) - ResolveRecoveryConflictWithSnapshotFullXid(xlrec->latestRemovedFullXid, + ResolveRecoveryConflictWithSnapshotFullXid(xlrec->snapshotConflictHorizon, xlrec->locator); } @@ -597,7 +597,7 @@ gistXLogAssignLSN(void) * Write XLOG record about reuse of a deleted page. */ void -gistXLogPageReuse(Relation rel, BlockNumber blkno, FullTransactionId latestRemovedXid) +gistXLogPageReuse(Relation rel, BlockNumber blkno, FullTransactionId deleteXid) { gistxlogPageReuse xlrec_reuse; @@ -610,7 +610,7 @@ gistXLogPageReuse(Relation rel, BlockNumber blkno, FullTransactionId latestRemov /* XLOG stuff */ xlrec_reuse.locator = rel->rd_locator; xlrec_reuse.block = blkno; - xlrec_reuse.latestRemovedFullXid = latestRemovedXid; + xlrec_reuse.snapshotConflictHorizon = deleteXid; XLogBeginInsert(); XLogRegisterData((char *) &xlrec_reuse, SizeOfGistxlogPageReuse); @@ -672,12 +672,12 @@ gistXLogUpdate(Buffer buffer, */ XLogRecPtr gistXLogDelete(Buffer buffer, OffsetNumber *todelete, int ntodelete, - TransactionId latestRemovedXid) + TransactionId snapshotConflictHorizon) { gistxlogDelete xlrec; XLogRecPtr recptr; - xlrec.latestRemovedXid = latestRemovedXid; + xlrec.snapshotConflictHorizon = snapshotConflictHorizon; xlrec.ntodelete = ntodelete; XLogBeginInsert(); @@ -685,7 +685,8 @@ gistXLogDelete(Buffer buffer, OffsetNumber *todelete, int ntodelete, /* * We need the target-offsets array whether or not we store the whole - * buffer, to allow us to find the latestRemovedXid on a standby server. + * buffer, to allow us to find the snapshotConflictHorizon on a standby + * server. 
*/ XLogRegisterData((char *) todelete, ntodelete * sizeof(OffsetNumber)); diff --git a/src/backend/access/hash/hash_xlog.c b/src/backend/access/hash/hash_xlog.c index a24a1c3908..b452697a2f 100644 --- a/src/backend/access/hash/hash_xlog.c +++ b/src/backend/access/hash/hash_xlog.c @@ -1000,7 +1000,8 @@ hash_xlog_vacuum_one_page(XLogReaderState *record) RelFileLocator rlocator; XLogRecGetBlockTag(record, 0, &rlocator, NULL, NULL); - ResolveRecoveryConflictWithSnapshot(xldata->latestRemovedXid, rlocator); + ResolveRecoveryConflictWithSnapshot(xldata->snapshotConflictHorizon, + rlocator); } action = XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true, &buffer); diff --git a/src/backend/access/hash/hashinsert.c b/src/backend/access/hash/hashinsert.c index 4f2fecb908..23907d2e5b 100644 --- a/src/backend/access/hash/hashinsert.c +++ b/src/backend/access/hash/hashinsert.c @@ -360,9 +360,9 @@ _hash_vacuum_one_page(Relation rel, Relation hrel, Buffer metabuf, Buffer buf) if (ndeletable > 0) { - TransactionId latestRemovedXid; + TransactionId snapshotConflictHorizon; - latestRemovedXid = + snapshotConflictHorizon = index_compute_xid_horizon_for_tuples(rel, hrel, buf, deletable, ndeletable); @@ -399,7 +399,7 @@ _hash_vacuum_one_page(Relation rel, Relation hrel, Buffer metabuf, Buffer buf) xl_hash_vacuum_one_page xlrec; XLogRecPtr recptr; - xlrec.latestRemovedXid = latestRemovedXid; + xlrec.snapshotConflictHorizon = snapshotConflictHorizon; xlrec.ntuples = ndeletable; XLogBeginInsert(); @@ -408,8 +408,8 @@ _hash_vacuum_one_page(Relation rel, Relation hrel, Buffer metabuf, Buffer buf) /* * We need the target-offsets array whether or not we store the - * whole buffer, to allow us to find the latestRemovedXid on a - * standby server. + * whole buffer, to allow us to find the snapshotConflictHorizon + * on a standby server. */ XLogRegisterData((char *) deletable, ndeletable * sizeof(OffsetNumber)); diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index 9790ba2298..d18c5ca6f5 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -6792,7 +6792,7 @@ heap_freeze_execute_prepared(Relation rel, Buffer buffer, Page page = BufferGetPage(buffer); Assert(ntuples > 0); - Assert(TransactionIdIsValid(FreezeLimit)); + Assert(TransactionIdIsNormal(FreezeLimit)); START_CRIT_SECTION(); @@ -6815,21 +6815,20 @@ heap_freeze_execute_prepared(Relation rel, Buffer buffer, int nplans; xl_heap_freeze_page xlrec; XLogRecPtr recptr; - TransactionId latestRemovedXid; + TransactionId snapshotConflictHorizon; /* Prepare deduplicated representation for use in WAL record */ nplans = heap_xlog_freeze_plan(tuples, ntuples, plans, offsets); /* - * latestRemovedXid describes the latest processed XID, whereas * FreezeLimit is (approximately) the first XID not frozen by VACUUM. * Back up caller's FreezeLimit to avoid false conflicts when * FreezeLimit is precisely equal to VACUUM's OldestXmin cutoff. */ - latestRemovedXid = FreezeLimit; - TransactionIdRetreat(latestRemovedXid); + snapshotConflictHorizon = FreezeLimit; + TransactionIdRetreat(snapshotConflictHorizon); - xlrec.latestRemovedXid = latestRemovedXid; + xlrec.snapshotConflictHorizon = snapshotConflictHorizon; xlrec.nplans = nplans; XLogBeginInsert(); @@ -7401,15 +7400,21 @@ heap_tuple_would_freeze(HeapTupleHeader tuple, TransactionId cutoff_xid, } /* - * If 'tuple' contains any visible XID greater than latestRemovedXid, - * ratchet forwards latestRemovedXid to the greatest one found. 
- * This is used as the basis for generating Hot Standby conflicts, so - * if a tuple was never visible then removing it should not conflict - * with queries. + * Maintain snapshotConflictHorizon for caller by ratcheting forward its value + * using any committed XIDs contained in 'tuple', an obsolescent heap tuple + * that caller is in the process of physically removing, e.g. via HOT pruning + * or index deletion. + * + * Caller must initialize its value to InvalidTransactionId, which is + * generally interpreted as "definitely no need for a recovery conflict". + * Final value must reflect all heap tuples that caller will physically remove + * (or remove TID references to) via its ongoing pruning/deletion operation. + * ResolveRecoveryConflictWithSnapshot() is passed the final value (taken from + * caller's WAL record) by REDO routine when it replays caller's operation. */ void -HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple, - TransactionId *latestRemovedXid) +HeapTupleHeaderAdvanceConflictHorizon(HeapTupleHeader tuple, + TransactionId *snapshotConflictHorizon) { TransactionId xmin = HeapTupleHeaderGetXmin(tuple); TransactionId xmax = HeapTupleHeaderGetUpdateXid(tuple); @@ -7417,8 +7422,8 @@ HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple, if (tuple->t_infomask & HEAP_MOVED) { - if (TransactionIdPrecedes(*latestRemovedXid, xvac)) - *latestRemovedXid = xvac; + if (TransactionIdPrecedes(*snapshotConflictHorizon, xvac)) + *snapshotConflictHorizon = xvac; } /* @@ -7431,11 +7436,9 @@ HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple, (!HeapTupleHeaderXminInvalid(tuple) && TransactionIdDidCommit(xmin))) { if (xmax != xmin && - TransactionIdFollows(xmax, *latestRemovedXid)) - *latestRemovedXid = xmax; + TransactionIdFollows(xmax, *snapshotConflictHorizon)) + *snapshotConflictHorizon = xmax; } - - /* *latestRemovedXid may still be invalid at end */ } #ifdef USE_PREFETCH @@ -7558,7 +7561,7 @@ TransactionId heap_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate) { /* Initial assumption is that earlier pruning took care of conflict */ - TransactionId latestRemovedXid = InvalidTransactionId; + TransactionId snapshotConflictHorizon = InvalidTransactionId; BlockNumber blkno = InvalidBlockNumber; Buffer buf = InvalidBuffer; Page page = NULL; @@ -7769,8 +7772,8 @@ heap_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate) } /* - * Maintain latestRemovedXid value for deletion operation as a whole - * by advancing current value using heap tuple headers. This is + * Maintain snapshotConflictHorizon value for deletion operation as a + * whole by advancing current value using heap tuple headers. This is * loosely based on the logic for pruning a HOT chain. */ offnum = ItemPointerGetOffsetNumber(htid); @@ -7805,12 +7808,12 @@ heap_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate) * LP_DEAD item. This is okay because the earlier pruning * operation that made the line pointer LP_DEAD in the first place * must have considered the original tuple header as part of - * generating its own latestRemovedXid value. + * generating its own snapshotConflictHorizon value. * * Relying on XLOG_HEAP2_PRUNE records like this is the same * strategy that index vacuuming uses in all cases. Index VACUUM - * WAL records don't even have a latestRemovedXid field of their - * own for this reason. + * WAL records don't even have a snapshotConflictHorizon field of + * their own for this reason. 
*/ if (!ItemIdIsNormal(lp)) break; @@ -7824,7 +7827,8 @@ heap_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate) !TransactionIdEquals(HeapTupleHeaderGetXmin(htup), priorXmax)) break; - HeapTupleHeaderAdvanceLatestRemovedXid(htup, &latestRemovedXid); + HeapTupleHeaderAdvanceConflictHorizon(htup, + &snapshotConflictHorizon); /* * If the tuple is not HOT-updated, then we are at the end of this @@ -7856,7 +7860,7 @@ heap_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate) Assert(finalndeltids > 0 || delstate->bottomup); delstate->ndeltids = finalndeltids; - return latestRemovedXid; + return snapshotConflictHorizon; } /* @@ -8232,6 +8236,9 @@ bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate) * corresponding visibility map block. Both should have already been modified * and dirtied. * + * snapshotConflictHorizon comes from the largest xmin on the page being + * marked all-visible. REDO routine uses it to generate recovery conflicts. + * * If checksums or wal_log_hints are enabled, we may also generate a full-page * image of heap_buffer. Otherwise, we optimize away the FPI (by specifying * REGBUF_NO_IMAGE for the heap buffer), in which case the caller should *not* @@ -8239,7 +8246,7 @@ bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate) */ XLogRecPtr log_heap_visible(RelFileLocator rlocator, Buffer heap_buffer, Buffer vm_buffer, - TransactionId cutoff_xid, uint8 vmflags) + TransactionId snapshotConflictHorizon, uint8 vmflags) { xl_heap_visible xlrec; XLogRecPtr recptr; @@ -8248,7 +8255,7 @@ log_heap_visible(RelFileLocator rlocator, Buffer heap_buffer, Buffer vm_buffer, Assert(BufferIsValid(heap_buffer)); Assert(BufferIsValid(vm_buffer)); - xlrec.cutoff_xid = cutoff_xid; + xlrec.snapshotConflictHorizon = snapshotConflictHorizon; xlrec.flags = vmflags; XLogBeginInsert(); XLogRegisterData((char *) &xlrec, SizeOfHeapVisible); @@ -8683,7 +8690,8 @@ heap_xlog_prune(XLogReaderState *record) * no queries running for which the removed tuples are still visible. */ if (InHotStandby) - ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, rlocator); + ResolveRecoveryConflictWithSnapshot(xlrec->snapshotConflictHorizon, + rlocator); /* * If we have a full-page image, restore it (using a cleanup lock) and @@ -8851,7 +8859,8 @@ heap_xlog_visible(XLogReaderState *record) * rather than killing the transaction outright. */ if (InHotStandby) - ResolveRecoveryConflictWithSnapshot(xlrec->cutoff_xid, rlocator); + ResolveRecoveryConflictWithSnapshot(xlrec->snapshotConflictHorizon, + rlocator); /* * Read the heap page, if it still exists. 
If the heap file has dropped or @@ -8939,7 +8948,7 @@ heap_xlog_visible(XLogReaderState *record) visibilitymap_pin(reln, blkno, &vmbuffer); visibilitymap_set(reln, blkno, InvalidBuffer, lsn, vmbuffer, - xlrec->cutoff_xid, xlrec->flags); + xlrec->snapshotConflictHorizon, xlrec->flags); ReleaseBuffer(vmbuffer); FreeFakeRelcacheEntry(reln); @@ -9105,7 +9114,8 @@ heap_xlog_freeze_page(XLogReaderState *record) RelFileLocator rlocator; XLogRecGetBlockTag(record, 0, &rlocator, NULL, NULL); - ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, rlocator); + ResolveRecoveryConflictWithSnapshot(xlrec->snapshotConflictHorizon, + rlocator); } if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO) diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c index 9f43bbe25f..91c5f5e9ef 100644 --- a/src/backend/access/heap/pruneheap.c +++ b/src/backend/access/heap/pruneheap.c @@ -49,7 +49,7 @@ typedef struct bool old_snap_used; TransactionId new_prune_xid; /* new prune hint value for page */ - TransactionId latestRemovedXid; /* latest xid to be removed by this prune */ + TransactionId snapshotConflictHorizon; /* latest xid removed */ int nredirected; /* numbers of entries in arrays below */ int ndead; int nunused; @@ -295,7 +295,7 @@ heap_page_prune(Relation relation, Buffer buffer, prstate.old_snap_xmin = old_snap_xmin; prstate.old_snap_ts = old_snap_ts; prstate.old_snap_used = false; - prstate.latestRemovedXid = InvalidTransactionId; + prstate.snapshotConflictHorizon = InvalidTransactionId; prstate.nredirected = prstate.ndead = prstate.nunused = 0; memset(prstate.marked, 0, sizeof(prstate.marked)); @@ -418,7 +418,7 @@ heap_page_prune(Relation relation, Buffer buffer, xl_heap_prune xlrec; XLogRecPtr recptr; - xlrec.latestRemovedXid = prstate.latestRemovedXid; + xlrec.snapshotConflictHorizon = prstate.snapshotConflictHorizon; xlrec.nredirected = prstate.nredirected; xlrec.ndead = prstate.ndead; @@ -636,8 +636,8 @@ heap_prune_chain(Buffer buffer, OffsetNumber rootoffnum, PruneState *prstate) !HeapTupleHeaderIsHotUpdated(htup)) { heap_prune_record_unused(prstate, rootoffnum); - HeapTupleHeaderAdvanceLatestRemovedXid(htup, - &prstate->latestRemovedXid); + HeapTupleHeaderAdvanceConflictHorizon(htup, + &prstate->snapshotConflictHorizon); ndeleted++; } @@ -773,8 +773,8 @@ heap_prune_chain(Buffer buffer, OffsetNumber rootoffnum, PruneState *prstate) if (tupdead) { latestdead = offnum; - HeapTupleHeaderAdvanceLatestRemovedXid(htup, - &prstate->latestRemovedXid); + HeapTupleHeaderAdvanceConflictHorizon(htup, + &prstate->snapshotConflictHorizon); } else if (!recent_dead) break; diff --git a/src/backend/access/index/genam.c b/src/backend/access/index/genam.c index 98af5347b9..01d08960b4 100644 --- a/src/backend/access/index/genam.c +++ b/src/backend/access/index/genam.c @@ -275,14 +275,15 @@ BuildIndexValueDescription(Relation indexRelation, } /* - * Get the latestRemovedXid from the table entries pointed at by the index - * tuples being deleted using an AM-generic approach. + * Get the snapshotConflictHorizon from the table entries pointed to by the + * index tuples being deleted using an AM-generic approach. * - * This is a table_index_delete_tuples() shim used by index AMs that have - * simple requirements. These callers only need to consult the tableam to get - * a latestRemovedXid value, and only expect to delete tuples that are already - * known deletable. 
When a latestRemovedXid value isn't needed in index AM's - * deletion WAL record, it is safe for it to skip calling here entirely. + * This is a table_index_delete_tuples() shim used by index AMs that only need + * to consult the tableam to get a snapshotConflictHorizon value, and only + * expect to delete index tuples that are already known deletable (typically + * due to having LP_DEAD bits set). When a snapshotConflictHorizon value + * isn't needed in index AM's deletion WAL record, it is safe for it to skip + * calling here entirely. * * We assume that caller index AM uses the standard IndexTuple representation, * with table TIDs stored in the t_tid field. We also expect (and assert) @@ -297,7 +298,7 @@ index_compute_xid_horizon_for_tuples(Relation irel, int nitems) { TM_IndexDeleteOp delstate; - TransactionId latestRemovedXid = InvalidTransactionId; + TransactionId snapshotConflictHorizon = InvalidTransactionId; Page ipage = BufferGetPage(ibuf); IndexTuple itup; @@ -333,7 +334,7 @@ index_compute_xid_horizon_for_tuples(Relation irel, } /* determine the actual xid horizon */ - latestRemovedXid = table_index_delete_tuples(hrel, &delstate); + snapshotConflictHorizon = table_index_delete_tuples(hrel, &delstate); /* assert tableam agrees that all items are deletable */ Assert(delstate.ndeltids == nitems); @@ -341,7 +342,7 @@ index_compute_xid_horizon_for_tuples(Relation irel, pfree(delstate.deltids); pfree(delstate.status); - return latestRemovedXid; + return snapshotConflictHorizon; } diff --git a/src/backend/access/nbtree/README b/src/backend/access/nbtree/README index 5529afc1fe..dd0f7ad2bd 100644 --- a/src/backend/access/nbtree/README +++ b/src/backend/access/nbtree/README @@ -528,17 +528,17 @@ from the index immediately; since index scans only stop "between" pages, no scan can lose its place from such a deletion. We separate the steps because we allow LP_DEAD to be set with only a share lock (it's like a hint bit for a heap tuple), but physically deleting tuples requires an -exclusive lock. We also need to generate a latestRemovedXid value for +exclusive lock. We also need to generate a snapshotConflictHorizon for each deletion operation's WAL record, which requires additional coordinating with the tableam when the deletion actually takes place. -(This latestRemovedXid value may be used to generate a recovery conflict -during subsequent REDO of the record by a standby.) +(This snapshotConflictHorizon value may be used to generate a recovery +conflict during subsequent REDO of the record by a standby.) Delaying and batching index tuple deletion like this enables a further optimization: opportunistic checking of "extra" nearby index tuples (tuples that are not LP_DEAD-set) when they happen to be very cheap to check in passing (because we already know that the tableam will be -visiting their table block to generate a latestRemovedXid value). Any +visiting their table block to generate a snapshotConflictHorizon). Any index tuples that turn out to be safe to delete will also be deleted. Simple deletion will behave as if the extra tuples that actually turn out to be delete-safe had their LP_DEAD bits set right from the start.
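A rough sketch of the convention that the genam.c shim and the nbtree README describe (the surrounding deletion path is hypothetical; only index_compute_xid_horizon_for_tuples(), XLogStandbyInfoActive(), and RelationNeedsWAL() are real interfaces touched by this patch):

    TransactionId snapshotConflictHorizon = InvalidTransactionId;

    /*
     * Only consult the tableam for a horizon when the deletion will be
     * WAL-logged with standby info.  An InvalidTransactionId value is
     * interpreted as "definitely no recovery conflict needed" at REDO time.
     */
    if (XLogStandbyInfoActive() && RelationNeedsWAL(rel))
        snapshotConflictHorizon =
            index_compute_xid_horizon_for_tuples(rel, heapRel, buf,
                                                 deletable, ndeletable);

    /* delete the items, then store snapshotConflictHorizon in the WAL record */

This is the same pattern followed by the gistprunepage() and _hash_vacuum_one_page() hunks above.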
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c index 8b96708b3e..65aa44893c 100644 --- a/src/backend/access/nbtree/nbtpage.c +++ b/src/backend/access/nbtree/nbtpage.c @@ -41,7 +41,7 @@ static BTMetaPageData *_bt_getmeta(Relation rel, Buffer metabuf); static void _bt_log_reuse_page(Relation rel, BlockNumber blkno, FullTransactionId safexid); static void _bt_delitems_delete(Relation rel, Buffer buf, - TransactionId latestRemovedXid, + TransactionId snapshotConflictHorizon, OffsetNumber *deletable, int ndeletable, BTVacuumPosting *updatable, int nupdatable); static char *_bt_delitems_update(BTVacuumPosting *updatable, int nupdatable, @@ -838,7 +838,7 @@ _bt_log_reuse_page(Relation rel, BlockNumber blkno, FullTransactionId safexid) /* XLOG stuff */ xlrec_reuse.locator = rel->rd_locator; xlrec_reuse.block = blkno; - xlrec_reuse.latestRemovedFullXid = safexid; + xlrec_reuse.snapshotConflictHorizon = safexid; XLogBeginInsert(); XLogRegisterData((char *) &xlrec_reuse, SizeOfBtreeReusePage); @@ -1156,7 +1156,7 @@ _bt_pageinit(Page page, Size size) * (a version that lacks the TIDs that are to be deleted). * * We record VACUUMs and b-tree deletes differently in WAL. Deletes must - * generate their own latestRemovedXid by accessing the table directly, + * generate their own snapshotConflictHorizon directly from the tableam, * whereas VACUUMs rely on the initial VACUUM table scan performing * WAL-logging that takes care of the issue for the table's indexes * indirectly. Also, we remove the VACUUM cycle ID from pages, which b-tree @@ -1287,13 +1287,14 @@ _bt_delitems_vacuum(Relation rel, Buffer buf, * (a version that lacks the TIDs that are to be deleted). * * This is nearly the same as _bt_delitems_vacuum as far as what it does to - * the page, but it needs its own latestRemovedXid from caller (caller gets - * this from tableam). This is used by the REDO routine to generate recovery + * the page, but it needs its own snapshotConflictHorizon (caller gets this + * from tableam). This is used by the REDO routine to generate recovery * conflicts. The other difference is that only _bt_delitems_vacuum will * clear page's VACUUM cycle ID. 
*/ static void -_bt_delitems_delete(Relation rel, Buffer buf, TransactionId latestRemovedXid, +_bt_delitems_delete(Relation rel, Buffer buf, + TransactionId snapshotConflictHorizon, OffsetNumber *deletable, int ndeletable, BTVacuumPosting *updatable, int nupdatable) { @@ -1357,7 +1358,7 @@ _bt_delitems_delete(Relation rel, Buffer buf, TransactionId latestRemovedXid, XLogRecPtr recptr; xl_btree_delete xlrec_delete; - xlrec_delete.latestRemovedXid = latestRemovedXid; + xlrec_delete.snapshotConflictHorizon = snapshotConflictHorizon; xlrec_delete.ndeleted = ndeletable; xlrec_delete.nupdated = nupdatable; @@ -1529,7 +1530,7 @@ _bt_delitems_delete_check(Relation rel, Buffer buf, Relation heapRel, TM_IndexDeleteOp *delstate) { Page page = BufferGetPage(buf); - TransactionId latestRemovedXid; + TransactionId snapshotConflictHorizon; OffsetNumber postingidxoffnum = InvalidOffsetNumber; int ndeletable = 0, nupdatable = 0; @@ -1537,11 +1538,11 @@ _bt_delitems_delete_check(Relation rel, Buffer buf, Relation heapRel, BTVacuumPosting updatable[MaxIndexTuplesPerPage]; /* Use tableam interface to determine which tuples to delete first */ - latestRemovedXid = table_index_delete_tuples(heapRel, delstate); + snapshotConflictHorizon = table_index_delete_tuples(heapRel, delstate); - /* Should not WAL-log latestRemovedXid unless it's required */ - if (!XLogStandbyInfoActive() || !RelationNeedsWAL(rel)) - latestRemovedXid = InvalidTransactionId; + /* Should not WAL-log snapshotConflictHorizon unless it's required */ + if (!XLogStandbyInfoActive()) + snapshotConflictHorizon = InvalidTransactionId; /* * Construct a leaf-page-wise description of what _bt_delitems_delete() @@ -1683,8 +1684,8 @@ _bt_delitems_delete_check(Relation rel, Buffer buf, Relation heapRel, } /* Physically delete tuples (or TIDs) using deletable (or updatable) */ - _bt_delitems_delete(rel, buf, latestRemovedXid, deletable, ndeletable, - updatable, nupdatable); + _bt_delitems_delete(rel, buf, snapshotConflictHorizon, + deletable, ndeletable, updatable, nupdatable); /* be tidy */ for (int i = 0; i < nupdatable; i++) diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c index ad489e33b3..3e311a98a6 100644 --- a/src/backend/access/nbtree/nbtxlog.c +++ b/src/backend/access/nbtree/nbtxlog.c @@ -668,7 +668,8 @@ btree_xlog_delete(XLogReaderState *record) XLogRecGetBlockTag(record, 0, &rlocator, NULL, NULL); - ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, rlocator); + ResolveRecoveryConflictWithSnapshot(xlrec->snapshotConflictHorizon, + rlocator); } /* @@ -991,7 +992,7 @@ btree_xlog_newroot(XLogReaderState *record) * xl_btree_reuse_page record at the point that a page is actually recycled * and reused for an entirely unrelated page inside _bt_split(). These * records include the same safexid value from the original deleted page, - * stored in the record's latestRemovedFullXid field. + * stored in the record's snapshotConflictHorizon field. * * The GlobalVisCheckRemovableFullXid() test in BTPageIsRecyclable() is used * to determine if it's safe to recycle a page. 
This mirrors our own test: @@ -1005,7 +1006,7 @@ btree_xlog_reuse_page(XLogReaderState *record) xl_btree_reuse_page *xlrec = (xl_btree_reuse_page *) XLogRecGetData(record); if (InHotStandby) - ResolveRecoveryConflictWithSnapshotFullXid(xlrec->latestRemovedFullXid, + ResolveRecoveryConflictWithSnapshotFullXid(xlrec->snapshotConflictHorizon, xlrec->locator); } diff --git a/src/backend/access/rmgrdesc/gistdesc.c b/src/backend/access/rmgrdesc/gistdesc.c index 7dd3c1d500..97f3520abb 100644 --- a/src/backend/access/rmgrdesc/gistdesc.c +++ b/src/backend/access/rmgrdesc/gistdesc.c @@ -26,18 +26,18 @@ out_gistxlogPageUpdate(StringInfo buf, gistxlogPageUpdate *xlrec) static void out_gistxlogPageReuse(StringInfo buf, gistxlogPageReuse *xlrec) { - appendStringInfo(buf, "rel %u/%u/%u; blk %u; latestRemovedXid %u:%u", + appendStringInfo(buf, "rel %u/%u/%u; blk %u; snapshotConflictHorizon %u:%u", xlrec->locator.spcOid, xlrec->locator.dbOid, xlrec->locator.relNumber, xlrec->block, - EpochFromFullTransactionId(xlrec->latestRemovedFullXid), - XidFromFullTransactionId(xlrec->latestRemovedFullXid)); + EpochFromFullTransactionId(xlrec->snapshotConflictHorizon), + XidFromFullTransactionId(xlrec->snapshotConflictHorizon)); } static void out_gistxlogDelete(StringInfo buf, gistxlogDelete *xlrec) { - appendStringInfo(buf, "delete: latestRemovedXid %u, nitems: %u", - xlrec->latestRemovedXid, xlrec->ntodelete); + appendStringInfo(buf, "delete: snapshotConflictHorizon %u, nitems: %u", + xlrec->snapshotConflictHorizon, xlrec->ntodelete); } static void diff --git a/src/backend/access/rmgrdesc/hashdesc.c b/src/backend/access/rmgrdesc/hashdesc.c index ef443bdb16..a5b861bdad 100644 --- a/src/backend/access/rmgrdesc/hashdesc.c +++ b/src/backend/access/rmgrdesc/hashdesc.c @@ -113,9 +113,9 @@ hash_desc(StringInfo buf, XLogReaderState *record) { xl_hash_vacuum_one_page *xlrec = (xl_hash_vacuum_one_page *) rec; - appendStringInfo(buf, "ntuples %d, latestRemovedXid %u", + appendStringInfo(buf, "ntuples %d, snapshotConflictHorizon %u", xlrec->ntuples, - xlrec->latestRemovedXid); + xlrec->snapshotConflictHorizon); break; } } diff --git a/src/backend/access/rmgrdesc/heapdesc.c b/src/backend/access/rmgrdesc/heapdesc.c index 3f8c5e63f3..325aee93ff 100644 --- a/src/backend/access/rmgrdesc/heapdesc.c +++ b/src/backend/access/rmgrdesc/heapdesc.c @@ -125,8 +125,8 @@ heap2_desc(StringInfo buf, XLogReaderState *record) { xl_heap_prune *xlrec = (xl_heap_prune *) rec; - appendStringInfo(buf, "latestRemovedXid %u nredirected %u ndead %u", - xlrec->latestRemovedXid, + appendStringInfo(buf, "snapshotConflictHorizon %u nredirected %u ndead %u", + xlrec->snapshotConflictHorizon, xlrec->nredirected, xlrec->ndead); } @@ -140,15 +140,15 @@ heap2_desc(StringInfo buf, XLogReaderState *record) { xl_heap_freeze_page *xlrec = (xl_heap_freeze_page *) rec; - appendStringInfo(buf, "latestRemovedXid %u nplans %u", - xlrec->latestRemovedXid, xlrec->nplans); + appendStringInfo(buf, "snapshotConflictHorizon %u nplans %u", + xlrec->snapshotConflictHorizon, xlrec->nplans); } else if (info == XLOG_HEAP2_VISIBLE) { xl_heap_visible *xlrec = (xl_heap_visible *) rec; - appendStringInfo(buf, "cutoff xid %u flags 0x%02X", - xlrec->cutoff_xid, xlrec->flags); + appendStringInfo(buf, "snapshotConflictHorizon %u flags 0x%02X", + xlrec->snapshotConflictHorizon, xlrec->flags); } else if (info == XLOG_HEAP2_MULTI_INSERT) { diff --git a/src/backend/access/rmgrdesc/nbtdesc.c b/src/backend/access/rmgrdesc/nbtdesc.c index 4843cd530d..f4a70d54e6 100644 --- 
a/src/backend/access/rmgrdesc/nbtdesc.c +++ b/src/backend/access/rmgrdesc/nbtdesc.c @@ -63,8 +63,9 @@ btree_desc(StringInfo buf, XLogReaderState *record) { xl_btree_delete *xlrec = (xl_btree_delete *) rec; - appendStringInfo(buf, "latestRemovedXid %u; ndeleted %u; nupdated %u", - xlrec->latestRemovedXid, xlrec->ndeleted, xlrec->nupdated); + appendStringInfo(buf, "snapshotConflictHorizon %u; ndeleted %u; nupdated %u", + xlrec->snapshotConflictHorizon, + xlrec->ndeleted, xlrec->nupdated); break; } case XLOG_BTREE_MARK_PAGE_HALFDEAD: @@ -100,11 +101,11 @@ btree_desc(StringInfo buf, XLogReaderState *record) { xl_btree_reuse_page *xlrec = (xl_btree_reuse_page *) rec; - appendStringInfo(buf, "rel %u/%u/%u; latestRemovedXid %u:%u", + appendStringInfo(buf, "rel %u/%u/%u; snapshotConflictHorizon %u:%u", xlrec->locator.spcOid, xlrec->locator.dbOid, xlrec->locator.relNumber, - EpochFromFullTransactionId(xlrec->latestRemovedFullXid), - XidFromFullTransactionId(xlrec->latestRemovedFullXid)); + EpochFromFullTransactionId(xlrec->snapshotConflictHorizon), + XidFromFullTransactionId(xlrec->snapshotConflictHorizon)); break; } case XLOG_BTREE_META_CLEANUP: diff --git a/src/backend/access/rmgrdesc/spgdesc.c b/src/backend/access/rmgrdesc/spgdesc.c index d5d921a42a..308bd3e27f 100644 --- a/src/backend/access/rmgrdesc/spgdesc.c +++ b/src/backend/access/rmgrdesc/spgdesc.c @@ -118,10 +118,10 @@ spg_desc(StringInfo buf, XLogReaderState *record) { spgxlogVacuumRedirect *xlrec = (spgxlogVacuumRedirect *) rec; - appendStringInfo(buf, "ntoplaceholder: %u, firstplaceholder: %u, newestredirectxid: %u", + appendStringInfo(buf, "ntoplaceholder: %u, firstplaceholder: %u, snapshotConflictHorizon: %u", xlrec->nToPlaceholder, xlrec->firstPlaceholder, - xlrec->newestRedirectXid); + xlrec->snapshotConflictHorizon); } break; } diff --git a/src/backend/access/spgist/spgvacuum.c b/src/backend/access/spgist/spgvacuum.c index 0049630532..ad90b213b9 100644 --- a/src/backend/access/spgist/spgvacuum.c +++ b/src/backend/access/spgist/spgvacuum.c @@ -504,7 +504,7 @@ vacuumRedirectAndPlaceholder(Relation index, Buffer buffer) GlobalVisState *vistest; xlrec.nToPlaceholder = 0; - xlrec.newestRedirectXid = InvalidTransactionId; + xlrec.snapshotConflictHorizon = InvalidTransactionId; /* XXX: providing heap relation would allow more pruning */ vistest = GlobalVisTestFor(NULL); @@ -533,9 +533,9 @@ vacuumRedirectAndPlaceholder(Relation index, Buffer buffer) opaque->nPlaceholder++; /* remember newest XID among the removed redirects */ - if (!TransactionIdIsValid(xlrec.newestRedirectXid) || - TransactionIdPrecedes(xlrec.newestRedirectXid, dt->xid)) - xlrec.newestRedirectXid = dt->xid; + if (!TransactionIdIsValid(xlrec.snapshotConflictHorizon) || + TransactionIdPrecedes(xlrec.snapshotConflictHorizon, dt->xid)) + xlrec.snapshotConflictHorizon = dt->xid; ItemPointerSetInvalid(&dt->pointer); diff --git a/src/backend/access/spgist/spgxlog.c b/src/backend/access/spgist/spgxlog.c index 4c9f4020ff..44adc2098f 100644 --- a/src/backend/access/spgist/spgxlog.c +++ b/src/backend/access/spgist/spgxlog.c @@ -875,14 +875,11 @@ spgRedoVacuumRedirect(XLogReaderState *record) */ if (InHotStandby) { - if (TransactionIdIsValid(xldata->newestRedirectXid)) - { - RelFileLocator locator; + RelFileLocator locator; - XLogRecGetBlockTag(record, 0, &locator, NULL, NULL); - ResolveRecoveryConflictWithSnapshot(xldata->newestRedirectXid, - locator); - } + XLogRecGetBlockTag(record, 0, &locator, NULL, NULL); + ResolveRecoveryConflictWithSnapshot(xldata->snapshotConflictHorizon, + 
locator); } if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO) diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c index 9e8b6756fe..283517d956 100644 --- a/src/backend/storage/ipc/procarray.c +++ b/src/backend/storage/ipc/procarray.c @@ -3337,12 +3337,17 @@ GetCurrentVirtualXIDs(TransactionId limitXmin, bool excludeXmin0, * GetConflictingVirtualXIDs -- returns an array of currently active VXIDs. * * Usage is limited to conflict resolution during recovery on standby servers. - * limitXmin is supplied as either latestRemovedXid, or InvalidTransactionId - * in cases where we cannot accurately determine a value for latestRemovedXid. + * limitXmin is supplied as either a cutoff with snapshotConflictHorizon + * semantics, or InvalidTransactionId in cases where caller cannot accurately + * determine a safe snapshotConflictHorizon value. * * If limitXmin is InvalidTransactionId then we want to kill everybody, * so we're not worried if they have a snapshot or not, nor does it really - * matter what type of lock we hold. + * matter what type of lock we hold. Caller must avoid calling here with + * snapshotConflictHorizon style cutoffs that were set to InvalidTransactionId + * during original execution, since that actually indicates that there is + * definitely no need for a recovery conflict (the snapshotConflictHorizon + * convention for InvalidTransactionId values is the opposite of our own!). * * All callers that are checking xmins always now supply a valid and useful * value for limitXmin. The limitXmin is always lower than the lowest diff --git a/src/backend/storage/ipc/standby.c b/src/backend/storage/ipc/standby.c index 7db86f7885..f43229dfda 100644 --- a/src/backend/storage/ipc/standby.c +++ b/src/backend/storage/ipc/standby.c @@ -464,8 +464,18 @@ ResolveRecoveryConflictWithVirtualXIDs(VirtualTransactionId *waitlist, } } +/* + * Generate whatever recovery conflicts are needed to eliminate snapshots that + * might see XIDs <= snapshotConflictHorizon as still running. + * + * snapshotConflictHorizon cutoffs are our standard approach to generating + * granular recovery conflicts. Note that InvalidTransactionId values are + * interpreted as "definitely don't need any conflicts" here, which is a + * general convention that WAL records can (and often do) depend on. + */ void -ResolveRecoveryConflictWithSnapshot(TransactionId latestRemovedXid, RelFileLocator locator) +ResolveRecoveryConflictWithSnapshot(TransactionId snapshotConflictHorizon, + RelFileLocator locator) { VirtualTransactionId *backends; @@ -480,12 +490,11 @@ ResolveRecoveryConflictWithSnapshot(TransactionId latestRemovedXid, RelFileLocat * which is sufficient for the deletion operation must take place before * replay of the deletion record itself). 
*/ - if (!TransactionIdIsValid(latestRemovedXid)) + if (!TransactionIdIsValid(snapshotConflictHorizon)) return; - backends = GetConflictingVirtualXIDs(latestRemovedXid, + backends = GetConflictingVirtualXIDs(snapshotConflictHorizon, locator.dbOid); - ResolveRecoveryConflictWithVirtualXIDs(backends, PROCSIG_RECOVERY_CONFLICT_SNAPSHOT, WAIT_EVENT_RECOVERY_CONFLICT_SNAPSHOT, @@ -497,7 +506,7 @@ ResolveRecoveryConflictWithSnapshot(TransactionId latestRemovedXid, RelFileLocat * FullTransactionId values */ void -ResolveRecoveryConflictWithSnapshotFullXid(FullTransactionId latestRemovedFullXid, +ResolveRecoveryConflictWithSnapshotFullXid(FullTransactionId snapshotConflictHorizon, RelFileLocator locator) { /* @@ -510,13 +519,13 @@ ResolveRecoveryConflictWithSnapshotFullXid(FullTransactionId latestRemovedFullXi uint64 diff; diff = U64FromFullTransactionId(nextXid) - - U64FromFullTransactionId(latestRemovedFullXid); + U64FromFullTransactionId(snapshotConflictHorizon); if (diff < MaxTransactionId / 2) { - TransactionId latestRemovedXid; + TransactionId truncated; - latestRemovedXid = XidFromFullTransactionId(latestRemovedFullXid); - ResolveRecoveryConflictWithSnapshot(latestRemovedXid, locator); + truncated = XidFromFullTransactionId(snapshotConflictHorizon); + ResolveRecoveryConflictWithSnapshot(truncated, locator); } } diff --git a/src/include/access/gist_private.h b/src/include/access/gist_private.h index 093bf23443..a84949747b 100644 --- a/src/include/access/gist_private.h +++ b/src/include/access/gist_private.h @@ -441,7 +441,7 @@ extern XLogRecPtr gistXLogPageDelete(Buffer buffer, OffsetNumber downlinkOffset); extern void gistXLogPageReuse(Relation rel, BlockNumber blkno, - FullTransactionId latestRemovedXid); + FullTransactionId deleteXid); extern XLogRecPtr gistXLogUpdate(Buffer buffer, OffsetNumber *todelete, int ntodelete, @@ -449,7 +449,7 @@ extern XLogRecPtr gistXLogUpdate(Buffer buffer, Buffer leftchildbuf); extern XLogRecPtr gistXLogDelete(Buffer buffer, OffsetNumber *todelete, - int ntodelete, TransactionId latestRemovedXid); + int ntodelete, TransactionId snapshotConflictHorizon); extern XLogRecPtr gistXLogSplit(bool page_is_leaf, SplitedPageLayout *dist, diff --git a/src/include/access/gistxlog.h b/src/include/access/gistxlog.h index 9bbe4c2622..33f1c7e31b 100644 --- a/src/include/access/gistxlog.h +++ b/src/include/access/gistxlog.h @@ -49,7 +49,7 @@ typedef struct gistxlogPageUpdate */ typedef struct gistxlogDelete { - TransactionId latestRemovedXid; + TransactionId snapshotConflictHorizon; uint16 ntodelete; /* number of deleted offsets */ /* @@ -99,10 +99,10 @@ typedef struct gistxlogPageReuse { RelFileLocator locator; BlockNumber block; - FullTransactionId latestRemovedFullXid; + FullTransactionId snapshotConflictHorizon; } gistxlogPageReuse; -#define SizeOfGistxlogPageReuse (offsetof(gistxlogPageReuse, latestRemovedFullXid) + sizeof(FullTransactionId)) +#define SizeOfGistxlogPageReuse (offsetof(gistxlogPageReuse, snapshotConflictHorizon) + sizeof(FullTransactionId)) extern void gist_redo(XLogReaderState *record); extern void gist_desc(StringInfo buf, XLogReaderState *record); diff --git a/src/include/access/hash_xlog.h b/src/include/access/hash_xlog.h index 59230706bb..6dafb4a598 100644 --- a/src/include/access/hash_xlog.h +++ b/src/include/access/hash_xlog.h @@ -250,7 +250,7 @@ typedef struct xl_hash_init_bitmap_page */ typedef struct xl_hash_vacuum_one_page { - TransactionId latestRemovedXid; + TransactionId snapshotConflictHorizon; int ntuples; /* TARGET OFFSET NUMBERS FOLLOW 
AT THE END */ diff --git a/src/include/access/heapam_xlog.h b/src/include/access/heapam_xlog.h index bbf164719e..5c77290eec 100644 --- a/src/include/access/heapam_xlog.h +++ b/src/include/access/heapam_xlog.h @@ -242,7 +242,7 @@ typedef struct xl_heap_update */ typedef struct xl_heap_prune { - TransactionId latestRemovedXid; + TransactionId snapshotConflictHorizon; uint16 nredirected; uint16 ndead; /* OFFSET NUMBERS are in the block reference 0 */ @@ -342,7 +342,7 @@ typedef struct xl_heap_freeze_plan */ typedef struct xl_heap_freeze_page { - TransactionId latestRemovedXid; + TransactionId snapshotConflictHorizon; uint16 nplans; /* FREEZE PLANS FOLLOW */ @@ -359,7 +359,7 @@ typedef struct xl_heap_freeze_page */ typedef struct xl_heap_visible { - TransactionId cutoff_xid; + TransactionId snapshotConflictHorizon; uint8 flags; } xl_heap_visible; @@ -396,8 +396,8 @@ typedef struct xl_heap_rewrite_mapping XLogRecPtr start_lsn; /* Insert LSN at begin of rewrite */ } xl_heap_rewrite_mapping; -extern void HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple, - TransactionId *latestRemovedXid); +extern void HeapTupleHeaderAdvanceConflictHorizon(HeapTupleHeader tuple, + TransactionId *snapshotConflictHorizon); extern void heap_redo(XLogReaderState *record); extern void heap_desc(StringInfo buf, XLogReaderState *record); @@ -409,6 +409,8 @@ extern const char *heap2_identify(uint8 info); extern void heap_xlog_logical_rewrite(XLogReaderState *r); extern XLogRecPtr log_heap_visible(RelFileLocator rlocator, Buffer heap_buffer, - Buffer vm_buffer, TransactionId cutoff_xid, uint8 vmflags); + Buffer vm_buffer, + TransactionId snapshotConflictHorizon, + uint8 vmflags); #endif /* HEAPAM_XLOG_H */ diff --git a/src/include/access/nbtxlog.h b/src/include/access/nbtxlog.h index dd504d1885..3b2d959c69 100644 --- a/src/include/access/nbtxlog.h +++ b/src/include/access/nbtxlog.h @@ -187,7 +187,7 @@ typedef struct xl_btree_reuse_page { RelFileLocator locator; BlockNumber block; - FullTransactionId latestRemovedFullXid; + FullTransactionId snapshotConflictHorizon; } xl_btree_reuse_page; #define SizeOfBtreeReusePage (sizeof(xl_btree_reuse_page)) @@ -199,7 +199,7 @@ typedef struct xl_btree_reuse_page * when btinsert() is called. * * The records are very similar. The only difference is that xl_btree_delete - * has to include a latestRemovedXid field to generate recovery conflicts. + * has a snapshotConflictHorizon field to generate recovery conflicts. * (VACUUM operations can just rely on earlier conflicts generated during * pruning of the table whose TIDs the to-be-deleted index tuples point to. 
* There are also small differences between each REDO routine that we don't go @@ -232,7 +232,7 @@ typedef struct xl_btree_vacuum typedef struct xl_btree_delete { - TransactionId latestRemovedXid; + TransactionId snapshotConflictHorizon; uint16 ndeleted; uint16 nupdated; diff --git a/src/include/access/spgxlog.h b/src/include/access/spgxlog.h index 930ffdd4f7..82332cb694 100644 --- a/src/include/access/spgxlog.h +++ b/src/include/access/spgxlog.h @@ -239,7 +239,7 @@ typedef struct spgxlogVacuumRedirect { uint16 nToPlaceholder; /* number of redirects to make placeholders */ OffsetNumber firstPlaceholder; /* first placeholder tuple to remove */ - TransactionId newestRedirectXid; /* newest XID of removed redirects */ + TransactionId snapshotConflictHorizon; /* newest XID of removed redirects */ /* offsets of redirect tuples to make placeholders follow */ OffsetNumber offsets[FLEXIBLE_ARRAY_MEMBER]; diff --git a/src/include/access/tableam.h b/src/include/access/tableam.h index e45d73eae3..4d1ef405c2 100644 --- a/src/include/access/tableam.h +++ b/src/include/access/tableam.h @@ -1318,7 +1318,7 @@ table_tuple_satisfies_snapshot(Relation rel, TupleTableSlot *slot, * marked as deletable. See comments above TM_IndexDelete and comments above * TM_IndexDeleteOp for full details. * - * Returns a latestRemovedXid transaction ID that caller generally places in + * Returns a snapshotConflictHorizon transaction ID that caller places in * its index deletion WAL record. This might be used during subsequent REDO * of the WAL record when in Hot Standby mode -- a recovery conflict for the * index deletion operation might be required on the standby. diff --git a/src/include/storage/standby.h b/src/include/storage/standby.h index f5da98dc73..e46c934c56 100644 --- a/src/include/storage/standby.h +++ b/src/include/storage/standby.h @@ -29,9 +29,9 @@ extern PGDLLIMPORT bool log_recovery_conflict_waits; extern void InitRecoveryTransactionEnvironment(void); extern void ShutdownRecoveryTransactionEnvironment(void); -extern void ResolveRecoveryConflictWithSnapshot(TransactionId latestRemovedXid, +extern void ResolveRecoveryConflictWithSnapshot(TransactionId snapshotConflictHorizon, RelFileLocator locator); -extern void ResolveRecoveryConflictWithSnapshotFullXid(FullTransactionId latestRemovedFullXid, +extern void ResolveRecoveryConflictWithSnapshotFullXid(FullTransactionId snapshotConflictHorizon, RelFileLocator locator); extern void ResolveRecoveryConflictWithTablespace(Oid tsid); extern void ResolveRecoveryConflictWithDatabase(Oid dbid);
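To recap the caller-side contract documented above HeapTupleHeaderAdvanceConflictHorizon() in heapam.c, here is an illustrative pruning sketch (the loop and variable names are hypothetical, not from the patch):

    TransactionId snapshotConflictHorizon = InvalidTransactionId;

    /* ratchet the horizon forward over each tuple being physically removed */
    for (int i = 0; i < nremoved; i++)
        HeapTupleHeaderAdvanceConflictHorizon(htuphdrs[i],
                                              &snapshotConflictHorizon);

    /*
     * InvalidTransactionId still means "definitely no recovery conflict
     * needed".  Any other value is carried in the WAL record, and REDO
     * passes it to ResolveRecoveryConflictWithSnapshot().
     */
    xlrec.snapshotConflictHorizon = snapshotConflictHorizon;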
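The nbtxlog.c comments on xl_btree_reuse_page describe a symmetry that can be put side by side as a sketch (the first test is roughly what BTPageIsRecyclable() runs during original execution; the second call is taken from btree_xlog_reuse_page() above):

    /* Primary: a deleted page is only recycled once its safexid is removable */
    recycle = GlobalVisCheckRemovableFullXid(NULL, BTPageGetDeleteXid(page));

    /* Standby: at REDO, the same XID value generates the recovery conflict */
    ResolveRecoveryConflictWithSnapshotFullXid(xlrec->snapshotConflictHorizon,
                                               xlrec->locator);

One 64-bit safexid value thereby achieves the same exclusion effect on the primary and on standbys, which is why the field could simply be renamed in place.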