Remove LVPagePruneState.

Commit cb970240f1 moved some code from
lazy_scan_heap() to lazy_scan_prune(), and now some things that used to
need to be passed back and forth are completely local to lazy_scan_prune().
Hence, this struct is mostly obsolete.  The only thing that still
needs to be passed back to the caller is has_lpdead_items, and that's
also passed back by lazy_scan_noprune(), so do it the same way in both
cases.

Melanie Plageman, reviewed and slightly revised by me.

Discussion: http://postgr.es/m/CAAKRu_aM=OL85AOr-80wBsCr=vLVzhnaavqkVPRkFBtD0zsuLQ@mail.gmail.com
Robert Haas 2024-01-18 15:17:09 -05:00
parent cb970240f1
commit e313a61137
2 changed files with 69 additions and 67 deletions
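
In short: the struct fields that only lazy_scan_prune() itself consumes become plain local variables, and the one fact the caller still needs travels back through a bool out-parameter, the same convention lazy_scan_noprune() already uses. A minimal sketch of that pattern, using hypothetical names (scan_page, ndead) rather than the actual PostgreSQL code:

#include <stdbool.h>
#include <stddef.h>

/* Sketch only: hypothetical names, simplified from the real refactoring. */
static void
scan_page(size_t ndead, bool *has_lpdead_items)
{
    bool    all_visible = true;     /* formerly LVPagePruneState fields, */
    bool    all_frozen = true;      /* now purely local to this function */

    /* ... pruning and freezing would update the locals here ... */
    if (ndead > 0)
        all_visible = all_frozen = false;

    (void) all_visible;             /* consumed locally, e.g. for VM updates */
    (void) all_frozen;

    /* The only fact that still escapes to the caller. */
    *has_lpdead_items = (ndead > 0);
}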

src/backend/access/heap/vacuumlazy.c

@@ -212,23 +212,6 @@ typedef struct LVRelState
     int64       missed_dead_tuples; /* # removable, but not removed */
 } LVRelState;
 
-/*
- * State returned by lazy_scan_prune()
- */
-typedef struct LVPagePruneState
-{
-    bool        has_lpdead_items;   /* includes existing LP_DEAD items */
-
-    /*
-     * State describes the proper VM bit states to set for the page following
-     * pruning and freezing.  all_visible implies !has_lpdead_items, but don't
-     * trust all_frozen result unless all_visible is also set to true.
-     */
-    bool        all_visible;    /* Every item visible to all? */
-    bool        all_frozen;     /* provided all_visible is also true */
-
-    TransactionId visibility_cutoff_xid;    /* For recovery conflicts */
-} LVPagePruneState;
-
 /* Struct for saving and restoring vacuum error information. */
 typedef struct LVSavedErrInfo
 {
@@ -250,7 +233,7 @@ static bool lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf,
 static void lazy_scan_prune(LVRelState *vacrel, Buffer buf,
                             BlockNumber blkno, Page page,
                             Buffer vmbuffer, bool all_visible_according_to_vm,
-                            LVPagePruneState *prunestate);
+                            bool *has_lpdead_items);
 static bool lazy_scan_noprune(LVRelState *vacrel, Buffer buf,
                               BlockNumber blkno, Page page,
                               bool *has_lpdead_items);
@@ -854,7 +837,7 @@ lazy_scan_heap(LVRelState *vacrel)
         Buffer      buf;
         Page        page;
         bool        all_visible_according_to_vm;
-        LVPagePruneState prunestate;
+        bool        has_lpdead_items;
 
         if (blkno == next_unskippable_block)
         {
@@ -959,8 +942,6 @@ lazy_scan_heap(LVRelState *vacrel)
         page = BufferGetPage(buf);
         if (!ConditionalLockBufferForCleanup(buf))
         {
-            bool        has_lpdead_items;
-
             LockBuffer(buf, BUFFER_LOCK_SHARE);
 
             /* Check for new or empty pages before lazy_scan_noprune call */
@@ -1035,7 +1016,7 @@ lazy_scan_heap(LVRelState *vacrel)
          */
         lazy_scan_prune(vacrel, buf, blkno, page,
                         vmbuffer, all_visible_according_to_vm,
-                        &prunestate);
+                        &has_lpdead_items);
 
         /*
          * Final steps for block: drop cleanup lock, record free space in the
@@ -1056,7 +1037,7 @@ lazy_scan_heap(LVRelState *vacrel)
          */
         if (vacrel->nindexes == 0
             || !vacrel->do_index_vacuuming
-            || !prunestate.has_lpdead_items)
+            || !has_lpdead_items)
         {
             Size        freespace = PageGetHeapFreeSpace(page);
 
@@ -1068,7 +1049,7 @@ lazy_scan_heap(LVRelState *vacrel)
              * visible on upper FSM pages.  This is done after vacuuming if the
              * table has indexes.
              */
-            if (vacrel->nindexes == 0 && prunestate.has_lpdead_items &&
+            if (vacrel->nindexes == 0 && has_lpdead_items &&
                 blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES)
             {
                 FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
@@ -1383,6 +1364,14 @@ lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf, BlockNumber blkno,
  * right after this operation completes instead of in the middle of it. Note that
  * any tuple that becomes dead after the call to heap_page_prune() can't need to
  * be frozen, because it was visible to another session when vacuum started.
+ *
+ * vmbuffer is the buffer containing the VM block with visibility information
+ * for the heap block, blkno. all_visible_according_to_vm is the saved
+ * visibility status of the heap block looked up earlier by the caller. We
+ * won't rely entirely on this status, as it may be out of date.
+ *
+ * *has_lpdead_items is set to true or false depending on whether, upon return
+ * from this function, any LP_DEAD items are still present on the page.
  */
 static void
 lazy_scan_prune(LVRelState *vacrel,
@@ -1391,7 +1380,7 @@ lazy_scan_prune(LVRelState *vacrel,
                 Page page,
                 Buffer vmbuffer,
                 bool all_visible_according_to_vm,
-                LVPagePruneState *prunestate)
+                bool *has_lpdead_items)
 {
     Relation    rel = vacrel->rel;
     OffsetNumber offnum,
@@ -1404,6 +1393,9 @@ lazy_scan_prune(LVRelState *vacrel,
                 recently_dead_tuples;
     HeapPageFreeze pagefrz;
     bool        hastup = false;
+    bool        all_visible,
+                all_frozen;
+    TransactionId visibility_cutoff_xid;
     int64       fpi_before = pgWalUsage.wal_fpi;
     OffsetNumber deadoffsets[MaxHeapTuplesPerPage];
     HeapTupleFreeze frozen[MaxHeapTuplesPerPage];
@@ -1444,14 +1436,22 @@ lazy_scan_prune(LVRelState *vacrel,
                                      &presult, &vacrel->offnum);
 
     /*
-     * Now scan the page to collect LP_DEAD items and check for tuples
-     * requiring freezing among remaining tuples with storage
+     * We will update the VM after collecting LP_DEAD items and freezing
+     * tuples. Keep track of whether or not the page is all_visible and
+     * all_frozen and use this information to update the VM. all_visible
+     * implies 0 lpdead_items, but don't trust all_frozen result unless
+     * all_visible is also set to true.
+     *
+     * Also keep track of the visibility cutoff xid for recovery conflicts.
      */
-    prunestate->has_lpdead_items = false;
-    prunestate->all_visible = true;
-    prunestate->all_frozen = true;
-    prunestate->visibility_cutoff_xid = InvalidTransactionId;
+    all_visible = true;
+    all_frozen = true;
+    visibility_cutoff_xid = InvalidTransactionId;
 
+    /*
+     * Now scan the page to collect LP_DEAD items and update the variables set
+     * just above.
+     */
     for (offnum = FirstOffsetNumber;
          offnum <= maxoff;
          offnum = OffsetNumberNext(offnum))
@@ -1538,13 +1538,13 @@ lazy_scan_prune(LVRelState *vacrel,
                      * asynchronously.  See SetHintBits for more info.  Check that
                      * the tuple is hinted xmin-committed because of that.
                      */
-                    if (prunestate->all_visible)
+                    if (all_visible)
                     {
                         TransactionId xmin;
 
                         if (!HeapTupleHeaderXminCommitted(htup))
                         {
-                            prunestate->all_visible = false;
+                            all_visible = false;
                             break;
                         }
 
@@ -1556,14 +1556,14 @@ lazy_scan_prune(LVRelState *vacrel,
                     if (!TransactionIdPrecedes(xmin,
                                                vacrel->cutoffs.OldestXmin))
                     {
-                        prunestate->all_visible = false;
+                        all_visible = false;
                         break;
                     }
 
                     /* Track newest xmin on page. */
-                    if (TransactionIdFollows(xmin, prunestate->visibility_cutoff_xid) &&
+                    if (TransactionIdFollows(xmin, visibility_cutoff_xid) &&
                         TransactionIdIsNormal(xmin))
-                        prunestate->visibility_cutoff_xid = xmin;
+                        visibility_cutoff_xid = xmin;
                 }
                 break;
             case HEAPTUPLE_RECENTLY_DEAD:
@@ -1574,7 +1574,7 @@ lazy_scan_prune(LVRelState *vacrel,
                  * pruning.)
                  */
                 recently_dead_tuples++;
-                prunestate->all_visible = false;
+                all_visible = false;
                 break;
             case HEAPTUPLE_INSERT_IN_PROGRESS:
 
@@ -1585,11 +1585,11 @@ lazy_scan_prune(LVRelState *vacrel,
                  * results.  This assumption is a bit shaky, but it is what
                  * acquire_sample_rows() does, so be consistent.
                  */
-                prunestate->all_visible = false;
+                all_visible = false;
                 break;
             case HEAPTUPLE_DELETE_IN_PROGRESS:
                 /* This is an expected case during concurrent vacuum */
-                prunestate->all_visible = false;
+                all_visible = false;
 
                 /*
                  * Count such rows as live.  As above, we assume the deleting
@@ -1619,7 +1619,7 @@ lazy_scan_prune(LVRelState *vacrel,
          * definitely cannot be set all-frozen in the visibility map later on
          */
         if (!totally_frozen)
-            prunestate->all_frozen = false;
+            all_frozen = false;
     }
 
     /*
@@ -1637,7 +1637,7 @@ lazy_scan_prune(LVRelState *vacrel,
      * page all-frozen afterwards (might not happen until final heap pass).
      */
     if (pagefrz.freeze_required || tuples_frozen == 0 ||
-        (prunestate->all_visible && prunestate->all_frozen &&
+        (all_visible && all_frozen &&
          fpi_before != pgWalUsage.wal_fpi))
     {
         /*
@@ -1675,11 +1675,11 @@ lazy_scan_prune(LVRelState *vacrel,
          * once we're done with it.  Otherwise we generate a conservative
          * cutoff by stepping back from OldestXmin.
          */
-        if (prunestate->all_visible && prunestate->all_frozen)
+        if (all_visible && all_frozen)
         {
             /* Using same cutoff when setting VM is now unnecessary */
-            snapshotConflictHorizon = prunestate->visibility_cutoff_xid;
-            prunestate->visibility_cutoff_xid = InvalidTransactionId;
+            snapshotConflictHorizon = visibility_cutoff_xid;
+            visibility_cutoff_xid = InvalidTransactionId;
         }
         else
         {
@@ -1702,7 +1702,7 @@ lazy_scan_prune(LVRelState *vacrel,
          */
         vacrel->NewRelfrozenXid = pagefrz.NoFreezePageRelfrozenXid;
         vacrel->NewRelminMxid = pagefrz.NoFreezePageRelminMxid;
-        prunestate->all_frozen = false;
+        all_frozen = false;
         tuples_frozen = 0;      /* avoid miscounts in instrumentation */
     }
 
@@ -1715,16 +1715,17 @@ lazy_scan_prune(LVRelState *vacrel,
      */
 #ifdef USE_ASSERT_CHECKING
     /* Note that all_frozen value does not matter when !all_visible */
-    if (prunestate->all_visible && lpdead_items == 0)
+    if (all_visible && lpdead_items == 0)
     {
-        TransactionId cutoff;
-        bool        all_frozen;
+        TransactionId debug_cutoff;
+        bool        debug_all_frozen;
 
-        if (!heap_page_is_all_visible(vacrel, buf, &cutoff, &all_frozen))
+        if (!heap_page_is_all_visible(vacrel, buf,
+                                      &debug_cutoff, &debug_all_frozen))
             Assert(false);
 
-        Assert(!TransactionIdIsValid(cutoff) ||
-               cutoff == prunestate->visibility_cutoff_xid);
+        Assert(!TransactionIdIsValid(debug_cutoff) ||
+               debug_cutoff == visibility_cutoff_xid);
     }
 #endif
 
@@ -1737,7 +1738,6 @@ lazy_scan_prune(LVRelState *vacrel,
         ItemPointerData tmp;
 
         vacrel->lpdead_item_pages++;
-        prunestate->has_lpdead_items = true;
 
         ItemPointerSetBlockNumber(&tmp, blkno);
 
@@ -1762,7 +1762,7 @@ lazy_scan_prune(LVRelState *vacrel,
          * Now that freezing has been finalized, unset all_visible.  It needs
          * to reflect the present state of things, as expected by our caller.
          */
-        prunestate->all_visible = false;
+        all_visible = false;
     }
 
     /* Finally, add page-local counts to whole-VACUUM counts */
@@ -1776,19 +1776,23 @@ lazy_scan_prune(LVRelState *vacrel,
     if (hastup)
         vacrel->nonempty_pages = blkno + 1;
 
-    Assert(!prunestate->all_visible || !prunestate->has_lpdead_items);
+    /* Did we find LP_DEAD items? */
+    *has_lpdead_items = (lpdead_items > 0);
+
+    Assert(!all_visible || !(*has_lpdead_items));
 
     /*
      * Handle setting visibility map bit based on information from the VM (as
-     * of last lazy_scan_skip() call), and from prunestate
+     * of last lazy_scan_skip() call), and from all_visible and all_frozen
+     * variables
      */
-    if (!all_visible_according_to_vm && prunestate->all_visible)
+    if (!all_visible_according_to_vm && all_visible)
     {
         uint8       flags = VISIBILITYMAP_ALL_VISIBLE;
 
-        if (prunestate->all_frozen)
+        if (all_frozen)
         {
-            Assert(!TransactionIdIsValid(prunestate->visibility_cutoff_xid));
+            Assert(!TransactionIdIsValid(visibility_cutoff_xid));
             flags |= VISIBILITYMAP_ALL_FROZEN;
         }
 
@@ -1808,7 +1812,7 @@ lazy_scan_prune(LVRelState *vacrel,
         PageSetAllVisible(page);
         MarkBufferDirty(buf);
         visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
-                          vmbuffer, prunestate->visibility_cutoff_xid,
+                          vmbuffer, visibility_cutoff_xid,
                           flags);
     }
 
@@ -1841,7 +1845,7 @@ lazy_scan_prune(LVRelState *vacrel,
      * There should never be LP_DEAD items on a page with PD_ALL_VISIBLE set,
      * however.
      */
-    else if (prunestate->has_lpdead_items && PageIsAllVisible(page))
+    else if (lpdead_items > 0 && PageIsAllVisible(page))
     {
         elog(WARNING, "page containing LP_DEAD items is marked as all-visible in relation \"%s\" page %u",
              vacrel->relname, blkno);
@@ -1854,16 +1858,15 @@ lazy_scan_prune(LVRelState *vacrel,
     /*
      * If the all-visible page is all-frozen but not marked as such yet, mark
      * it as all-frozen.  Note that all_frozen is only valid if all_visible is
-     * true, so we must check both prunestate fields.
+     * true, so we must check both all_visible and all_frozen.
      */
-    else if (all_visible_according_to_vm && prunestate->all_visible &&
-             prunestate->all_frozen &&
-             !VM_ALL_FROZEN(vacrel->rel, blkno, &vmbuffer))
+    else if (all_visible_according_to_vm && all_visible &&
+             all_frozen && !VM_ALL_FROZEN(vacrel->rel, blkno, &vmbuffer))
     {
         /*
          * Avoid relying on all_visible_according_to_vm as a proxy for the
          * page-level PD_ALL_VISIBLE bit being set, since it might have become
-         * stale -- even when all_visible is set in prunestate
+         * stale -- even when all_visible is set
          */
         if (!PageIsAllVisible(page))
         {
@@ -1878,7 +1881,7 @@ lazy_scan_prune(LVRelState *vacrel,
          * since a snapshotConflictHorizon sufficient to make everything safe
          * for REDO was logged when the page's tuples were frozen.
          */
-        Assert(!TransactionIdIsValid(prunestate->visibility_cutoff_xid));
+        Assert(!TransactionIdIsValid(visibility_cutoff_xid));
         visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
                           vmbuffer, InvalidTransactionId,
                           VISIBILITYMAP_ALL_VISIBLE |
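
On the caller's side, the flag feeds the same free-space decision lazy_scan_heap() previously made from prunestate.has_lpdead_items. A condensed, self-contained illustration of that control flow; process_block() and its parameters are invented for this sketch, not the real function:

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in for lazy_scan_prune(): reports only LP_DEAD items. */
static void
scan_prune(int dead_items, bool *has_lpdead_items)
{
    *has_lpdead_items = (dead_items > 0);
}

/* Invented stand-in for the per-block logic in lazy_scan_heap(). */
static void
process_block(int nindexes, bool do_index_vacuuming, int dead_items)
{
    bool    has_lpdead_items;

    scan_prune(dead_items, &has_lpdead_items);

    /*
     * Record free space now unless a later index-vacuuming pass will
     * revisit the page, mirroring the condition rewritten in the diff
     * above.
     */
    if (nindexes == 0 || !do_index_vacuuming || !has_lpdead_items)
        printf("record free space immediately\n");
    else
        printf("defer free space to the second heap pass\n");
}

int
main(void)
{
    process_block(2, true, 3);   /* indexes + LP_DEAD items: defer */
    process_block(0, false, 3);  /* no indexes: record now */
    return 0;
}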

src/tools/pgindent/typedefs.list

@@ -1405,7 +1405,6 @@ LPVOID
 LPWSTR
 LSEG
 LUID
-LVPagePruneState
 LVRelState
 LVSavedErrInfo
 LWLock