diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 34bc6b6890..0fb3953513 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -212,23 +212,6 @@ typedef struct LVRelState
 	int64		missed_dead_tuples; /* # removable, but not removed */
 } LVRelState;
 
-/*
- * State returned by lazy_scan_prune()
- */
-typedef struct LVPagePruneState
-{
-	bool		has_lpdead_items;	/* includes existing LP_DEAD items */
-
-	/*
-	 * State describes the proper VM bit states to set for the page following
-	 * pruning and freezing. all_visible implies !has_lpdead_items, but don't
-	 * trust all_frozen result unless all_visible is also set to true.
-	 */
-	bool		all_visible;	/* Every item visible to all? */
-	bool		all_frozen;		/* provided all_visible is also true */
-	TransactionId visibility_cutoff_xid;	/* For recovery conflicts */
-} LVPagePruneState;
-
 /* Struct for saving and restoring vacuum error information. */
 typedef struct LVSavedErrInfo
 {
@@ -250,7 +233,7 @@ static bool lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf,
 static void lazy_scan_prune(LVRelState *vacrel, Buffer buf,
 							BlockNumber blkno, Page page,
 							Buffer vmbuffer, bool all_visible_according_to_vm,
-							LVPagePruneState *prunestate);
+							bool *has_lpdead_items);
 static bool lazy_scan_noprune(LVRelState *vacrel, Buffer buf,
 							  BlockNumber blkno, Page page,
 							  bool *has_lpdead_items);
@@ -854,7 +837,7 @@ lazy_scan_heap(LVRelState *vacrel)
 		Buffer		buf;
 		Page		page;
 		bool		all_visible_according_to_vm;
-		LVPagePruneState prunestate;
+		bool		has_lpdead_items;
 
 		if (blkno == next_unskippable_block)
 		{
@@ -959,8 +942,6 @@ lazy_scan_heap(LVRelState *vacrel)
 		page = BufferGetPage(buf);
 		if (!ConditionalLockBufferForCleanup(buf))
 		{
-			bool		has_lpdead_items;
-
 			LockBuffer(buf, BUFFER_LOCK_SHARE);
 
 			/* Check for new or empty pages before lazy_scan_noprune call */
@@ -1035,7 +1016,7 @@ lazy_scan_heap(LVRelState *vacrel)
 		 */
 		lazy_scan_prune(vacrel, buf, blkno, page,
 						vmbuffer, all_visible_according_to_vm,
-						&prunestate);
+						&has_lpdead_items);
 
 		/*
 		 * Final steps for block: drop cleanup lock, record free space in the
@@ -1056,7 +1037,7 @@ lazy_scan_heap(LVRelState *vacrel)
 		 */
 		if (vacrel->nindexes == 0
 			|| !vacrel->do_index_vacuuming
-			|| !prunestate.has_lpdead_items)
+			|| !has_lpdead_items)
 		{
 			Size		freespace = PageGetHeapFreeSpace(page);
 
@@ -1068,7 +1049,7 @@ lazy_scan_heap(LVRelState *vacrel)
 		 * visible on upper FSM pages. This is done after vacuuming if the
 		 * table has indexes.
 		 */
-		if (vacrel->nindexes == 0 && prunestate.has_lpdead_items &&
+		if (vacrel->nindexes == 0 && has_lpdead_items &&
 			blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES)
 		{
 			FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
@@ -1383,6 +1364,14 @@ lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf, BlockNumber blkno,
  * right after this operation completes instead of in the middle of it. Note that
  * any tuple that becomes dead after the call to heap_page_prune() can't need to
  * be frozen, because it was visible to another session when vacuum started.
+ *
+ * vmbuffer is the buffer containing the VM block with visibility information
+ * for the heap block, blkno. all_visible_according_to_vm is the saved
+ * visibility status of the heap block looked up earlier by the caller. We
+ * won't rely entirely on this status, as it may be out of date.
+ *
+ * *has_lpdead_items is set to true or false depending on whether, upon return
+ * from this function, any LP_DEAD items are still present on the page.
  */
 static void
 lazy_scan_prune(LVRelState *vacrel,
@@ -1391,7 +1380,7 @@ lazy_scan_prune(LVRelState *vacrel,
 				Page page,
 				Buffer vmbuffer,
 				bool all_visible_according_to_vm,
-				LVPagePruneState *prunestate)
+				bool *has_lpdead_items)
 {
 	Relation	rel = vacrel->rel;
 	OffsetNumber offnum,
@@ -1404,6 +1393,9 @@ lazy_scan_prune(LVRelState *vacrel,
 				recently_dead_tuples;
 	HeapPageFreeze pagefrz;
 	bool		hastup = false;
+	bool		all_visible,
+				all_frozen;
+	TransactionId visibility_cutoff_xid;
 	int64		fpi_before = pgWalUsage.wal_fpi;
 	OffsetNumber deadoffsets[MaxHeapTuplesPerPage];
 	HeapTupleFreeze frozen[MaxHeapTuplesPerPage];
@@ -1444,14 +1436,22 @@ lazy_scan_prune(LVRelState *vacrel,
 									 &presult, &vacrel->offnum);
 
 	/*
-	 * Now scan the page to collect LP_DEAD items and check for tuples
-	 * requiring freezing among remaining tuples with storage
+	 * We will update the VM after collecting LP_DEAD items and freezing
+	 * tuples. Keep track of whether or not the page is all_visible and
+	 * all_frozen and use this information to update the VM. all_visible
+	 * implies 0 lpdead_items, but don't trust all_frozen result unless
+	 * all_visible is also set to true.
+	 *
+	 * Also keep track of the visibility cutoff xid for recovery conflicts.
 	 */
-	prunestate->has_lpdead_items = false;
-	prunestate->all_visible = true;
-	prunestate->all_frozen = true;
-	prunestate->visibility_cutoff_xid = InvalidTransactionId;
+	all_visible = true;
+	all_frozen = true;
+	visibility_cutoff_xid = InvalidTransactionId;
 
+	/*
+	 * Now scan the page to collect LP_DEAD items and update the variables set
+	 * just above.
+	 */
 	for (offnum = FirstOffsetNumber;
 		 offnum <= maxoff;
 		 offnum = OffsetNumberNext(offnum))
@@ -1538,13 +1538,13 @@ lazy_scan_prune(LVRelState *vacrel,
 				 * asynchronously. See SetHintBits for more info. Check that
 				 * the tuple is hinted xmin-committed because of that.
 				 */
-				if (prunestate->all_visible)
+				if (all_visible)
 				{
 					TransactionId xmin;
 
 					if (!HeapTupleHeaderXminCommitted(htup))
 					{
-						prunestate->all_visible = false;
+						all_visible = false;
 						break;
 					}
 
@@ -1556,14 +1556,14 @@ lazy_scan_prune(LVRelState *vacrel,
 					if (!TransactionIdPrecedes(xmin,
 											   vacrel->cutoffs.OldestXmin))
 					{
-						prunestate->all_visible = false;
+						all_visible = false;
 						break;
 					}
 
 					/* Track newest xmin on page. */
-					if (TransactionIdFollows(xmin, prunestate->visibility_cutoff_xid) &&
+					if (TransactionIdFollows(xmin, visibility_cutoff_xid) &&
 						TransactionIdIsNormal(xmin))
-						prunestate->visibility_cutoff_xid = xmin;
+						visibility_cutoff_xid = xmin;
 				}
 				break;
 			case HEAPTUPLE_RECENTLY_DEAD:
@@ -1574,7 +1574,7 @@ lazy_scan_prune(LVRelState *vacrel,
 				 * pruning.)
 				 */
 				recently_dead_tuples++;
-				prunestate->all_visible = false;
+				all_visible = false;
 				break;
 			case HEAPTUPLE_INSERT_IN_PROGRESS:
 
@@ -1585,11 +1585,11 @@ lazy_scan_prune(LVRelState *vacrel,
 				 * results. This assumption is a bit shaky, but it is what
 				 * acquire_sample_rows() does, so be consistent.
 				 */
-				prunestate->all_visible = false;
+				all_visible = false;
 				break;
 			case HEAPTUPLE_DELETE_IN_PROGRESS:
 				/* This is an expected case during concurrent vacuum */
-				prunestate->all_visible = false;
+				all_visible = false;
 
 				/*
 				 * Count such rows as live. As above, we assume the deleting
@@ -1619,7 +1619,7 @@ lazy_scan_prune(LVRelState *vacrel,
 		 * definitely cannot be set all-frozen in the visibility map later on
 		 */
 		if (!totally_frozen)
-			prunestate->all_frozen = false;
+			all_frozen = false;
 	}
 
 	/*
@@ -1637,7 +1637,7 @@ lazy_scan_prune(LVRelState *vacrel,
 	 * page all-frozen afterwards (might not happen until final heap pass).
 	 */
 	if (pagefrz.freeze_required || tuples_frozen == 0 ||
-		(prunestate->all_visible && prunestate->all_frozen &&
+		(all_visible && all_frozen &&
 		 fpi_before != pgWalUsage.wal_fpi))
 	{
 		/*
@@ -1675,11 +1675,11 @@ lazy_scan_prune(LVRelState *vacrel,
 		 * once we're done with it. Otherwise we generate a conservative
 		 * cutoff by stepping back from OldestXmin.
 		 */
-		if (prunestate->all_visible && prunestate->all_frozen)
+		if (all_visible && all_frozen)
 		{
 			/* Using same cutoff when setting VM is now unnecessary */
-			snapshotConflictHorizon = prunestate->visibility_cutoff_xid;
-			prunestate->visibility_cutoff_xid = InvalidTransactionId;
+			snapshotConflictHorizon = visibility_cutoff_xid;
+			visibility_cutoff_xid = InvalidTransactionId;
 		}
 		else
 		{
@@ -1702,7 +1702,7 @@ lazy_scan_prune(LVRelState *vacrel,
 		 */
 		vacrel->NewRelfrozenXid = pagefrz.NoFreezePageRelfrozenXid;
 		vacrel->NewRelminMxid = pagefrz.NoFreezePageRelminMxid;
-		prunestate->all_frozen = false;
+		all_frozen = false;
 		tuples_frozen = 0;		/* avoid miscounts in instrumentation */
 	}
 
@@ -1715,16 +1715,17 @@ lazy_scan_prune(LVRelState *vacrel,
 	 */
 #ifdef USE_ASSERT_CHECKING
 	/* Note that all_frozen value does not matter when !all_visible */
-	if (prunestate->all_visible && lpdead_items == 0)
+	if (all_visible && lpdead_items == 0)
 	{
-		TransactionId cutoff;
-		bool		all_frozen;
+		TransactionId debug_cutoff;
+		bool		debug_all_frozen;
 
-		if (!heap_page_is_all_visible(vacrel, buf, &cutoff, &all_frozen))
+		if (!heap_page_is_all_visible(vacrel, buf,
+									  &debug_cutoff, &debug_all_frozen))
 			Assert(false);
 
-		Assert(!TransactionIdIsValid(cutoff) ||
-			   cutoff == prunestate->visibility_cutoff_xid);
+		Assert(!TransactionIdIsValid(debug_cutoff) ||
+			   debug_cutoff == visibility_cutoff_xid);
 	}
 #endif
 
@@ -1737,7 +1738,6 @@ lazy_scan_prune(LVRelState *vacrel,
 		ItemPointerData tmp;
 
 		vacrel->lpdead_item_pages++;
-		prunestate->has_lpdead_items = true;
 
 		ItemPointerSetBlockNumber(&tmp, blkno);
 
@@ -1762,7 +1762,7 @@ lazy_scan_prune(LVRelState *vacrel,
 		 * Now that freezing has been finalized, unset all_visible. It needs
 		 * to reflect the present state of things, as expected by our caller.
 		 */
-		prunestate->all_visible = false;
+		all_visible = false;
 	}
 
 	/* Finally, add page-local counts to whole-VACUUM counts */
@@ -1776,19 +1776,23 @@ lazy_scan_prune(LVRelState *vacrel,
 	if (hastup)
 		vacrel->nonempty_pages = blkno + 1;
 
-	Assert(!prunestate->all_visible || !prunestate->has_lpdead_items);
+	/* Did we find LP_DEAD items? */
+	*has_lpdead_items = (lpdead_items > 0);
+
+	Assert(!all_visible || !(*has_lpdead_items));
 
 	/*
 	 * Handle setting visibility map bit based on information from the VM (as
-	 * of last lazy_scan_skip() call), and from prunestate
+	 * of last lazy_scan_skip() call), and from all_visible and all_frozen
+	 * variables
 	 */
-	if (!all_visible_according_to_vm && prunestate->all_visible)
+	if (!all_visible_according_to_vm && all_visible)
 	{
 		uint8		flags = VISIBILITYMAP_ALL_VISIBLE;
 
-		if (prunestate->all_frozen)
+		if (all_frozen)
 		{
-			Assert(!TransactionIdIsValid(prunestate->visibility_cutoff_xid));
+			Assert(!TransactionIdIsValid(visibility_cutoff_xid));
 			flags |= VISIBILITYMAP_ALL_FROZEN;
 		}
 
@@ -1808,7 +1812,7 @@ lazy_scan_prune(LVRelState *vacrel,
 		PageSetAllVisible(page);
 		MarkBufferDirty(buf);
 		visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
-						  vmbuffer, prunestate->visibility_cutoff_xid,
+						  vmbuffer, visibility_cutoff_xid,
 						  flags);
 	}
 
@@ -1841,7 +1845,7 @@ lazy_scan_prune(LVRelState *vacrel,
 	 * There should never be LP_DEAD items on a page with PD_ALL_VISIBLE set,
 	 * however.
 	 */
-	else if (prunestate->has_lpdead_items && PageIsAllVisible(page))
+	else if (lpdead_items > 0 && PageIsAllVisible(page))
 	{
 		elog(WARNING, "page containing LP_DEAD items is marked as all-visible in relation \"%s\" page %u",
 			 vacrel->relname, blkno);
@@ -1854,16 +1858,15 @@ lazy_scan_prune(LVRelState *vacrel,
 	/*
 	 * If the all-visible page is all-frozen but not marked as such yet, mark
 	 * it as all-frozen. Note that all_frozen is only valid if all_visible is
-	 * true, so we must check both prunestate fields.
+	 * true, so we must check both all_visible and all_frozen.
 	 */
-	else if (all_visible_according_to_vm && prunestate->all_visible &&
-			 prunestate->all_frozen &&
-			 !VM_ALL_FROZEN(vacrel->rel, blkno, &vmbuffer))
+	else if (all_visible_according_to_vm && all_visible &&
+			 all_frozen && !VM_ALL_FROZEN(vacrel->rel, blkno, &vmbuffer))
 	{
 		/*
 		 * Avoid relying on all_visible_according_to_vm as a proxy for the
 		 * page-level PD_ALL_VISIBLE bit being set, since it might have become
-		 * stale -- even when all_visible is set in prunestate
+		 * stale -- even when all_visible is set
 		 */
 		if (!PageIsAllVisible(page))
 		{
@@ -1878,7 +1881,7 @@ lazy_scan_prune(LVRelState *vacrel,
 		 * since a snapshotConflictHorizon sufficient to make everything safe
 		 * for REDO was logged when the page's tuples were frozen.
 		 */
-		Assert(!TransactionIdIsValid(prunestate->visibility_cutoff_xid));
+		Assert(!TransactionIdIsValid(visibility_cutoff_xid));
 		visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
 						  vmbuffer, InvalidTransactionId,
 						  VISIBILITYMAP_ALL_VISIBLE |
diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list
index 29fd1cae64..17c0104376 100644
--- a/src/tools/pgindent/typedefs.list
+++ b/src/tools/pgindent/typedefs.list
@@ -1405,7 +1406,6 @@ LPVOID
 LPWSTR
 LSEG
 LUID
-LVPagePruneState
 LVRelState
 LVSavedErrInfo
 LWLock
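
To see the shape of this refactoring outside the full patch context, here is a minimal, hypothetical C sketch of the same pattern (the names PageSummary, scan_page_old and scan_page_new, and the integer "item" encoding are invented for illustration only; they are not PostgreSQL APIs): the per-page result struct becomes local variables inside the callee, and the one fact the caller still needs comes back through a single bool out-parameter.

/*
 * Standalone sketch of the out-parameter simplification used in the patch
 * above.  Hypothetical names; not PostgreSQL code.
 */
#include <stdbool.h>
#include <stdio.h>

/* Old shape: the callee fills in a struct, even though the caller only
 * reads one field of it afterwards. */
typedef struct PageSummary
{
	bool		has_dead_items;
	bool		all_visible;
	bool		all_frozen;
} PageSummary;

static void
scan_page_old(const int *items, int nitems, PageSummary *summary)
{
	summary->has_dead_items = false;
	summary->all_visible = true;
	summary->all_frozen = true;

	for (int i = 0; i < nitems; i++)
	{
		if (items[i] < 0)		/* pretend: a negative item is "dead" */
			summary->has_dead_items = true;
		if (items[i] == 0)		/* pretend: a zero item is "not yet visible" */
			summary->all_visible = summary->all_frozen = false;
	}
}

/* New shape: visibility tracking stays in locals (the callee itself would
 * act on them), and only the caller-relevant flag is returned through
 * *has_dead_items. */
static void
scan_page_new(const int *items, int nitems, bool *has_dead_items)
{
	bool		all_visible = true;
	bool		all_frozen = true;
	int			dead_items = 0;

	for (int i = 0; i < nitems; i++)
	{
		if (items[i] < 0)
			dead_items++;
		if (items[i] == 0)
			all_visible = all_frozen = false;
	}

	/* here the real code would use all_visible/all_frozen itself */
	(void) all_visible;
	(void) all_frozen;

	*has_dead_items = (dead_items > 0);
}

int
main(void)
{
	int			items[] = {1, -2, 3};
	PageSummary summary;
	bool		has_dead_items;

	scan_page_old(items, 3, &summary);
	scan_page_new(items, 3, &has_dead_items);
	printf("old: %d  new: %d\n", summary.has_dead_items, has_dead_items);
	return 0;
}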