Ensure we MarkBufferDirty before visibilitymap_set()

logs the heap page and sets the LSN. Otherwise a
checkpoint could occur between those actions and
leave us in an inconsistent state.

Jeff Davis
Simon Riggs 2013-04-30 08:15:49 +01:00
parent fdea2530bd
commit 730924397c
1 changed file with 26 additions and 21 deletions
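
The ordering rule the commit message describes, as a minimal sketch. This assumes a PostgreSQL backend context and uses the same calls as the diff below; the helper name mark_all_visible_safely is hypothetical. The invariant: any change covered by WAL must dirty the buffer before the WAL record is written and the page LSN is set.

    #include "postgres.h"
    #include "access/visibilitymap.h"
    #include "storage/bufmgr.h"
    #include "storage/bufpage.h"
    #include "utils/rel.h"

    /*
     * Hypothetical helper showing the safe ordering used by this commit:
     * dirty the heap buffer first, then write WAL that covers it.
     */
    static void
    mark_all_visible_safely(Relation rel, BlockNumber blkno, Buffer buf,
                            Buffer vmbuffer, TransactionId cutoff_xid)
    {
        Page        page = BufferGetPage(buf);

        PageSetAllVisible(page);

        /* Dirty the heap buffer BEFORE any WAL for it can be written. */
        MarkBufferDirty(buf);

        /*
         * Safe now: with checksums enabled, visibilitymap_set() may log
         * the heap page and stamp its LSN.  A checkpoint running from
         * this point on sees the buffer as dirty and flushes it, so the
         * on-disk page cannot fall behind the WAL that covers it.
         */
        visibilitymap_set(rel, blkno, buf, InvalidXLogRecPtr,
                          vmbuffer, cutoff_xid);
    }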

@@ -894,26 +894,25 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
         freespace = PageGetHeapFreeSpace(page);
 
         /* mark page all-visible, if appropriate */
-        if (all_visible)
+        if (all_visible && !all_visible_according_to_vm)
         {
-            if (!PageIsAllVisible(page))
-            {
-                PageSetAllVisible(page);
-                MarkBufferDirty(buf);
-                visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
-                                  vmbuffer, visibility_cutoff_xid);
-            }
-            else if (!all_visible_according_to_vm)
-            {
-                /*
-                 * It should never be the case that the visibility map page is
-                 * set while the page-level bit is clear, but the reverse is
-                 * allowed.  Set the visibility map bit as well so that we get
-                 * back in sync.
-                 */
-                visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
-                                  vmbuffer, visibility_cutoff_xid);
-            }
+            /*
+             * It should never be the case that the visibility map page is set
+             * while the page-level bit is clear, but the reverse is allowed
+             * (if checksums are not enabled).  Regardless, set both bits
+             * so that we get back in sync.
+             *
+             * NB: If the heap page is all-visible but the VM bit is not set,
+             * we don't need to dirty the heap page.  However, if checksums are
+             * enabled, we do need to make sure that the heap page is dirtied
+             * before passing it to visibilitymap_set(), because it may be
+             * logged.  Given that this situation should only happen in rare
+             * cases after a crash, it is not worth optimizing.
+             */
+            PageSetAllVisible(page);
+            MarkBufferDirty(buf);
+            visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
+                              vmbuffer, visibility_cutoff_xid);
         }
 
         /*
@@ -1138,6 +1137,14 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
     PageRepairFragmentation(page);
 
+    /*
+     * Mark buffer dirty before we write WAL.
+     *
+     * If checksums are enabled, visibilitymap_set() may log the heap page, so
+     * we must mark heap buffer dirty before calling visibilitymap_set().
+     */
+    MarkBufferDirty(buffer);
+
     /*
      * Now that we have removed the dead tuples from the page, once again check
      * if the page has become all-visible.
      */
@@ -1151,8 +1158,6 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
                           visibility_cutoff_xid);
     }
 
-    MarkBufferDirty(buffer);
-
     /* XLOG stuff */
     if (RelationNeedsWAL(onerel))
     {
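
For contrast, a sketch of the race the second hunk closes. This reproduces the old lazy_vacuum_page() call order in a hypothetical function (old_broken_order is not a real PostgreSQL symbol) and simplifies checkpointing, which computes a redo pointer and then flushes only buffers already marked dirty.

    #include "postgres.h"
    #include "access/visibilitymap.h"
    #include "storage/bufmgr.h"
    #include "utils/rel.h"

    /* Hypothetical reproduction of the OLD, broken ordering. */
    static void
    old_broken_order(Relation onerel, BlockNumber blkno, Buffer buffer,
                     Buffer vmbuffer, TransactionId visibility_cutoff_xid)
    {
        /*
         * With checksums enabled this may write a WAL record covering
         * the heap page and set the page LSN while the buffer is still
         * flagged clean.
         */
        visibilitymap_set(onerel, blkno, buffer, InvalidXLogRecPtr,
                          vmbuffer, visibility_cutoff_xid);

        /*
         * RACE WINDOW: a checkpoint starting here places its redo
         * pointer past the record just written, yet skips this buffer
         * because it is not marked dirty.  After a crash, replay begins
         * at the redo pointer, so the logged change is never reapplied
         * and the heap page is left inconsistent with the visibility
         * map.
         */

        MarkBufferDirty(buffer);    /* too late: the LSN is already set */
    }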