Prevent index-only scans from returning wrong answers under Hot Standby.

The alternative of disallowing index-only scans in HS operation was
discussed, but the consensus was that it was better to treat marking
a page all-visible as a recovery conflict for snapshots that could still
fail to see XIDs on that page.  We may in the future try to soften this,
so that we simply force index scans to do heap fetches in cases where
this may be an issue, rather than throwing a hard conflict.
Robert Haas 2012-04-26 20:00:21 -04:00
parent 92df220343
commit 3424bff90f
7 changed files with 36 additions and 11 deletions
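
As an illustration of the rule the standby applies when it replays an all-visible record, here is a small self-contained C sketch. It is not PostgreSQL code: the helper names and the main() driver are invented for this example, and only the comparison rule is meant to mirror the server's behavior. A standby snapshot conflicts unless its xmin is strictly newer than the record's cutoff_xid (the newest xmin among tuples on the page), because otherwise the snapshot might fail to see some tuple there while an index-only scan trusts the all-visible bit and skips the heap fetch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t TransactionId;
#define InvalidTransactionId ((TransactionId) 0)

/* Wraparound-aware "xid1 is older than xid2", as ordinary XIDs compare. */
static bool
xid_precedes(TransactionId xid1, TransactionId xid2)
{
    return (int32_t) (xid1 - xid2) < 0;
}

/*
 * Decision made while replaying "page is now all-visible": conflict with a
 * standby snapshot unless its xmin is strictly newer than cutoff_xid, the
 * newest xmin among the tuples on the page.  An invalid cutoff_xid means the
 * page holds no tuples, so there is nothing a snapshot could fail to see.
 */
static bool
snapshot_conflicts_with_all_visible(TransactionId snapshot_xmin,
                                    TransactionId cutoff_xid)
{
    if (cutoff_xid == InvalidTransactionId)
        return false;
    return !xid_precedes(cutoff_xid, snapshot_xmin);
}

int
main(void)
{
    /* Snapshot whose xmin equals the page's newest xmin: must conflict. */
    printf("%d\n", snapshot_conflicts_with_all_visible(100, 100)); /* prints 1 */
    /* Snapshot taken after that xmin became visible to everyone: safe. */
    printf("%d\n", snapshot_conflicts_with_all_visible(101, 100)); /* prints 0 */
    return 0;
}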

@@ -4368,7 +4368,8 @@ log_heap_freeze(Relation reln, Buffer buffer,
  * and dirtied.
  */
 XLogRecPtr
-log_heap_visible(RelFileNode rnode, BlockNumber block, Buffer vm_buffer)
+log_heap_visible(RelFileNode rnode, BlockNumber block, Buffer vm_buffer,
+				 TransactionId cutoff_xid)
 {
 	xl_heap_visible xlrec;
 	XLogRecPtr	recptr;
@@ -4376,6 +4377,7 @@ log_heap_visible(RelFileNode rnode, BlockNumber block, Buffer vm_buffer)
 
 	xlrec.node = rnode;
 	xlrec.block = block;
+	xlrec.cutoff_xid = cutoff_xid;
 
 	rdata[0].data = (char *) &xlrec;
 	rdata[0].len = SizeOfHeapVisible;
@@ -4708,6 +4710,17 @@ heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record)
 		return;
 	page = (Page) BufferGetPage(buffer);
 
+	/*
+	 * If there are any Hot Standby transactions running that have an xmin
+	 * horizon old enough that this page isn't all-visible for them, they
+	 * might incorrectly decide that an index-only scan can skip a heap fetch.
+	 *
+	 * NB: It might be better to throw some kind of "soft" conflict here that
+	 * forces any index-only scan that is in flight to perform heap fetches,
+	 * rather than killing the transaction outright.
+	 */
+	ResolveRecoveryConflictWithSnapshot(xlrec->cutoff_xid, xlrec->node);
+
 	LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 
 	/*
@@ -4760,7 +4773,8 @@ heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record)
 		 * harm is done; and the next VACUUM will fix it.
 		 */
 		if (!XLByteLE(lsn, PageGetLSN(BufferGetPage(vmbuffer))))
-			visibilitymap_set(reln, xlrec->block, lsn, vmbuffer);
+			visibilitymap_set(reln, xlrec->block, lsn, vmbuffer,
+							  xlrec->cutoff_xid);
 
 		ReleaseBuffer(vmbuffer);
 		FreeFakeRelcacheEntry(reln);

@@ -229,7 +229,9 @@ visibilitymap_pin_ok(BlockNumber heapBlk, Buffer buf)
  * recptr is the LSN of the XLOG record we're replaying, if we're in recovery,
  * or InvalidXLogRecPtr in normal running. The page LSN is advanced to the
  * one provided; in normal running, we generate a new XLOG record and set the
- * page LSN to that value.
+ * page LSN to that value. cutoff_xid is the largest xmin on the page being
+ * marked all-visible; it is needed for Hot Standby, and can be
+ * InvalidTransactionId if the page contains no tuples.
  *
  * You must pass a buffer containing the correct map page to this function.
  * Call visibilitymap_pin first to pin the right one. This function doesn't do
@@ -237,7 +239,7 @@ visibilitymap_pin_ok(BlockNumber heapBlk, Buffer buf)
  */
 void
 visibilitymap_set(Relation rel, BlockNumber heapBlk, XLogRecPtr recptr,
-				  Buffer buf)
+				  Buffer buf, TransactionId cutoff_xid)
 {
 	BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
 	uint32		mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
@@ -269,7 +271,8 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, XLogRecPtr recptr,
 		if (RelationNeedsWAL(rel))
 		{
 			if (XLogRecPtrIsInvalid(recptr))
-				recptr = log_heap_visible(rel->rd_node, heapBlk, buf);
+				recptr = log_heap_visible(rel->rd_node, heapBlk, buf,
+										  cutoff_xid);
 			PageSetLSN(page, recptr);
 			PageSetTLI(page, ThisTimeLineID);
 		}

@@ -448,6 +448,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
 		bool		all_visible_according_to_vm;
 		bool		all_visible;
 		bool		has_dead_tuples;
+		TransactionId visibility_cutoff_xid = InvalidTransactionId;
 
 		if (blkno == next_not_all_visible_block)
 		{
@@ -627,7 +628,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
 			{
 				PageSetAllVisible(page);
 				MarkBufferDirty(buf);
-				visibilitymap_set(onerel, blkno, InvalidXLogRecPtr, vmbuffer);
+				visibilitymap_set(onerel, blkno, InvalidXLogRecPtr, vmbuffer,
+								  InvalidTransactionId);
 			}
 
 			UnlockReleaseBuffer(buf);
@@ -759,6 +761,10 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
 							all_visible = false;
 							break;
 						}
+
+						/* Track newest xmin on page. */
+						if (TransactionIdFollows(xmin, visibility_cutoff_xid))
+							visibility_cutoff_xid = xmin;
 					}
 					break;
 				case HEAPTUPLE_RECENTLY_DEAD:
@@ -853,7 +859,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
 				PageSetAllVisible(page);
 				MarkBufferDirty(buf);
 			}
-			visibilitymap_set(onerel, blkno, InvalidXLogRecPtr, vmbuffer);
+			visibilitymap_set(onerel, blkno, InvalidXLogRecPtr, vmbuffer,
+							  visibility_cutoff_xid);
 		}
 
 		/*

@@ -141,7 +141,7 @@ extern XLogRecPtr log_heap_freeze(Relation reln, Buffer buffer,
 				TransactionId cutoff_xid,
 				OffsetNumber *offsets, int offcnt);
 extern XLogRecPtr log_heap_visible(RelFileNode rnode, BlockNumber block,
-				Buffer vm_buffer);
+				Buffer vm_buffer, TransactionId cutoff_xid);
 extern XLogRecPtr log_newpage(RelFileNode *rnode, ForkNumber forkNum,
 				BlockNumber blk, Page page);

@@ -788,9 +788,10 @@ typedef struct xl_heap_visible
 {
 	RelFileNode node;
 	BlockNumber block;
+	TransactionId cutoff_xid;
 } xl_heap_visible;
 
-#define SizeOfHeapVisible (offsetof(xl_heap_visible, block) + sizeof(BlockNumber))
+#define SizeOfHeapVisible (offsetof(xl_heap_visible, cutoff_xid) + sizeof(TransactionId))
 
 extern void HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple,
 						 TransactionId *latestRemovedXid);

@@ -25,7 +25,7 @@ extern void visibilitymap_pin(Relation rel, BlockNumber heapBlk,
 					Buffer *vmbuf);
 extern bool visibilitymap_pin_ok(BlockNumber heapBlk, Buffer vmbuf);
 extern void visibilitymap_set(Relation rel, BlockNumber heapBlk,
-				  XLogRecPtr recptr, Buffer vmbuf);
+				  XLogRecPtr recptr, Buffer vmbuf, TransactionId cutoff_xid);
 extern bool visibilitymap_test(Relation rel, BlockNumber heapBlk, Buffer *vmbuf);
 extern BlockNumber visibilitymap_count(Relation rel);
 extern void visibilitymap_truncate(Relation rel, BlockNumber nheapblocks);

@@ -71,7 +71,7 @@ typedef struct XLogContRecord
 /*
  * Each page of XLOG file has a header like this:
  */
-#define XLOG_PAGE_MAGIC 0xD070	/* can be used as WAL version indicator */
+#define XLOG_PAGE_MAGIC 0xD071	/* can be used as WAL version indicator */
 
 typedef struct XLogPageHeaderData
 {
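
For context on what the conflict protects, the freestanding sketch below (none of these names are PostgreSQL's) shows the per-entry decision an index-only scan makes: when the visibility map says the heap page is all-visible, the value comes straight from the index and the heap is never consulted. A standby backend whose snapshot cannot actually see every tuple on such a page must therefore be cancelled before it gets here, or, as the commit message suggests as a possible future softening, forced back onto the heap-fetch path.

#include <stdbool.h>
#include <stdio.h>

/* Freestanding illustration, not the PostgreSQL executor API. */
typedef struct
{
    bool vm_all_visible;    /* visibility-map bit for the tuple's heap page */
    bool heap_visible;      /* what a heap fetch plus snapshot check would say */
    int  value;             /* the indexed value */
} IndexEntry;

/*
 * Per-entry decision of an index-only scan: trust the all-visible bit and
 * skip the heap fetch entirely; otherwise fall back to the heap check.
 */
static bool
fetch_index_only(const IndexEntry *e, int *out)
{
    if (e->vm_all_visible || e->heap_visible)
    {
        *out = e->value;
        return true;
    }
    return false;
}

int
main(void)
{
    /*
     * A page an old standby snapshot should not treat as all-visible:
     * without the recovery conflict, the scan returns a row the snapshot
     * cannot actually see.
     */
    IndexEntry stale = {true, false, 42};
    int v;

    if (fetch_index_only(&stale, &v))
        printf("returned %d despite the snapshot not seeing it\n", v);
    return 0;
}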