Refactor per-page logic common to all redo routines to a new function.

Every redo routine uses the same idiom to determine what to do to a page:
check if there's a backup block for it, and if not, read the buffer if the
block exists, and check its LSN. Refactor that into a common function,
XLogReadBufferForRedo, making all the redo routines shorter and more
readable.

This has no user-visible effect, and makes no changes to the WAL format.

Reviewed by Andres Freund, Alvaro Herrera, Michael Paquier.
Heikki Linnakangas 2014-08-13 15:39:08 +03:00
parent 26f8b99b24
commit f8f4227976
8 changed files with 1430 additions and 1739 deletions
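
For orientation, here is the idiom being factored out and the shape it takes after this commit. This is a condensed sketch distilled from the hunks below, not a verbatim excerpt; the helper itself and its XLogRedoAction result type live in xlogutils.c/xlogutils.h, which are among the changed files not shown in this excerpt.

    /* Before: every redo routine open-coded this dance for each page. */
    if (record->xl_info & XLR_BKP_BLOCK(0))
        (void) RestoreBackupBlock(lsn, record, 0, false, false);
    else
    {
        buffer = XLogReadBuffer(node, blkno, false);
        if (BufferIsValid(buffer))
        {
            Page        page = (Page) BufferGetPage(buffer);

            if (lsn > PageGetLSN(page))
            {
                /* ... apply the record's changes to the page ... */
                PageSetLSN(page, lsn);
                MarkBufferDirty(buffer);
            }
            UnlockReleaseBuffer(buffer);
        }
    }

    /* After: the backup-block check, read, and LSN test live in the helper. */
    if (XLogReadBufferForRedo(lsn, record, 0, node, blkno, &buffer)
        == BLK_NEEDS_REDO)
    {
        Page        page = BufferGetPage(buffer);

        /* ... apply the record's changes to the page ... */
        PageSetLSN(page, lsn);
        MarkBufferDirty(buffer);
    }
    if (BufferIsValid(buffer))
        UnlockReleaseBuffer(buffer);

Besides BLK_NEEDS_REDO and BLK_RESTORED, which both appear in the hunks below, the result type evidently also covers an already-applied case and a block-not-found case (BLK_DONE and BLK_NOTFOUND in the committed header, if memory serves); in every case except the last, the caller still holds a locked buffer and must UnlockReleaseBuffer() it.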

src/backend/access/gin/ginxlog.c

@ -20,24 +20,24 @@
static MemoryContext opCtx; /* working memory for operations */
static void
ginRedoClearIncompleteSplit(XLogRecPtr lsn, RelFileNode node, BlockNumber blkno)
ginRedoClearIncompleteSplit(XLogRecPtr lsn, XLogRecord *record,
int block_index,
RelFileNode node, BlockNumber blkno)
{
Buffer buffer;
Page page;
buffer = XLogReadBuffer(node, blkno, false);
if (!BufferIsValid(buffer))
return; /* page was deleted, nothing to do */
if (XLogReadBufferForRedo(lsn, record, block_index, node, blkno, &buffer)
== BLK_NEEDS_REDO)
{
page = (Page) BufferGetPage(buffer);
if (lsn > PageGetLSN(page))
{
GinPageGetOpaque(page)->flags &= ~GIN_INCOMPLETE_SPLIT;
PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
}
@ -332,7 +332,6 @@ ginRedoInsert(XLogRecPtr lsn, XLogRecord *record)
{
ginxlogInsert *data = (ginxlogInsert *) XLogRecGetData(record);
Buffer buffer;
Page page;
char *payload;
BlockNumber leftChildBlkno = InvalidBlockNumber;
BlockNumber rightChildBlkno = InvalidBlockNumber;
@ -351,26 +350,14 @@ ginRedoInsert(XLogRecPtr lsn, XLogRecord *record)
rightChildBlkno = BlockIdGetBlockNumber((BlockId) payload);
payload += sizeof(BlockIdData);
if (record->xl_info & XLR_BKP_BLOCK(0))
(void) RestoreBackupBlock(lsn, record, 0, false, false);
else
ginRedoClearIncompleteSplit(lsn, data->node, leftChildBlkno);
ginRedoClearIncompleteSplit(lsn, record, 0, data->node, leftChildBlkno);
}
/* If we have a full-page image, restore it and we're done */
if (record->xl_info & XLR_BKP_BLOCK(isLeaf ? 0 : 1))
if (XLogReadBufferForRedo(lsn, record, isLeaf ? 0 : 1, data->node,
data->blkno, &buffer) == BLK_NEEDS_REDO)
{
(void) RestoreBackupBlock(lsn, record, isLeaf ? 0 : 1, false, false);
return;
}
Page page = BufferGetPage(buffer);
buffer = XLogReadBuffer(data->node, data->blkno, false);
if (!BufferIsValid(buffer))
return; /* page was deleted, nothing to do */
page = (Page) BufferGetPage(buffer);
if (lsn > PageGetLSN(page))
{
/* How to insert the payload is tree-type specific */
if (data->flags & GIN_INSERT_ISDATA)
{
@ -386,7 +373,7 @@ ginRedoInsert(XLogRecPtr lsn, XLogRecord *record)
PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
}
@ -476,12 +463,7 @@ ginRedoSplit(XLogRecPtr lsn, XLogRecord *record)
* split
*/
if (!isLeaf)
{
if (record->xl_info & XLR_BKP_BLOCK(0))
(void) RestoreBackupBlock(lsn, record, 0, false, false);
else
ginRedoClearIncompleteSplit(lsn, data->node, data->leftChildBlkno);
}
ginRedoClearIncompleteSplit(lsn, record, 0, data->node, data->leftChildBlkno);
flags = 0;
if (isLeaf)
@ -605,30 +587,20 @@ ginRedoVacuumDataLeafPage(XLogRecPtr lsn, XLogRecord *record)
{
ginxlogVacuumDataLeafPage *xlrec = (ginxlogVacuumDataLeafPage *) XLogRecGetData(record);
Buffer buffer;
Page page;
/* If we have a full-page image, restore it and we're done */
if (record->xl_info & XLR_BKP_BLOCK(0))
if (XLogReadBufferForRedo(lsn, record, 0, xlrec->node, xlrec->blkno,
&buffer) == BLK_NEEDS_REDO)
{
(void) RestoreBackupBlock(lsn, record, 0, false, false);
return;
}
buffer = XLogReadBuffer(xlrec->node, xlrec->blkno, false);
if (!BufferIsValid(buffer))
return;
page = (Page) BufferGetPage(buffer);
Page page = BufferGetPage(buffer);
Assert(GinPageIsLeaf(page));
Assert(GinPageIsData(page));
if (lsn > PageGetLSN(page))
{
ginRedoRecompress(page, &xlrec->data);
PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
}
@ -641,62 +613,42 @@ ginRedoDeletePage(XLogRecPtr lsn, XLogRecord *record)
Buffer lbuffer;
Page page;
if (record->xl_info & XLR_BKP_BLOCK(0))
dbuffer = RestoreBackupBlock(lsn, record, 0, false, true);
else
{
dbuffer = XLogReadBuffer(data->node, data->blkno, false);
if (BufferIsValid(dbuffer))
if (XLogReadBufferForRedo(lsn, record, 0, data->node, data->blkno, &dbuffer)
== BLK_NEEDS_REDO)
{
page = BufferGetPage(dbuffer);
if (lsn > PageGetLSN(page))
{
Assert(GinPageIsData(page));
GinPageGetOpaque(page)->flags = GIN_DELETED;
PageSetLSN(page, lsn);
MarkBufferDirty(dbuffer);
}
}
}
if (record->xl_info & XLR_BKP_BLOCK(1))
pbuffer = RestoreBackupBlock(lsn, record, 1, false, true);
else
{
pbuffer = XLogReadBuffer(data->node, data->parentBlkno, false);
if (BufferIsValid(pbuffer))
if (XLogReadBufferForRedo(lsn, record, 1, data->node, data->parentBlkno,
&pbuffer) == BLK_NEEDS_REDO)
{
page = BufferGetPage(pbuffer);
if (lsn > PageGetLSN(page))
{
Assert(GinPageIsData(page));
Assert(!GinPageIsLeaf(page));
GinPageDeletePostingItem(page, data->parentOffset);
PageSetLSN(page, lsn);
MarkBufferDirty(pbuffer);
}
}
}
if (record->xl_info & XLR_BKP_BLOCK(2))
(void) RestoreBackupBlock(lsn, record, 2, false, false);
else if (data->leftBlkno != InvalidBlockNumber)
{
lbuffer = XLogReadBuffer(data->node, data->leftBlkno, false);
if (BufferIsValid(lbuffer))
if (XLogReadBufferForRedo(lsn, record, 2, data->node, data->leftBlkno,
&lbuffer) == BLK_NEEDS_REDO)
{
page = BufferGetPage(lbuffer);
if (lsn > PageGetLSN(page))
{
Assert(GinPageIsData(page));
GinPageGetOpaque(page)->rightlink = data->rightLink;
PageSetLSN(page, lsn);
MarkBufferDirty(lbuffer);
}
UnlockReleaseBuffer(lbuffer);
}
}
if (BufferIsValid(lbuffer))
UnlockReleaseBuffer(lbuffer);
if (BufferIsValid(pbuffer))
UnlockReleaseBuffer(pbuffer);
if (BufferIsValid(dbuffer))
@ -730,31 +682,29 @@ ginRedoUpdateMetapage(XLogRecPtr lsn, XLogRecord *record)
/*
* insert into tail page
*/
if (record->xl_info & XLR_BKP_BLOCK(0))
(void) RestoreBackupBlock(lsn, record, 0, false, false);
else
{
buffer = XLogReadBuffer(data->node, data->metadata.tail, false);
if (BufferIsValid(buffer))
if (XLogReadBufferForRedo(lsn, record, 0, data->node,
data->metadata.tail, &buffer)
== BLK_NEEDS_REDO)
{
Page page = BufferGetPage(buffer);
OffsetNumber off;
int i;
Size tupsize;
IndexTuple tuples;
if (lsn > PageGetLSN(page))
{
OffsetNumber l,
off = (PageIsEmpty(page)) ? FirstOffsetNumber :
OffsetNumberNext(PageGetMaxOffsetNumber(page));
int i,
tupsize;
IndexTuple tuples = (IndexTuple) (XLogRecGetData(record) + sizeof(ginxlogUpdateMeta));
tuples = (IndexTuple) (XLogRecGetData(record) + sizeof(ginxlogUpdateMeta));
if (PageIsEmpty(page))
off = FirstOffsetNumber;
else
off = OffsetNumberNext(PageGetMaxOffsetNumber(page));
for (i = 0; i < data->ntuples; i++)
{
tupsize = IndexTupleSize(tuples);
l = PageAddItem(page, (Item) tuples, tupsize, off, false, false);
if (l == InvalidOffsetNumber)
if (PageAddItem(page, (Item) tuples, tupsize, off,
false, false) == InvalidOffsetNumber)
elog(ERROR, "failed to add item to index page");
tuples = (IndexTuple) (((char *) tuples) + tupsize);
@ -770,35 +720,27 @@ ginRedoUpdateMetapage(XLogRecPtr lsn, XLogRecord *record)
PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
}
}
}
else if (data->prevTail != InvalidBlockNumber)
{
/*
* New tail
*/
if (record->xl_info & XLR_BKP_BLOCK(0))
(void) RestoreBackupBlock(lsn, record, 0, false, false);
else
{
buffer = XLogReadBuffer(data->node, data->prevTail, false);
if (BufferIsValid(buffer))
if (XLogReadBufferForRedo(lsn, record, 0, data->node, data->prevTail,
&buffer) == BLK_NEEDS_REDO)
{
Page page = BufferGetPage(buffer);
if (lsn > PageGetLSN(page))
{
GinPageGetOpaque(page)->rightlink = data->newRightlink;
PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
}
}
}
UnlockReleaseBuffer(metabuffer);
}
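
A detail worth noting in ginRedoDeletePage above: for multi-block records, each buffer is now released unconditionally at the end, not inside the BLK_NEEDS_REDO branch. That relies on the helper returning a valid, still-locked buffer even when the block was restored from a full-page image or needed no redo, so all pages stay locked until every linked change is in place. A condensed sketch of that shape, using the names from the code above:

    Buffer      dbuffer;
    Buffer      pbuffer;
    Buffer      lbuffer;

    if (XLogReadBufferForRedo(lsn, record, 0, data->node, data->blkno,
                              &dbuffer) == BLK_NEEDS_REDO)
    {
        /* ... mark the deleted page ... */
    }
    if (XLogReadBufferForRedo(lsn, record, 1, data->node, data->parentBlkno,
                              &pbuffer) == BLK_NEEDS_REDO)
    {
        /* ... remove the downlink from the parent ... */
    }
    if (XLogReadBufferForRedo(lsn, record, 2, data->node, data->leftBlkno,
                              &lbuffer) == BLK_NEEDS_REDO)
    {
        /* ... fix the left sibling's rightlink ... */
    }

    /* Only now release everything; invalid means the block didn't exist. */
    if (BufferIsValid(lbuffer))
        UnlockReleaseBuffer(lbuffer);
    if (BufferIsValid(pbuffer))
        UnlockReleaseBuffer(pbuffer);
    if (BufferIsValid(dbuffer))
        UnlockReleaseBuffer(dbuffer);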

src/backend/access/gist/gistxlog.c

@ -48,30 +48,25 @@ gistRedoClearFollowRight(XLogRecPtr lsn, XLogRecord *record, int block_index,
{
Buffer buffer;
Page page;
if (record->xl_info & XLR_BKP_BLOCK(block_index))
buffer = RestoreBackupBlock(lsn, record, block_index, false, true);
else
{
buffer = XLogReadBuffer(node, childblkno, false);
if (!BufferIsValid(buffer))
return; /* page was deleted, nothing to do */
}
page = (Page) BufferGetPage(buffer);
XLogRedoAction action;
/*
* Note that we still update the page even if page LSN is equal to the LSN
* of this record, because the updated NSN is not included in the full
* page image.
* Note that we still update the page even if it was restored from a full
* page image, because the updated NSN is not included in the image.
*/
if (lsn >= PageGetLSN(page))
action = XLogReadBufferForRedo(lsn, record, block_index, node, childblkno,
&buffer);
if (action == BLK_NEEDS_REDO || action == BLK_RESTORED)
{
page = BufferGetPage(buffer);
GistPageSetNSN(page, lsn);
GistClearFollowRight(page);
PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
}
@ -87,43 +82,11 @@ gistRedoPageUpdateRecord(XLogRecPtr lsn, XLogRecord *record)
Page page;
char *data;
/*
* We need to acquire and hold lock on target page while updating the left
* child page. If we have a full-page image of target page, getting the
* lock is a side-effect of restoring that image. Note that even if the
* target page no longer exists, we'll still attempt to replay the change
* on the child page.
*/
if (record->xl_info & XLR_BKP_BLOCK(0))
buffer = RestoreBackupBlock(lsn, record, 0, false, true);
else
buffer = XLogReadBuffer(xldata->node, xldata->blkno, false);
/* Fix follow-right data on left child page */
if (BlockNumberIsValid(xldata->leftchild))
gistRedoClearFollowRight(lsn, record, 1,
xldata->node, xldata->leftchild);
/* Done if target page no longer exists */
if (!BufferIsValid(buffer))
return;
/* nothing more to do if page was backed up (and no info to do it with) */
if (record->xl_info & XLR_BKP_BLOCK(0))
if (XLogReadBufferForRedo(lsn, record, 0, xldata->node, xldata->blkno,
&buffer) == BLK_NEEDS_REDO)
{
UnlockReleaseBuffer(buffer);
return;
}
page = (Page) BufferGetPage(buffer);
/* nothing more to do if change already applied */
if (lsn <= PageGetLSN(page))
{
UnlockReleaseBuffer(buffer);
return;
}
data = begin + sizeof(gistxlogPageUpdate);
/* Delete old tuples */
@ -164,8 +127,8 @@ gistRedoPageUpdateRecord(XLogRecPtr lsn, XLogRecord *record)
else
{
/*
* special case: leafpage, nothing to insert, nothing to delete, then
* vacuum marks page
* special case: leafpage, nothing to insert, nothing to delete,
* then vacuum marks page
*/
if (GistPageIsLeaf(page) && xldata->ntodelete == 0)
GistClearTuplesDeleted(page);
@ -176,14 +139,28 @@ gistRedoPageUpdateRecord(XLogRecPtr lsn, XLogRecord *record)
xldata->blkno == GIST_ROOT_BLKNO)
{
/*
* all links on non-leaf root page was deleted by vacuum full, so root
* page becomes a leaf
* all links on non-leaf root page was deleted by vacuum full, so
* root page becomes a leaf
*/
GistPageSetLeaf(page);
}
PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
}
/*
* Fix follow-right data on left child page
*
* This must be done while still holding the lock on the target page. Note
* that even if the target page no longer exists, we still attempt to
* replay the change on the child page.
*/
if (BlockNumberIsValid(xldata->leftchild))
gistRedoClearFollowRight(lsn, record, 1,
xldata->node, xldata->leftchild);
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
}

src/backend/access/heap/heapam.c

@ -7134,15 +7134,13 @@ heap_xlog_clean(XLogRecPtr lsn, XLogRecord *record)
{
xl_heap_clean *xlrec = (xl_heap_clean *) XLogRecGetData(record);
Buffer buffer;
Page page;
OffsetNumber *end;
OffsetNumber *redirected;
OffsetNumber *nowdead;
OffsetNumber *nowunused;
int nredirected;
int ndead;
int nunused;
Size freespace;
Size freespace = 0;
RelFileNode rnode;
BlockNumber blkno;
XLogRedoAction action;
rnode = xlrec->node;
blkno = xlrec->block;
/*
* We're about to remove tuples. In Hot Standby mode, ensure that there's
@ -7153,30 +7151,25 @@ heap_xlog_clean(XLogRecPtr lsn, XLogRecord *record)
* latestRemovedXid is invalid, skip conflict processing.
*/
if (InHotStandby && TransactionIdIsValid(xlrec->latestRemovedXid))
ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid,
xlrec->node);
ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, rnode);
/*
* If we have a full-page image, restore it (using a cleanup lock) and
* we're done.
*/
if (record->xl_info & XLR_BKP_BLOCK(0))
action = XLogReadBufferForRedoExtended(lsn, record, 0,
rnode, MAIN_FORKNUM, blkno,
RBM_NORMAL, true, &buffer);
if (action == BLK_NEEDS_REDO)
{
(void) RestoreBackupBlock(lsn, record, 0, true, false);
return;
}
buffer = XLogReadBufferExtended(xlrec->node, MAIN_FORKNUM, xlrec->block, RBM_NORMAL);
if (!BufferIsValid(buffer))
return;
LockBufferForCleanup(buffer);
page = (Page) BufferGetPage(buffer);
if (lsn <= PageGetLSN(page))
{
UnlockReleaseBuffer(buffer);
return;
}
Page page = (Page) BufferGetPage(buffer);
OffsetNumber *end;
OffsetNumber *redirected;
OffsetNumber *nowdead;
OffsetNumber *nowunused;
int nredirected;
int ndead;
int nunused;
nredirected = xlrec->nredirected;
ndead = xlrec->ndead;
@ -7196,21 +7189,24 @@ heap_xlog_clean(XLogRecPtr lsn, XLogRecord *record)
freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
/*
* Note: we don't worry about updating the page's prunability hints. At
* worst this will cause an extra prune cycle to occur soon.
* Note: we don't worry about updating the page's prunability hints.
* At worst this will cause an extra prune cycle to occur soon.
*/
PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
/*
* Update the FSM as well.
*
* XXX: We don't get here if the page was restored from full page image.
* We don't bother to update the FSM in that case, it doesn't need to be
* XXX: Don't do this if the page was restored from full page image. We
* don't bother to update the FSM in that case, it doesn't need to be
* totally accurate anyway.
*/
if (action == BLK_NEEDS_REDO)
XLogRecordPageWithFreeSpace(xlrec->node, xlrec->block, freespace);
}
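
heap_xlog_clean above is the first call site of the extended variant, which is needed when the page must be taken with a cleanup lock (and, further down, when it should be zeroed rather than read). Reconstructed from the call sites on this page, since xlogutils.h isn't shown here, its signature is approximately:

    XLogRedoAction
    XLogReadBufferForRedoExtended(XLogRecPtr lsn, XLogRecord *record,
                                  int block_index,   /* backup-block slot */
                                  RelFileNode rnode, ForkNumber forknum,
                                  BlockNumber blkno,
                                  ReadBufferMode mode,    /* RBM_NORMAL/RBM_ZERO */
                                  bool get_cleanup_lock,  /* LockBufferForCleanup? */
                                  Buffer *buf);

The plain XLogReadBufferForRedo is then presumably just a wrapper that passes MAIN_FORKNUM, RBM_NORMAL and false.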
@ -7226,6 +7222,14 @@ static void
heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record)
{
xl_heap_visible *xlrec = (xl_heap_visible *) XLogRecGetData(record);
Buffer buffer;
Page page;
RelFileNode rnode;
BlockNumber blkno;
XLogRedoAction action;
rnode = xlrec->node;
blkno = xlrec->block;
/*
* If there are any Hot Standby transactions running that have an xmin
@ -7237,60 +7241,43 @@ heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record)
* rather than killing the transaction outright.
*/
if (InHotStandby)
ResolveRecoveryConflictWithSnapshot(xlrec->cutoff_xid, xlrec->node);
ResolveRecoveryConflictWithSnapshot(xlrec->cutoff_xid, rnode);
/*
* Read the heap page, if it still exists. If the heap file has dropped or
* truncated later in recovery, we don't need to update the page, but we'd
* better still update the visibility map.
*/
action = XLogReadBufferForRedo(lsn, record, 1, rnode, blkno, &buffer);
if (action == BLK_NEEDS_REDO)
{
/*
* We don't bump the LSN of the heap page when setting the visibility
* map bit (unless checksums are enabled, in which case we must),
* because that would generate an unworkable volume of full-page
* writes. This exposes us to torn page hazards, but since we're not
* inspecting the existing page contents in any way, we don't care.
*
* However, all operations that clear the visibility map bit *do* bump
* the LSN, and those operations will only be replayed if the XLOG LSN
* follows the page LSN. Thus, if the page LSN has advanced past our
* XLOG record's LSN, we mustn't mark the page all-visible, because
* the subsequent update won't be replayed to clear the flag.
*/
page = BufferGetPage(buffer);
PageSetAllVisible(page);
MarkBufferDirty(buffer);
}
else if (action == BLK_RESTORED)
{
/*
* If heap block was backed up, restore it. This can only happen with
* checksums enabled.
*/
if (record->xl_info & XLR_BKP_BLOCK(1))
{
Assert(DataChecksumsEnabled());
(void) RestoreBackupBlock(lsn, record, 1, false, false);
}
else
{
Buffer buffer;
Page page;
/*
* Read the heap page, if it still exists. If the heap file has been
* dropped or truncated later in recovery, we don't need to update the
* page, but we'd better still update the visibility map.
*/
buffer = XLogReadBufferExtended(xlrec->node, MAIN_FORKNUM,
xlrec->block, RBM_NORMAL);
if (BufferIsValid(buffer))
{
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
page = (Page) BufferGetPage(buffer);
/*
* We don't bump the LSN of the heap page when setting the
* visibility map bit (unless checksums are enabled, in which case
* we must), because that would generate an unworkable volume of
* full-page writes. This exposes us to torn page hazards, but
* since we're not inspecting the existing page contents in any
* way, we don't care.
*
* However, all operations that clear the visibility map bit *do*
* bump the LSN, and those operations will only be replayed if the
* XLOG LSN follows the page LSN. Thus, if the page LSN has
* advanced past our XLOG record's LSN, we mustn't mark the page
* all-visible, because the subsequent update won't be replayed to
* clear the flag.
*/
if (lsn > PageGetLSN(page))
{
PageSetAllVisible(page);
MarkBufferDirty(buffer);
}
/* Done with heap page. */
UnlockReleaseBuffer(buffer);
}
}
/*
* Even if we skipped the heap page update due to the LSN interlock, it's
@ -7305,8 +7292,8 @@ heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record)
Relation reln;
Buffer vmbuffer = InvalidBuffer;
reln = CreateFakeRelcacheEntry(xlrec->node);
visibilitymap_pin(reln, xlrec->block, &vmbuffer);
reln = CreateFakeRelcacheEntry(rnode);
visibilitymap_pin(reln, blkno, &vmbuffer);
/*
* Don't set the bit if replay has already passed this point.
@ -7320,7 +7307,7 @@ heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record)
* real harm is done; and the next VACUUM will fix it.
*/
if (lsn > PageGetLSN(BufferGetPage(vmbuffer)))
visibilitymap_set(reln, xlrec->block, InvalidBuffer, lsn, vmbuffer,
visibilitymap_set(reln, blkno, InvalidBuffer, lsn, vmbuffer,
xlrec->cutoff_xid);
ReleaseBuffer(vmbuffer);
@ -7347,24 +7334,10 @@ heap_xlog_freeze_page(XLogRecPtr lsn, XLogRecord *record)
if (InHotStandby)
ResolveRecoveryConflictWithSnapshot(cutoff_xid, xlrec->node);
/* If we have a full-page image, restore it and we're done */
if (record->xl_info & XLR_BKP_BLOCK(0))
if (XLogReadBufferForRedo(lsn, record, 0, xlrec->node, xlrec->block,
&buffer) == BLK_NEEDS_REDO)
{
(void) RestoreBackupBlock(lsn, record, 0, false, false);
return;
}
buffer = XLogReadBuffer(xlrec->node, xlrec->block, false);
if (!BufferIsValid(buffer))
return;
page = (Page) BufferGetPage(buffer);
if (lsn <= PageGetLSN(page))
{
UnlockReleaseBuffer(buffer);
return;
}
page = BufferGetPage(buffer);
/* now execute freeze plan for each frozen tuple */
for (ntup = 0; ntup < xlrec->ntuples; ntup++)
@ -7382,6 +7355,8 @@ heap_xlog_freeze_page(XLogRecPtr lsn, XLogRecord *record)
PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
}
@ -7422,8 +7397,10 @@ heap_xlog_delete(XLogRecPtr lsn, XLogRecord *record)
ItemId lp = NULL;
HeapTupleHeader htup;
BlockNumber blkno;
RelFileNode target_node;
blkno = ItemPointerGetBlockNumber(&(xlrec->target.tid));
target_node = xlrec->target.node;
/*
* The visibility map may need to be fixed even if the heap page is
@ -7431,7 +7408,7 @@ heap_xlog_delete(XLogRecPtr lsn, XLogRecord *record)
*/
if (xlrec->flags & XLOG_HEAP_ALL_VISIBLE_CLEARED)
{
Relation reln = CreateFakeRelcacheEntry(xlrec->target.node);
Relation reln = CreateFakeRelcacheEntry(target_node);
Buffer vmbuffer = InvalidBuffer;
visibilitymap_pin(reln, blkno, &vmbuffer);
@ -7440,24 +7417,11 @@ heap_xlog_delete(XLogRecPtr lsn, XLogRecord *record)
FreeFakeRelcacheEntry(reln);
}
/* If we have a full-page image, restore it and we're done */
if (record->xl_info & XLR_BKP_BLOCK(0))
if (XLogReadBufferForRedo(lsn, record, 0, target_node, blkno, &buffer)
== BLK_NEEDS_REDO)
{
(void) RestoreBackupBlock(lsn, record, 0, false, false);
return;
}
buffer = XLogReadBuffer(xlrec->target.node, blkno, false);
if (!BufferIsValid(buffer))
return;
page = (Page) BufferGetPage(buffer);
if (lsn <= PageGetLSN(page)) /* changes are applied */
{
UnlockReleaseBuffer(buffer);
return;
}
offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
if (PageGetMaxOffsetNumber(page) >= offnum)
lp = PageGetItemId(page, offnum);
@ -7485,6 +7449,8 @@ heap_xlog_delete(XLogRecPtr lsn, XLogRecord *record)
htup->t_ctid = xlrec->target.tid;
PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
}
@ -7503,9 +7469,12 @@ heap_xlog_insert(XLogRecPtr lsn, XLogRecord *record)
HeapTupleHeader htup;
xl_heap_header xlhdr;
uint32 newlen;
Size freespace;
Size freespace = 0;
RelFileNode target_node;
BlockNumber blkno;
XLogRedoAction action;
target_node = xlrec->target.node;
blkno = ItemPointerGetBlockNumber(&(xlrec->target.tid));
/*
@ -7514,7 +7483,7 @@ heap_xlog_insert(XLogRecPtr lsn, XLogRecord *record)
*/
if (xlrec->flags & XLOG_HEAP_ALL_VISIBLE_CLEARED)
{
Relation reln = CreateFakeRelcacheEntry(xlrec->target.node);
Relation reln = CreateFakeRelcacheEntry(target_node);
Buffer vmbuffer = InvalidBuffer;
visibilitymap_pin(reln, blkno, &vmbuffer);
@ -7523,34 +7492,26 @@ heap_xlog_insert(XLogRecPtr lsn, XLogRecord *record)
FreeFakeRelcacheEntry(reln);
}
/* If we have a full-page image, restore it and we're done */
if (record->xl_info & XLR_BKP_BLOCK(0))
{
(void) RestoreBackupBlock(lsn, record, 0, false, false);
return;
}
/*
* If we inserted the first and only tuple on the page, re-initialize
* the page from scratch.
*/
if (record->xl_info & XLOG_HEAP_INIT_PAGE)
{
buffer = XLogReadBuffer(xlrec->target.node, blkno, true);
Assert(BufferIsValid(buffer));
page = (Page) BufferGetPage(buffer);
XLogReadBufferForRedoExtended(lsn, record, 0,
target_node, MAIN_FORKNUM, blkno,
RBM_ZERO, false, &buffer);
page = BufferGetPage(buffer);
PageInit(page, BufferGetPageSize(buffer), 0);
action = BLK_NEEDS_REDO;
}
else
{
buffer = XLogReadBuffer(xlrec->target.node, blkno, false);
if (!BufferIsValid(buffer))
return;
page = (Page) BufferGetPage(buffer);
action = XLogReadBufferForRedo(lsn, record, 0, target_node, blkno,
&buffer);
if (lsn <= PageGetLSN(page)) /* changes are applied */
if (action == BLK_NEEDS_REDO)
{
UnlockReleaseBuffer(buffer);
return;
}
}
page = BufferGetPage(buffer);
offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
if (PageGetMaxOffsetNumber(page) + 1 < offnum)
@ -7587,6 +7548,8 @@ heap_xlog_insert(XLogRecPtr lsn, XLogRecord *record)
PageClearAllVisible(page);
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
/*
@ -7594,11 +7557,11 @@ heap_xlog_insert(XLogRecPtr lsn, XLogRecord *record)
* Arbitrarily, our definition of "low" is less than 20%. We can't do much
* better than that without knowing the fill-factor for the table.
*
* XXX: We don't get here if the page was restored from full page image.
* We don't bother to update the FSM in that case, it doesn't need to be
* XXX: Don't do this if the page was restored from full page image. We
* don't bother to update the FSM in that case, it doesn't need to be
* totally accurate anyway.
*/
if (freespace < BLCKSZ / 5)
if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5)
XLogRecordPageWithFreeSpace(xlrec->target.node, blkno, freespace);
}
@ -7610,6 +7573,8 @@ heap_xlog_multi_insert(XLogRecPtr lsn, XLogRecord *record)
{
char *recdata = XLogRecGetData(record);
xl_heap_multi_insert *xlrec;
RelFileNode rnode;
BlockNumber blkno;
Buffer buffer;
Page page;
struct
@ -7619,10 +7584,10 @@ heap_xlog_multi_insert(XLogRecPtr lsn, XLogRecord *record)
} tbuf;
HeapTupleHeader htup;
uint32 newlen;
Size freespace;
BlockNumber blkno;
Size freespace = 0;
int i;
bool isinit = (record->xl_info & XLOG_HEAP_INIT_PAGE) != 0;
XLogRedoAction action;
/*
* Insertion doesn't overwrite MVCC data, so no conflict processing is
@ -7632,6 +7597,9 @@ heap_xlog_multi_insert(XLogRecPtr lsn, XLogRecord *record)
xlrec = (xl_heap_multi_insert *) recdata;
recdata += SizeOfHeapMultiInsert;
rnode = xlrec->node;
blkno = xlrec->blkno;
/*
* If we're reinitializing the page, the tuples are stored in order from
* FirstOffsetNumber. Otherwise there's an array of offsets in the WAL
@ -7640,15 +7608,13 @@ heap_xlog_multi_insert(XLogRecPtr lsn, XLogRecord *record)
if (!isinit)
recdata += sizeof(OffsetNumber) * xlrec->ntuples;
blkno = xlrec->blkno;
/*
* The visibility map may need to be fixed even if the heap page is
* already up-to-date.
*/
if (xlrec->flags & XLOG_HEAP_ALL_VISIBLE_CLEARED)
{
Relation reln = CreateFakeRelcacheEntry(xlrec->node);
Relation reln = CreateFakeRelcacheEntry(rnode);
Buffer vmbuffer = InvalidBuffer;
visibilitymap_pin(reln, blkno, &vmbuffer);
@ -7657,35 +7623,21 @@ heap_xlog_multi_insert(XLogRecPtr lsn, XLogRecord *record)
FreeFakeRelcacheEntry(reln);
}
/* If we have a full-page image, restore it and we're done */
if (record->xl_info & XLR_BKP_BLOCK(0))
{
(void) RestoreBackupBlock(lsn, record, 0, false, false);
return;
}
if (isinit)
{
buffer = XLogReadBuffer(xlrec->node, blkno, true);
Assert(BufferIsValid(buffer));
page = (Page) BufferGetPage(buffer);
XLogReadBufferForRedoExtended(lsn, record, 0,
rnode, MAIN_FORKNUM, blkno,
RBM_ZERO, false, &buffer);
page = BufferGetPage(buffer);
PageInit(page, BufferGetPageSize(buffer), 0);
action = BLK_NEEDS_REDO;
}
else
{
buffer = XLogReadBuffer(xlrec->node, blkno, false);
if (!BufferIsValid(buffer))
return;
page = (Page) BufferGetPage(buffer);
action = XLogReadBufferForRedo(lsn, record, 0, rnode, blkno, &buffer);
if (lsn <= PageGetLSN(page)) /* changes are applied */
if (action == BLK_NEEDS_REDO)
{
UnlockReleaseBuffer(buffer);
return;
}
}
page = BufferGetPage(buffer);
for (i = 0; i < xlrec->ntuples; i++)
{
OffsetNumber offnum;
@ -7733,6 +7685,8 @@ heap_xlog_multi_insert(XLogRecPtr lsn, XLogRecord *record)
PageClearAllVisible(page);
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
/*
@ -7740,11 +7694,11 @@ heap_xlog_multi_insert(XLogRecPtr lsn, XLogRecord *record)
* Arbitrarily, our definition of "low" is less than 20%. We can't do much
* better than that without knowing the fill-factor for the table.
*
* XXX: We don't get here if the page was restored from full page image.
* We don't bother to update the FSM in that case, it doesn't need to be
* XXX: Don't do this if the page was restored from full page image. We
* don't bother to update the FSM in that case, it doesn't need to be
* totally accurate anyway.
*/
if (freespace < BLCKSZ / 5)
if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5)
XLogRecordPageWithFreeSpace(xlrec->node, blkno, freespace);
}
@ -7755,8 +7709,9 @@ static void
heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool hot_update)
{
xl_heap_update *xlrec = (xl_heap_update *) XLogRecGetData(record);
bool samepage = (ItemPointerGetBlockNumber(&(xlrec->newtid)) ==
ItemPointerGetBlockNumber(&(xlrec->target.tid)));
RelFileNode rnode;
BlockNumber oldblk;
BlockNumber newblk;
Buffer obuffer,
nbuffer;
Page page;
@ -7775,24 +7730,29 @@ heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool hot_update)
} tbuf;
xl_heap_header_len xlhdr;
uint32 newlen;
Size freespace;
Size freespace = 0;
XLogRedoAction oldaction;
XLogRedoAction newaction;
/* initialize to keep the compiler quiet */
oldtup.t_data = NULL;
oldtup.t_len = 0;
rnode = xlrec->target.node;
newblk = ItemPointerGetBlockNumber(&xlrec->newtid);
oldblk = ItemPointerGetBlockNumber(&xlrec->target.tid);
/*
* The visibility map may need to be fixed even if the heap page is
* already up-to-date.
*/
if (xlrec->flags & XLOG_HEAP_ALL_VISIBLE_CLEARED)
{
Relation reln = CreateFakeRelcacheEntry(xlrec->target.node);
BlockNumber block = ItemPointerGetBlockNumber(&xlrec->target.tid);
Relation reln = CreateFakeRelcacheEntry(rnode);
Buffer vmbuffer = InvalidBuffer;
visibilitymap_pin(reln, block, &vmbuffer);
visibilitymap_clear(reln, block, vmbuffer);
visibilitymap_pin(reln, oldblk, &vmbuffer);
visibilitymap_clear(reln, oldblk, vmbuffer);
ReleaseBuffer(vmbuffer);
FreeFakeRelcacheEntry(reln);
}
@ -7807,37 +7767,12 @@ heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool hot_update)
* added the new tuple to the new page.
*/
if (record->xl_info & XLR_BKP_BLOCK(0))
{
obuffer = RestoreBackupBlock(lsn, record, 0, false, true);
if (samepage)
{
/* backup block covered both changes, so we're done */
UnlockReleaseBuffer(obuffer);
return;
}
goto newt;
}
/* Deal with old tuple version */
obuffer = XLogReadBuffer(xlrec->target.node,
ItemPointerGetBlockNumber(&(xlrec->target.tid)),
false);
if (!BufferIsValid(obuffer))
goto newt;
oldaction = XLogReadBufferForRedo(lsn, record, 0, rnode, oldblk, &obuffer);
if (oldaction == BLK_NEEDS_REDO)
{
page = (Page) BufferGetPage(obuffer);
if (lsn <= PageGetLSN(page)) /* changes are applied */
{
if (samepage)
{
UnlockReleaseBuffer(obuffer);
return;
}
goto newt;
}
offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
if (PageGetMaxOffsetNumber(page) >= offnum)
lp = PageGetItemId(page, offnum);
@ -7869,22 +7804,30 @@ heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool hot_update)
if (xlrec->flags & XLOG_HEAP_ALL_VISIBLE_CLEARED)
PageClearAllVisible(page);
/*
* this test is ugly, but necessary to avoid thinking that insert change
* is already applied
*/
if (samepage)
{
nbuffer = obuffer;
goto newsame;
}
PageSetLSN(page, lsn);
MarkBufferDirty(obuffer);
}
/* Deal with new tuple */
newt:;
/*
* Read the page the new tuple goes into, if different from old.
*/
if (oldblk == newblk)
{
nbuffer = obuffer;
newaction = oldaction;
}
else if (record->xl_info & XLOG_HEAP_INIT_PAGE)
{
XLogReadBufferForRedoExtended(lsn, record, 1,
rnode, MAIN_FORKNUM, newblk,
RBM_ZERO, false, &nbuffer);
page = (Page) BufferGetPage(nbuffer);
PageInit(page, BufferGetPageSize(nbuffer), 0);
newaction = BLK_NEEDS_REDO;
}
else
newaction = XLogReadBufferForRedo(lsn, record, 1, rnode, newblk,
&nbuffer);
/*
* The visibility map may need to be fixed even if the heap page is
@ -7893,57 +7836,19 @@ newt:;
if (xlrec->flags & XLOG_HEAP_NEW_ALL_VISIBLE_CLEARED)
{
Relation reln = CreateFakeRelcacheEntry(xlrec->target.node);
BlockNumber block = ItemPointerGetBlockNumber(&xlrec->newtid);
Buffer vmbuffer = InvalidBuffer;
visibilitymap_pin(reln, block, &vmbuffer);
visibilitymap_clear(reln, block, vmbuffer);
visibilitymap_pin(reln, newblk, &vmbuffer);
visibilitymap_clear(reln, newblk, vmbuffer);
ReleaseBuffer(vmbuffer);
FreeFakeRelcacheEntry(reln);
}
if (record->xl_info & XLR_BKP_BLOCK(1))
/* Deal with new tuple */
if (newaction == BLK_NEEDS_REDO)
{
(void) RestoreBackupBlock(lsn, record, 1, false, false);
if (BufferIsValid(obuffer))
UnlockReleaseBuffer(obuffer);
return;
}
if (record->xl_info & XLOG_HEAP_INIT_PAGE)
{
nbuffer = XLogReadBuffer(xlrec->target.node,
ItemPointerGetBlockNumber(&(xlrec->newtid)),
true);
Assert(BufferIsValid(nbuffer));
page = (Page) BufferGetPage(nbuffer);
PageInit(page, BufferGetPageSize(nbuffer), 0);
}
else
{
nbuffer = XLogReadBuffer(xlrec->target.node,
ItemPointerGetBlockNumber(&(xlrec->newtid)),
false);
if (!BufferIsValid(nbuffer))
{
if (BufferIsValid(obuffer))
UnlockReleaseBuffer(obuffer);
return;
}
page = (Page) BufferGetPage(nbuffer);
if (lsn <= PageGetLSN(page)) /* changes are applied */
{
UnlockReleaseBuffer(nbuffer);
if (BufferIsValid(obuffer))
UnlockReleaseBuffer(obuffer);
return;
}
}
newsame:;
offnum = ItemPointerGetOffsetNumber(&(xlrec->newtid));
if (PageGetMaxOffsetNumber(page) + 1 < offnum)
elog(PANIC, "heap_update_redo: invalid max offset number");
@ -7952,13 +7857,13 @@ newsame:;
if (xlrec->flags & XLOG_HEAP_PREFIX_FROM_OLD)
{
Assert(samepage);
Assert(newblk == oldblk);
memcpy(&prefixlen, recdata, sizeof(uint16));
recdata += sizeof(uint16);
}
if (xlrec->flags & XLOG_HEAP_SUFFIX_FROM_OLD)
{
Assert(samepage);
Assert(newblk == oldblk);
memcpy(&suffixlen, recdata, sizeof(uint16));
recdata += sizeof(uint16);
}
@ -7971,8 +7876,8 @@ newsame:;
MemSet((char *) htup, 0, sizeof(HeapTupleHeaderData));
/*
* Reconstruct the new tuple using the prefix and/or suffix from the old
* tuple, and the data stored in the WAL record.
* Reconstruct the new tuple using the prefix and/or suffix from the
* old tuple, and the data stored in the WAL record.
*/
newp = (char *) htup + offsetof(HeapTupleHeaderData, t_bits);
if (prefixlen > 0)
@ -7997,7 +7902,10 @@ newsame:;
}
else
{
/* copy bitmap [+ padding] [+ oid] + data from record, all in one go */
/*
* copy bitmap [+ padding] [+ oid] + data from record, all in one
* go
*/
memcpy(newp, recdata, xlhdr.t_len);
recdata += xlhdr.t_len;
newp += xlhdr.t_len;
@ -8028,9 +7936,10 @@ newsame:;
PageSetLSN(page, lsn);
MarkBufferDirty(nbuffer);
}
if (BufferIsValid(nbuffer) && nbuffer != obuffer)
UnlockReleaseBuffer(nbuffer);
if (BufferIsValid(obuffer) && obuffer != nbuffer)
if (BufferIsValid(obuffer))
UnlockReleaseBuffer(obuffer);
/*
@ -8044,11 +7953,11 @@ newsame:;
* as it did before the update, assuming the new tuple is about the same
* size as the old one.
*
* XXX: We don't get here if the page was restored from full page image.
* We don't bother to update the FSM in that case, it doesn't need to be
* XXX: Don't do this if the page was restored from full page image. We
* don't bother to update the FSM in that case, it doesn't need to be
* totally accurate anyway.
*/
if (!hot_update && freespace < BLCKSZ / 5)
if (newaction == BLK_NEEDS_REDO && !hot_update && freespace < BLCKSZ / 5)
XLogRecordPageWithFreeSpace(xlrec->target.node,
ItemPointerGetBlockNumber(&(xlrec->newtid)),
freespace);
@ -8064,26 +7973,12 @@ heap_xlog_lock(XLogRecPtr lsn, XLogRecord *record)
ItemId lp = NULL;
HeapTupleHeader htup;
/* If we have a full-page image, restore it and we're done */
if (record->xl_info & XLR_BKP_BLOCK(0))
if (XLogReadBufferForRedo(lsn, record, 0, xlrec->target.node,
ItemPointerGetBlockNumber(&xlrec->target.tid),
&buffer) == BLK_NEEDS_REDO)
{
(void) RestoreBackupBlock(lsn, record, 0, false, false);
return;
}
buffer = XLogReadBuffer(xlrec->target.node,
ItemPointerGetBlockNumber(&(xlrec->target.tid)),
false);
if (!BufferIsValid(buffer))
return;
page = (Page) BufferGetPage(buffer);
if (lsn <= PageGetLSN(page)) /* changes are applied */
{
UnlockReleaseBuffer(buffer);
return;
}
offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
if (PageGetMaxOffsetNumber(page) >= offnum)
lp = PageGetItemId(page, offnum);
@ -8110,6 +8005,8 @@ heap_xlog_lock(XLogRecPtr lsn, XLogRecord *record)
HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
}
@ -8124,26 +8021,11 @@ heap_xlog_lock_updated(XLogRecPtr lsn, XLogRecord *record)
ItemId lp = NULL;
HeapTupleHeader htup;
/* If we have a full-page image, restore it and we're done */
if (record->xl_info & XLR_BKP_BLOCK(0))
{
(void) RestoreBackupBlock(lsn, record, 0, false, false);
return;
}
buffer = XLogReadBuffer(xlrec->target.node,
if (XLogReadBufferForRedo(lsn, record, 0, xlrec->target.node,
ItemPointerGetBlockNumber(&(xlrec->target.tid)),
false);
if (!BufferIsValid(buffer))
return;
page = (Page) BufferGetPage(buffer);
if (lsn <= PageGetLSN(page)) /* changes are applied */
&buffer) == BLK_NEEDS_REDO)
{
UnlockReleaseBuffer(buffer);
return;
}
page = BufferGetPage(buffer);
offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
if (PageGetMaxOffsetNumber(page) >= offnum)
lp = PageGetItemId(page, offnum);
@ -8159,6 +8041,8 @@ heap_xlog_lock_updated(XLogRecPtr lsn, XLogRecord *record)
PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
}
@ -8174,25 +8058,11 @@ heap_xlog_inplace(XLogRecPtr lsn, XLogRecord *record)
uint32 oldlen;
uint32 newlen;
/* If we have a full-page image, restore it and we're done */
if (record->xl_info & XLR_BKP_BLOCK(0))
{
(void) RestoreBackupBlock(lsn, record, 0, false, false);
return;
}
buffer = XLogReadBuffer(xlrec->target.node,
if (XLogReadBufferForRedo(lsn, record, 0, xlrec->target.node,
ItemPointerGetBlockNumber(&(xlrec->target.tid)),
false);
if (!BufferIsValid(buffer))
return;
page = (Page) BufferGetPage(buffer);
if (lsn <= PageGetLSN(page)) /* changes are applied */
&buffer) == BLK_NEEDS_REDO)
{
UnlockReleaseBuffer(buffer);
return;
}
page = BufferGetPage(buffer);
offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
if (PageGetMaxOffsetNumber(page) >= offnum)
@ -8214,6 +8084,8 @@ heap_xlog_inplace(XLogRecPtr lsn, XLogRecord *record)
PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
}
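
One recurring special case in this file (and again in the SP-GiST changes below) is a record that reinitializes the whole page, e.g. XLOG_HEAP_INIT_PAGE: the LSN interlock is skipped, because the record rebuilds the page from scratch. Condensed from heap_xlog_insert above:

    XLogRedoAction action;

    if (record->xl_info & XLOG_HEAP_INIT_PAGE)
    {
        /* Zero the block rather than reading it; old contents don't matter. */
        XLogReadBufferForRedoExtended(lsn, record, 0,
                                      target_node, MAIN_FORKNUM, blkno,
                                      RBM_ZERO, false, &buffer);
        page = BufferGetPage(buffer);
        PageInit(page, BufferGetPageSize(buffer), 0);
        action = BLK_NEEDS_REDO;    /* a freshly zeroed page always needs redo */
    }
    else
        action = XLogReadBufferForRedo(lsn, record, 0, target_node, blkno,
                                       &buffer);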

src/backend/access/nbtree/nbtxlog.c

@ -116,17 +116,15 @@ _bt_restore_meta(RelFileNode rnode, XLogRecPtr lsn,
*/
static void
_bt_clear_incomplete_split(XLogRecPtr lsn, XLogRecord *record,
int block_index,
RelFileNode rnode, BlockNumber cblock)
{
Buffer buf;
buf = XLogReadBuffer(rnode, cblock, false);
if (BufferIsValid(buf))
if (XLogReadBufferForRedo(lsn, record, block_index, rnode, cblock, &buf)
== BLK_NEEDS_REDO)
{
Page page = (Page) BufferGetPage(buf);
if (lsn > PageGetLSN(page))
{
BTPageOpaque pageop = (BTPageOpaque) PageGetSpecialPointer(page);
Assert((pageop->btpo_flags & BTP_INCOMPLETE_SPLIT) != 0);
@ -135,9 +133,9 @@ _bt_clear_incomplete_split(XLogRecPtr lsn, XLogRecord *record,
PageSetLSN(page, lsn);
MarkBufferDirty(buf);
}
if (BufferIsValid(buf))
UnlockReleaseBuffer(buf);
}
}
static void
btree_xlog_insert(bool isleaf, bool ismeta,
@ -184,28 +182,18 @@ btree_xlog_insert(bool isleaf, bool ismeta,
*/
if (!isleaf)
{
if (record->xl_info & XLR_BKP_BLOCK(0))
(void) RestoreBackupBlock(lsn, record, 0, false, false);
else
_bt_clear_incomplete_split(lsn, record, xlrec->target.node, cblkno);
_bt_clear_incomplete_split(lsn, record, 0, xlrec->target.node, cblkno);
main_blk_index = 1;
}
else
main_blk_index = 0;
if (record->xl_info & XLR_BKP_BLOCK(main_blk_index))
(void) RestoreBackupBlock(lsn, record, main_blk_index, false, false);
else
{
buffer = XLogReadBuffer(xlrec->target.node,
if (XLogReadBufferForRedo(lsn, record, main_blk_index, xlrec->target.node,
ItemPointerGetBlockNumber(&(xlrec->target.tid)),
false);
if (BufferIsValid(buffer))
&buffer) == BLK_NEEDS_REDO)
{
page = (Page) BufferGetPage(buffer);
page = BufferGetPage(buffer);
if (lsn > PageGetLSN(page))
{
if (PageAddItem(page, (Item) datapos, datalen,
ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
false, false) == InvalidOffsetNumber)
@ -214,9 +202,8 @@ btree_xlog_insert(bool isleaf, bool ismeta,
PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
}
}
/*
* Note: in normal operation, we'd update the metapage while still holding
@ -299,12 +286,7 @@ btree_xlog_split(bool onleft, bool isroot,
* before locking the other pages)
*/
if (!isleaf)
{
if (record->xl_info & XLR_BKP_BLOCK(1))
(void) RestoreBackupBlock(lsn, record, 1, false, false);
else
_bt_clear_incomplete_split(lsn, record, xlrec->node, cblkno);
}
_bt_clear_incomplete_split(lsn, record, 1, xlrec->node, cblkno);
/* Reconstruct right (new) sibling page from scratch */
rbuf = XLogReadBuffer(xlrec->node, xlrec->rightsib, true);
@ -340,28 +322,20 @@ btree_xlog_split(bool onleft, bool isroot,
/* don't release the buffer yet; we touch right page's first item below */
/* Now reconstruct left (original) sibling page */
if (record->xl_info & XLR_BKP_BLOCK(0))
lbuf = RestoreBackupBlock(lsn, record, 0, false, true);
else
{
lbuf = XLogReadBuffer(xlrec->node, xlrec->leftsib, false);
if (BufferIsValid(lbuf))
if (XLogReadBufferForRedo(lsn, record, 0, xlrec->node, xlrec->leftsib,
&lbuf) == BLK_NEEDS_REDO)
{
/*
* To retain the same physical order of the tuples that they had,
* we initialize a temporary empty page for the left page and add
* all the items to that in item number order. This mirrors how
* _bt_split() works. It's not strictly required to retain the
* same physical order, as long as the items are in the correct
* item number order, but it helps debugging. See also
* _bt_restore_page(), which does the same for the right page.
* To retain the same physical order of the tuples that they had, we
* initialize a temporary empty page for the left page and add all the
* items to that in item number order. This mirrors how _bt_split()
* works. It's not strictly required to retain the same physical
* order, as long as the items are in the correct item number order,
* but it helps debugging. See also _bt_restore_page(), which does
* the same for the right page.
*/
Page lpage = (Page) BufferGetPage(lbuf);
BTPageOpaque lopaque = (BTPageOpaque) PageGetSpecialPointer(lpage);
if (lsn > PageGetLSN(lpage))
{
OffsetNumber off;
Page newlpage;
OffsetNumber leftoff;
@ -420,8 +394,6 @@ btree_xlog_split(bool onleft, bool isroot,
PageSetLSN(lpage, lsn);
MarkBufferDirty(lbuf);
}
}
}
/* We no longer need the buffers */
if (BufferIsValid(lbuf))
@ -443,21 +415,12 @@ btree_xlog_split(bool onleft, bool isroot,
* whether this was a leaf or internal page.
*/
int rnext_index = isleaf ? 1 : 2;
if (record->xl_info & XLR_BKP_BLOCK(rnext_index))
(void) RestoreBackupBlock(lsn, record, rnext_index, false, false);
else
{
Buffer buffer;
buffer = XLogReadBuffer(xlrec->node, xlrec->rnext, false);
if (BufferIsValid(buffer))
if (XLogReadBufferForRedo(lsn, record, rnext_index, xlrec->node,
xlrec->rnext, &buffer) == BLK_NEEDS_REDO)
{
Page page = (Page) BufferGetPage(buffer);
if (lsn > PageGetLSN(page))
{
BTPageOpaque pageop = (BTPageOpaque) PageGetSpecialPointer(page);
pageop->btpo_prev = xlrec->rightsib;
@ -465,11 +428,10 @@ btree_xlog_split(bool onleft, bool isroot,
PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
}
}
}
}
static void
btree_xlog_vacuum(XLogRecPtr lsn, XLogRecord *record)
@ -529,31 +491,16 @@ btree_xlog_vacuum(XLogRecPtr lsn, XLogRecord *record)
}
}
/*
* If we have a full-page image, restore it (using a cleanup lock) and
* we're done.
*/
if (record->xl_info & XLR_BKP_BLOCK(0))
{
(void) RestoreBackupBlock(lsn, record, 0, true, false);
return;
}
/*
* Like in btvacuumpage(), we need to take a cleanup lock on every leaf
* page. See nbtree/README for details.
*/
buffer = XLogReadBufferExtended(xlrec->node, MAIN_FORKNUM, xlrec->block, RBM_NORMAL);
if (!BufferIsValid(buffer))
return;
LockBufferForCleanup(buffer);
page = (Page) BufferGetPage(buffer);
if (lsn <= PageGetLSN(page))
if (XLogReadBufferForRedoExtended(lsn, record, 0,
xlrec->node, MAIN_FORKNUM, xlrec->block,
RBM_NORMAL, true, &buffer)
== BLK_NEEDS_REDO)
{
UnlockReleaseBuffer(buffer);
return;
}
page = (Page) BufferGetPage(buffer);
if (record->xl_len > SizeOfBtreeVacuum)
{
@ -568,14 +515,16 @@ btree_xlog_vacuum(XLogRecPtr lsn, XLogRecord *record)
}
/*
* Mark the page as not containing any LP_DEAD items --- see comments in
* _bt_delitems_vacuum().
* Mark the page as not containing any LP_DEAD items --- see comments
* in _bt_delitems_vacuum().
*/
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
opaque->btpo_flags &= ~BTP_HAS_GARBAGE;
PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
}
@ -752,27 +701,14 @@ btree_xlog_delete(XLogRecPtr lsn, XLogRecord *record)
ResolveRecoveryConflictWithSnapshot(latestRemovedXid, xlrec->node);
}
/* If we have a full-page image, restore it and we're done */
if (record->xl_info & XLR_BKP_BLOCK(0))
{
(void) RestoreBackupBlock(lsn, record, 0, false, false);
return;
}
/*
* We don't need to take a cleanup lock to apply these changes. See
* nbtree/README for details.
*/
buffer = XLogReadBuffer(xlrec->node, xlrec->block, false);
if (!BufferIsValid(buffer))
return;
page = (Page) BufferGetPage(buffer);
if (lsn <= PageGetLSN(page))
if (XLogReadBufferForRedo(lsn, record, 0, xlrec->node, xlrec->block,
&buffer) == BLK_NEEDS_REDO)
{
UnlockReleaseBuffer(buffer);
return;
}
page = (Page) BufferGetPage(buffer);
if (record->xl_len > SizeOfBtreeDelete)
{
@ -784,14 +720,16 @@ btree_xlog_delete(XLogRecPtr lsn, XLogRecord *record)
}
/*
* Mark the page as not containing any LP_DEAD items --- see comments in
* _bt_delitems_delete().
* Mark the page as not containing any LP_DEAD items --- see comments
* in _bt_delitems_delete().
*/
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
opaque->btpo_flags &= ~BTP_HAS_GARBAGE;
PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
}
@ -816,16 +754,8 @@ btree_xlog_mark_page_halfdead(uint8 info, XLogRecPtr lsn, XLogRecord *record)
*/
/* parent page */
if (record->xl_info & XLR_BKP_BLOCK(0))
(void) RestoreBackupBlock(lsn, record, 0, false, false);
else
{
buffer = XLogReadBuffer(xlrec->target.node, parent, false);
if (BufferIsValid(buffer))
{
page = (Page) BufferGetPage(buffer);
pageop = (BTPageOpaque) PageGetSpecialPointer(page);
if (lsn > PageGetLSN(page))
if (XLogReadBufferForRedo(lsn, record, 0, xlrec->target.node, parent,
&buffer) == BLK_NEEDS_REDO)
{
OffsetNumber poffset;
ItemId itemid;
@ -833,6 +763,9 @@ btree_xlog_mark_page_halfdead(uint8 info, XLogRecPtr lsn, XLogRecord *record)
OffsetNumber nextoffset;
BlockNumber rightsib;
page = (Page) BufferGetPage(buffer);
pageop = (BTPageOpaque) PageGetSpecialPointer(page);
poffset = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
nextoffset = OffsetNumberNext(poffset);
@ -849,9 +782,8 @@ btree_xlog_mark_page_halfdead(uint8 info, XLogRecPtr lsn, XLogRecord *record)
PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
}
}
/* Rewrite the leaf page as a halfdead page */
buffer = XLogReadBuffer(xlrec->target.node, xlrec->leafblk, true);
@ -911,57 +843,35 @@ btree_xlog_unlink_page(uint8 info, XLogRecPtr lsn, XLogRecord *record)
*/
/* Fix left-link of right sibling */
if (record->xl_info & XLR_BKP_BLOCK(0))
(void) RestoreBackupBlock(lsn, record, 0, false, false);
else
{
buffer = XLogReadBuffer(xlrec->node, rightsib, false);
if (BufferIsValid(buffer))
if (XLogReadBufferForRedo(lsn, record, 0, xlrec->node, rightsib, &buffer)
== BLK_NEEDS_REDO)
{
page = (Page) BufferGetPage(buffer);
if (lsn <= PageGetLSN(page))
{
UnlockReleaseBuffer(buffer);
}
else
{
pageop = (BTPageOpaque) PageGetSpecialPointer(page);
pageop->btpo_prev = leftsib;
PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
}
}
}
/* Fix right-link of left sibling, if any */
if (record->xl_info & XLR_BKP_BLOCK(1))
(void) RestoreBackupBlock(lsn, record, 1, false, false);
else
{
if (leftsib != P_NONE)
{
buffer = XLogReadBuffer(xlrec->node, leftsib, false);
if (BufferIsValid(buffer))
if (XLogReadBufferForRedo(lsn, record, 1, xlrec->node, leftsib, &buffer)
== BLK_NEEDS_REDO)
{
page = (Page) BufferGetPage(buffer);
if (lsn <= PageGetLSN(page))
{
UnlockReleaseBuffer(buffer);
}
else
{
pageop = (BTPageOpaque) PageGetSpecialPointer(page);
pageop->btpo_next = rightsib;
PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
}
}
}
}
/* Rewrite target page as empty deleted page */
buffer = XLogReadBuffer(xlrec->node, target, true);
@ -1071,10 +981,7 @@ btree_xlog_newroot(XLogRecPtr lsn, XLogRecord *record)
Assert(ItemPointerGetOffsetNumber(&(itup->t_tid)) == P_HIKEY);
/* Clear the incomplete-split flag in left child */
if (record->xl_info & XLR_BKP_BLOCK(0))
(void) RestoreBackupBlock(lsn, record, 0, false, false);
else
_bt_clear_incomplete_split(lsn, record, xlrec->node, cblkno);
_bt_clear_incomplete_split(lsn, record, 0, xlrec->node, cblkno);
}
PageSetLSN(page, lsn);

src/backend/access/spgist/spgxlog.c

@ -113,6 +113,7 @@ spgRedoAddLeaf(XLogRecPtr lsn, XLogRecord *record)
SpGistLeafTupleData leafTupleHdr;
Buffer buffer;
Page page;
XLogRedoAction action;
ptr += sizeof(spgxlogAddLeaf);
leafTuple = ptr;
@ -124,22 +125,22 @@ spgRedoAddLeaf(XLogRecPtr lsn, XLogRecord *record)
* simultaneously; but in WAL replay it should be safe to update the leaf
* page before updating the parent.
*/
if (record->xl_info & XLR_BKP_BLOCK(0))
(void) RestoreBackupBlock(lsn, record, 0, false, false);
else
if (xldata->newPage)
{
buffer = XLogReadBuffer(xldata->node, xldata->blknoLeaf,
xldata->newPage);
if (BufferIsValid(buffer))
buffer = XLogReadBuffer(xldata->node, xldata->blknoLeaf, true);
SpGistInitBuffer(buffer,
SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0));
action = BLK_NEEDS_REDO;
}
else
action = XLogReadBufferForRedo(lsn, record, 0,
xldata->node, xldata->blknoLeaf,
&buffer);
if (action == BLK_NEEDS_REDO)
{
page = BufferGetPage(buffer);
if (xldata->newPage)
SpGistInitBuffer(buffer,
SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0));
if (lsn > PageGetLSN(page))
{
/* insert new tuple */
if (xldata->offnumLeaf != xldata->offnumHeadLeaf)
{
@ -162,8 +163,7 @@ spgRedoAddLeaf(XLogRecPtr lsn, XLogRecord *record)
{
/* replacing a DEAD tuple */
PageIndexTupleDelete(page, xldata->offnumLeaf);
if (PageAddItem(page,
(Item) leafTuple, leafTupleHdr.size,
if (PageAddItem(page, (Item) leafTuple, leafTupleHdr.size,
xldata->offnumLeaf, false, false) != xldata->offnumLeaf)
elog(ERROR, "failed to add item of size %u to SPGiST index page",
leafTupleHdr.size);
@ -172,23 +172,20 @@ spgRedoAddLeaf(XLogRecPtr lsn, XLogRecord *record)
PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
}
}
/* update parent downlink if necessary */
if (record->xl_info & XLR_BKP_BLOCK(1))
(void) RestoreBackupBlock(lsn, record, 1, false, false);
else if (xldata->blknoParent != InvalidBlockNumber)
if (xldata->blknoParent != InvalidBlockNumber)
{
buffer = XLogReadBuffer(xldata->node, xldata->blknoParent, false);
if (BufferIsValid(buffer))
{
page = BufferGetPage(buffer);
if (lsn > PageGetLSN(page))
if (XLogReadBufferForRedo(lsn, record, 1,
xldata->node, xldata->blknoParent,
&buffer) == BLK_NEEDS_REDO)
{
SpGistInnerTuple tuple;
page = BufferGetPage(buffer);
tuple = (SpGistInnerTuple) PageGetItem(page,
PageGetItemId(page, xldata->offnumParent));
@ -198,10 +195,10 @@ spgRedoAddLeaf(XLogRecPtr lsn, XLogRecord *record)
PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
}
}
}
static void
spgRedoMoveLeafs(XLogRecPtr lsn, XLogRecord *record)
@ -214,6 +211,7 @@ spgRedoMoveLeafs(XLogRecPtr lsn, XLogRecord *record)
int nInsert;
Buffer buffer;
Page page;
XLogRedoAction action;
fillFakeState(&state, xldata->stateSrc);
@ -234,36 +232,34 @@ spgRedoMoveLeafs(XLogRecPtr lsn, XLogRecord *record)
*/
/* Insert tuples on the dest page (do first, so redirect is valid) */
if (record->xl_info & XLR_BKP_BLOCK(1))
(void) RestoreBackupBlock(lsn, record, 1, false, false);
else
{
buffer = XLogReadBuffer(xldata->node, xldata->blknoDst,
xldata->newPage);
if (BufferIsValid(buffer))
{
page = BufferGetPage(buffer);
if (xldata->newPage)
{
buffer = XLogReadBuffer(xldata->node, xldata->blknoDst, true);
SpGistInitBuffer(buffer,
SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0));
if (lsn > PageGetLSN(page))
action = BLK_NEEDS_REDO;
}
else
action = XLogReadBufferForRedo(lsn, record, 1,
xldata->node, xldata->blknoDst,
&buffer);
if (action == BLK_NEEDS_REDO)
{
int i;
page = BufferGetPage(buffer);
for (i = 0; i < nInsert; i++)
{
char *leafTuple;
SpGistLeafTupleData leafTupleHdr;
/*
* the tuples are not aligned, so must copy to access
* the size field.
* the tuples are not aligned, so must copy to access the size
* field.
*/
leafTuple = ptr;
memcpy(&leafTupleHdr, leafTuple,
sizeof(SpGistLeafTupleData));
memcpy(&leafTupleHdr, leafTuple, sizeof(SpGistLeafTupleData));
addOrReplaceTuple(page, (Item) leafTuple,
leafTupleHdr.size, toInsert[i]);
@ -273,21 +269,14 @@ spgRedoMoveLeafs(XLogRecPtr lsn, XLogRecord *record)
PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
}
}
/* Delete tuples from the source page, inserting a redirection pointer */
if (record->xl_info & XLR_BKP_BLOCK(0))
(void) RestoreBackupBlock(lsn, record, 0, false, false);
else
{
buffer = XLogReadBuffer(xldata->node, xldata->blknoSrc, false);
if (BufferIsValid(buffer))
if (XLogReadBufferForRedo(lsn, record, 0, xldata->node, xldata->blknoSrc,
&buffer) == BLK_NEEDS_REDO)
{
page = BufferGetPage(buffer);
if (lsn > PageGetLSN(page))
{
spgPageIndexMultiDelete(&state, page, toDelete, xldata->nMoves,
state.isBuild ? SPGIST_PLACEHOLDER : SPGIST_REDIRECT,
SPGIST_PLACEHOLDER,
@ -297,23 +286,17 @@ spgRedoMoveLeafs(XLogRecPtr lsn, XLogRecord *record)
PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
}
}
/* And update the parent downlink */
if (record->xl_info & XLR_BKP_BLOCK(2))
(void) RestoreBackupBlock(lsn, record, 2, false, false);
else
{
buffer = XLogReadBuffer(xldata->node, xldata->blknoParent, false);
if (BufferIsValid(buffer))
{
page = BufferGetPage(buffer);
if (lsn > PageGetLSN(page))
if (XLogReadBufferForRedo(lsn, record, 2, xldata->node, xldata->blknoParent,
&buffer) == BLK_NEEDS_REDO)
{
SpGistInnerTuple tuple;
page = BufferGetPage(buffer);
tuple = (SpGistInnerTuple) PageGetItem(page,
PageGetItemId(page, xldata->offnumParent));
@ -323,10 +306,9 @@ spgRedoMoveLeafs(XLogRecPtr lsn, XLogRecord *record)
PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
}
}
}
static void
spgRedoAddNode(XLogRecPtr lsn, XLogRecord *record)
@ -339,6 +321,7 @@ spgRedoAddNode(XLogRecPtr lsn, XLogRecord *record)
Buffer buffer;
Page page;
int bbi;
XLogRedoAction action;
ptr += sizeof(spgxlogAddNode);
innerTuple = ptr;
@ -351,30 +334,22 @@ spgRedoAddNode(XLogRecPtr lsn, XLogRecord *record)
{
/* update in place */
Assert(xldata->blknoParent == InvalidBlockNumber);
if (record->xl_info & XLR_BKP_BLOCK(0))
(void) RestoreBackupBlock(lsn, record, 0, false, false);
else
{
buffer = XLogReadBuffer(xldata->node, xldata->blkno, false);
if (BufferIsValid(buffer))
if (XLogReadBufferForRedo(lsn, record, 0, xldata->node, xldata->blkno,
&buffer) == BLK_NEEDS_REDO)
{
page = BufferGetPage(buffer);
if (lsn > PageGetLSN(page))
{
PageIndexTupleDelete(page, xldata->offnum);
if (PageAddItem(page, (Item) innerTuple, innerTupleHdr.size,
xldata->offnum,
false, false) != xldata->offnum)
xldata->offnum, false, false) != xldata->offnum)
elog(ERROR, "failed to add item of size %u to SPGiST index page",
innerTupleHdr.size);
PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
}
}
}
else
{
/*
@ -390,30 +365,28 @@ spgRedoAddNode(XLogRecPtr lsn, XLogRecord *record)
Assert(xldata->blkno != xldata->blknoNew);
/* Install new tuple first so redirect is valid */
if (record->xl_info & XLR_BKP_BLOCK(1))
(void) RestoreBackupBlock(lsn, record, 1, false, false);
else
if (xldata->newPage)
{
buffer = XLogReadBuffer(xldata->node, xldata->blknoNew,
xldata->newPage);
if (BufferIsValid(buffer))
buffer = XLogReadBuffer(xldata->node, xldata->blknoNew, true);
/* AddNode is not used for nulls pages */
SpGistInitBuffer(buffer, 0);
action = BLK_NEEDS_REDO;
}
else
action = XLogReadBufferForRedo(lsn, record, 1,
xldata->node, xldata->blknoNew,
&buffer);
if (action == BLK_NEEDS_REDO)
{
page = BufferGetPage(buffer);
/* AddNode is not used for nulls pages */
if (xldata->newPage)
SpGistInitBuffer(buffer, 0);
if (lsn > PageGetLSN(page))
{
addOrReplaceTuple(page, (Item) innerTuple,
innerTupleHdr.size, xldata->offnumNew);
/*
* If parent is in this same page, don't advance LSN;
* doing so would fool us into not applying the parent
* downlink update below. We'll update the LSN when we
* fix the parent downlink.
* If parent is in this same page, don't advance LSN; doing so
* would fool us into not applying the parent downlink update
* below. We'll update the LSN when we fix the parent downlink.
*/
if (xldata->blknoParent != xldata->blknoNew)
{
@ -421,23 +394,17 @@ spgRedoAddNode(XLogRecPtr lsn, XLogRecord *record)
}
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
}
}
/* Delete old tuple, replacing it with redirect or placeholder tuple */
if (record->xl_info & XLR_BKP_BLOCK(0))
(void) RestoreBackupBlock(lsn, record, 0, false, false);
else
{
buffer = XLogReadBuffer(xldata->node, xldata->blkno, false);
if (BufferIsValid(buffer))
{
page = BufferGetPage(buffer);
if (lsn > PageGetLSN(page))
if (XLogReadBufferForRedo(lsn, record, 0, xldata->node, xldata->blkno,
&buffer) == BLK_NEEDS_REDO)
{
SpGistDeadTuple dt;
page = BufferGetPage(buffer);
if (state.isBuild)
dt = spgFormDeadTuple(&state, SPGIST_PLACEHOLDER,
InvalidBlockNumber,
@ -448,8 +415,7 @@ spgRedoAddNode(XLogRecPtr lsn, XLogRecord *record)
xldata->offnumNew);
PageIndexTupleDelete(page, xldata->offnum);
if (PageAddItem(page, (Item) dt, dt->size,
xldata->offnum,
if (PageAddItem(page, (Item) dt, dt->size, xldata->offnum,
false, false) != xldata->offnum)
elog(ERROR, "failed to add item of size %u to SPGiST index page",
dt->size);
@ -460,10 +426,9 @@ spgRedoAddNode(XLogRecPtr lsn, XLogRecord *record)
SpGistPageGetOpaque(page)->nRedirection++;
/*
* If parent is in this same page, don't advance LSN;
* doing so would fool us into not applying the parent
* downlink update below. We'll update the LSN when we
* fix the parent downlink.
* If parent is in this same page, don't advance LSN; doing so
* would fool us into not applying the parent downlink update
* below. We'll update the LSN when we fix the parent downlink.
*/
if (xldata->blknoParent != xldata->blkno)
{
@ -471,9 +436,8 @@ spgRedoAddNode(XLogRecPtr lsn, XLogRecord *record)
}
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
}
}
/*
* Update parent downlink. Since parent could be in either of the
@ -491,17 +455,21 @@ spgRedoAddNode(XLogRecPtr lsn, XLogRecord *record)
{
if (bbi == 2) /* else we already did it */
(void) RestoreBackupBlock(lsn, record, bbi, false, false);
action = BLK_RESTORED;
buffer = InvalidBuffer;
}
else
{
buffer = XLogReadBuffer(xldata->node, xldata->blknoParent, false);
if (BufferIsValid(buffer))
{
page = BufferGetPage(buffer);
if (lsn > PageGetLSN(page))
action = XLogReadBufferForRedo(lsn, record, bbi, xldata->node,
xldata->blknoParent, &buffer);
Assert(action != BLK_RESTORED);
}
if (action == BLK_NEEDS_REDO)
{
SpGistInnerTuple innerTuple;
page = BufferGetPage(buffer);
innerTuple = (SpGistInnerTuple) PageGetItem(page,
PageGetItemId(page, xldata->offnumParent));
@ -511,11 +479,10 @@ spgRedoAddNode(XLogRecPtr lsn, XLogRecord *record)
PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
}
}
}
}
static void
spgRedoSplitTuple(XLogRecPtr lsn, XLogRecord *record)
@ -545,43 +512,41 @@ spgRedoSplitTuple(XLogRecPtr lsn, XLogRecord *record)
*/
/* insert postfix tuple first to avoid dangling link */
if (record->xl_info & XLR_BKP_BLOCK(1))
(void) RestoreBackupBlock(lsn, record, 1, false, false);
else if (xldata->blknoPostfix != xldata->blknoPrefix)
if (xldata->blknoPostfix != xldata->blknoPrefix)
{
buffer = XLogReadBuffer(xldata->node, xldata->blknoPostfix,
xldata->newPage);
if (BufferIsValid(buffer))
XLogRedoAction action;
if (xldata->newPage)
{
buffer = XLogReadBuffer(xldata->node, xldata->blknoPostfix, true);
/* SplitTuple is not used for nulls pages */
SpGistInitBuffer(buffer, 0);
action = BLK_NEEDS_REDO;
}
else
action = XLogReadBufferForRedo(lsn, record, 1,
xldata->node, xldata->blknoPostfix,
&buffer);
if (action == BLK_NEEDS_REDO)
{
page = BufferGetPage(buffer);
/* SplitTuple is not used for nulls pages */
if (xldata->newPage)
SpGistInitBuffer(buffer, 0);
if (lsn > PageGetLSN(page))
{
addOrReplaceTuple(page, (Item) postfixTuple,
postfixTupleHdr.size, xldata->offnumPostfix);
PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
}
}
/* now handle the original page */
if (record->xl_info & XLR_BKP_BLOCK(0))
(void) RestoreBackupBlock(lsn, record, 0, false, false);
else
{
buffer = XLogReadBuffer(xldata->node, xldata->blknoPrefix, false);
if (BufferIsValid(buffer))
if (XLogReadBufferForRedo(lsn, record, 0, xldata->node, xldata->blknoPrefix,
&buffer) == BLK_NEEDS_REDO)
{
page = BufferGetPage(buffer);
if (lsn > PageGetLSN(page))
{
PageIndexTupleDelete(page, xldata->offnumPrefix);
if (PageAddItem(page, (Item) prefixTuple, prefixTupleHdr.size,
xldata->offnumPrefix, false, false) != xldata->offnumPrefix)
@ -589,17 +554,15 @@ spgRedoSplitTuple(XLogRecPtr lsn, XLogRecord *record)
prefixTupleHdr.size);
if (xldata->blknoPostfix == xldata->blknoPrefix)
addOrReplaceTuple(page, (Item) postfixTuple,
postfixTupleHdr.size,
addOrReplaceTuple(page, (Item) postfixTuple, postfixTupleHdr.size,
xldata->offnumPostfix);
PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
}
}
}
static void
spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
@ -616,9 +579,11 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
Buffer destBuffer;
Page srcPage;
Page destPage;
Buffer innerBuffer;
Page page;
int bbi;
int i;
XLogRedoAction action;
fillFakeState(&state, xldata->stateSrc);
@ -668,23 +633,16 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
* inserting leaf tuples and the new inner tuple, else the added
* redirect tuple will be a dangling link.)
*/
if (record->xl_info & XLR_BKP_BLOCK(bbi))
{
srcBuffer = RestoreBackupBlock(lsn, record, bbi, false, true);
srcPage = NULL; /* don't need to do any page updates */
}
else
{
srcBuffer = XLogReadBuffer(xldata->node, xldata->blknoSrc, false);
if (BufferIsValid(srcBuffer))
if (XLogReadBufferForRedo(lsn, record, bbi,
xldata->node, xldata->blknoSrc,
&srcBuffer) == BLK_NEEDS_REDO)
{
srcPage = BufferGetPage(srcBuffer);
if (lsn > PageGetLSN(srcPage))
{
/*
* We have it a bit easier here than in doPickSplit(),
* because we know the inner tuple's location already, so
* we can inject the correct redirection tuple now.
* We have it a bit easier here than in doPickSplit(), because we
* know the inner tuple's location already, so we can inject the
* correct redirection tuple now.
*/
if (!state.isBuild)
spgPageIndexMultiDelete(&state, srcPage,
@ -704,11 +662,9 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
/* don't update LSN etc till we're done with it */
}
else
{
srcPage = NULL; /* don't do any page updates */
}
else
srcPage = NULL;
}
bbi++;
}
@ -735,22 +691,15 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
* We could probably release the page lock immediately in the
* full-page-image case, but for safety let's hold it till later.
*/
if (record->xl_info & XLR_BKP_BLOCK(bbi))
{
destBuffer = RestoreBackupBlock(lsn, record, bbi, false, true);
destPage = NULL; /* don't need to do any page updates */
}
else
{
destBuffer = XLogReadBuffer(xldata->node, xldata->blknoDest, false);
if (BufferIsValid(destBuffer))
if (XLogReadBufferForRedo(lsn, record, bbi,
xldata->node, xldata->blknoDest,
&destBuffer) == BLK_NEEDS_REDO)
{
destPage = (Page) BufferGetPage(destBuffer);
if (lsn <= PageGetLSN(destPage))
destPage = NULL; /* don't do any page updates */
}
else
destPage = NULL;
{
destPage = NULL; /* don't do any page updates */
}
bbi++;
}
@ -787,23 +736,21 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
}
/* restore new inner tuple */
if (record->xl_info & XLR_BKP_BLOCK(bbi))
(void) RestoreBackupBlock(lsn, record, bbi, false, false);
else
{
Buffer buffer = XLogReadBuffer(xldata->node, xldata->blknoInner,
xldata->initInner);
if (BufferIsValid(buffer))
{
page = BufferGetPage(buffer);
if (xldata->initInner)
SpGistInitBuffer(buffer,
(xldata->storesNulls ? SPGIST_NULLS : 0));
if (lsn > PageGetLSN(page))
{
innerBuffer = XLogReadBuffer(xldata->node, xldata->blknoInner, true);
SpGistInitBuffer(innerBuffer,
(xldata->storesNulls ? SPGIST_NULLS : 0));
action = BLK_NEEDS_REDO;
}
else
action = XLogReadBufferForRedo(lsn, record, bbi, xldata->node,
xldata->blknoInner, &innerBuffer);
if (action == BLK_NEEDS_REDO)
{
page = BufferGetPage(innerBuffer);
addOrReplaceTuple(page, (Item) innerTuple, innerTupleHdr.size,
xldata->offnumInner);
@ -819,11 +766,10 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
}
PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
}
UnlockReleaseBuffer(buffer);
}
MarkBufferDirty(innerBuffer);
}
if (BufferIsValid(innerBuffer))
UnlockReleaseBuffer(innerBuffer);
bbi++;
/*
@ -843,31 +789,26 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
}
else if (xldata->blknoInner != xldata->blknoParent)
{
if (record->xl_info & XLR_BKP_BLOCK(bbi))
(void) RestoreBackupBlock(lsn, record, bbi, false, false);
else
{
Buffer buffer = XLogReadBuffer(xldata->node, xldata->blknoParent, false);
Buffer parentBuffer;
if (BufferIsValid(buffer))
{
page = BufferGetPage(buffer);
if (lsn > PageGetLSN(page))
if (XLogReadBufferForRedo(lsn, record, bbi,
xldata->node, xldata->blknoParent,
&parentBuffer) == BLK_NEEDS_REDO)
{
SpGistInnerTuple parent;
page = BufferGetPage(parentBuffer);
parent = (SpGistInnerTuple) PageGetItem(page,
PageGetItemId(page, xldata->offnumParent));
spgUpdateNodeLink(parent, xldata->nodeI,
xldata->blknoInner, xldata->offnumInner);
PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
}
UnlockReleaseBuffer(buffer);
}
MarkBufferDirty(parentBuffer);
}
if (BufferIsValid(parentBuffer))
UnlockReleaseBuffer(parentBuffer);
}
}
@ -902,16 +843,11 @@ spgRedoVacuumLeaf(XLogRecPtr lsn, XLogRecord *record)
ptr += sizeof(OffsetNumber) * xldata->nChain;
chainDest = (OffsetNumber *) ptr;
if (record->xl_info & XLR_BKP_BLOCK(0))
(void) RestoreBackupBlock(lsn, record, 0, false, false);
else
{
buffer = XLogReadBuffer(xldata->node, xldata->blkno, false);
if (BufferIsValid(buffer))
if (XLogReadBufferForRedo(lsn, record, 0, xldata->node, xldata->blkno,
&buffer) == BLK_NEEDS_REDO)
{
page = BufferGetPage(buffer);
if (lsn > PageGetLSN(page))
{
spgPageIndexMultiDelete(&state, page,
toDead, xldata->nDead,
SPGIST_DEAD, SPGIST_DEAD,
@ -955,10 +891,9 @@ spgRedoVacuumLeaf(XLogRecPtr lsn, XLogRecord *record)
PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
}
}
}
static void
spgRedoVacuumRoot(XLogRecPtr lsn, XLogRecord *record)
@ -971,26 +906,20 @@ spgRedoVacuumRoot(XLogRecPtr lsn, XLogRecord *record)
toDelete = xldata->offsets;
if (record->xl_info & XLR_BKP_BLOCK(0))
(void) RestoreBackupBlock(lsn, record, 0, false, false);
else
{
buffer = XLogReadBuffer(xldata->node, xldata->blkno, false);
if (BufferIsValid(buffer))
if (XLogReadBufferForRedo(lsn, record, 0, xldata->node, xldata->blkno,
&buffer) == BLK_NEEDS_REDO)
{
page = BufferGetPage(buffer);
if (lsn > PageGetLSN(page))
{
/* The tuple numbers are in order */
PageIndexMultiDelete(page, toDelete, xldata->nDelete);
PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
}
}
}
static void
spgRedoVacuumRedirect(XLogRecPtr lsn, XLogRecord *record)
@ -999,7 +928,6 @@ spgRedoVacuumRedirect(XLogRecPtr lsn, XLogRecord *record)
spgxlogVacuumRedirect *xldata = (spgxlogVacuumRedirect *) ptr;
OffsetNumber *itemToPlaceholder;
Buffer buffer;
Page page;
itemToPlaceholder = xldata->offsets;
@ -1014,17 +942,10 @@ spgRedoVacuumRedirect(XLogRecPtr lsn, XLogRecord *record)
xldata->node);
}
if (record->xl_info & XLR_BKP_BLOCK(0))
(void) RestoreBackupBlock(lsn, record, 0, false, false);
else
{
buffer = XLogReadBuffer(xldata->node, xldata->blkno, false);
if (BufferIsValid(buffer))
{
page = BufferGetPage(buffer);
if (lsn > PageGetLSN(page))
if (XLogReadBufferForRedo(lsn, record, 0, xldata->node, xldata->blkno,
&buffer) == BLK_NEEDS_REDO)
{
Page page = BufferGetPage(buffer);
SpGistPageOpaque opaque = SpGistPageGetOpaque(page);
int i;
@ -1068,11 +989,9 @@ spgRedoVacuumRedirect(XLogRecPtr lsn, XLogRecord *record)
PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
}
}
}
void
spg_redo(XLogRecPtr lsn, XLogRecord *record)

View File

@ -500,34 +500,29 @@ incrementally update the page, the rdata array *must* mention the buffer
ID at least once; otherwise there is no defense against torn-page problems.
The standard replay-routine pattern for this case is
if (record->xl_info & XLR_BKP_BLOCK(N))
if (XLogReadBufferForRedo(lsn, record, N, rnode, blkno, &buffer) == BLK_NEEDS_REDO)
{
/* apply the change from the full-page image */
(void) RestoreBackupBlock(lsn, record, N, false, false);
return;
}
buffer = XLogReadBuffer(rnode, blkno, false);
if (!BufferIsValid(buffer))
{
/* page has been deleted, so we need do nothing */
return;
}
page = (Page) BufferGetPage(buffer);
if (XLByteLE(lsn, PageGetLSN(page)))
{
/* changes are already applied */
UnlockReleaseBuffer(buffer);
return;
}
... apply the change ...
PageSetLSN(page, lsn);
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
UnlockReleaseBuffer(buffer);
XLogReadBufferForRedo reads the page from disk, and determines what action
needs to be taken on it. If the XLR_BKP_BLOCK(N) flag is set, it restores
the full-page image and returns BLK_RESTORED. If there is no full-page
image, but the page cannot be found, or the change has already been replayed
(i.e. the page's LSN >= the record we're replaying), it returns BLK_NOTFOUND
or BLK_DONE, respectively. Usually, the redo routine only needs to pay
attention to the BLK_NEEDS_REDO return code, which means that the routine
should apply the incremental change. In any case, the caller is responsible
for unlocking and releasing the buffer. Note that XLogReadBufferForRedo
returns the buffer locked even if no redo is required, unless the page does
not exist.
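For instance (a minimal sketch, not part of the committed text), a routine
that also wants to know whether a full-page image was applied can capture
the return code instead of comparing it inline:

	action = XLogReadBufferForRedo(lsn, record, N, rnode, blkno, &buffer);
	if (action == BLK_NEEDS_REDO)
	{
		page = (Page) BufferGetPage(buffer);
		... apply the change ...
		PageSetLSN(page, lsn);
		MarkBufferDirty(buffer);
	}
	/* BLK_RESTORED, BLK_DONE and BLK_NOTFOUND require no further action */
	if (BufferIsValid(buffer))
		UnlockReleaseBuffer(buffer);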
As noted above, for a multi-page update you need to be able to determine
which XLR_BKP_BLOCK(N) flag applies to each page. If a WAL record reflects
a combination of fully-rewritable and incremental updates, then the rewritable
@ -539,31 +534,8 @@ per the above discussion, fully-rewritable buffers shouldn't be mentioned in
When replaying a WAL record that describes changes on multiple pages, you
must be careful to lock the pages properly to prevent concurrent Hot Standby
queries from seeing an inconsistent state. If this requires that two
or more buffer locks be held concurrently, the coding pattern shown above
is too simplistic, since it assumes the routine can exit as soon as it's
known the current page requires no modification. Instead, you might have
something like
if (record->xl_info & XLR_BKP_BLOCK(0))
{
/* apply the change from the full-page image */
buffer0 = RestoreBackupBlock(lsn, record, 0, false, true);
}
else
{
buffer0 = XLogReadBuffer(rnode, blkno, false);
if (BufferIsValid(buffer0))
{
... apply the change if not already done ...
MarkBufferDirty(buffer0);
}
}
... similarly apply the changes for remaining pages ...
/* and now we can release the lock on the first page */
if (BufferIsValid(buffer0))
UnlockReleaseBuffer(buffer0);
or more buffer locks be held concurrently, you must lock the pages in
appropriate order, and not release the locks until all the changes are done.
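For example, a record touching two pages could be replayed like this (a
minimal sketch in the spirit of the pattern above; the block indexes and
variable names are illustrative):

	action0 = XLogReadBufferForRedo(lsn, record, 0, rnode, blkno0, &buffer0);
	action1 = XLogReadBufferForRedo(lsn, record, 1, rnode, blkno1, &buffer1);
	if (action0 == BLK_NEEDS_REDO)
		... apply the change to the first page ...
	if (action1 == BLK_NEEDS_REDO)
		... apply the change to the second page ...
	/* only now that all the changes are done, release both locks */
	if (BufferIsValid(buffer1))
		UnlockReleaseBuffer(buffer1);
	if (BufferIsValid(buffer0))
		UnlockReleaseBuffer(buffer0);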
Note that we must only use PageSetLSN/PageGetLSN() when we know the action
is serialised. Only Startup process may modify data blocks during recovery,

View File

@ -242,6 +242,87 @@ XLogCheckInvalidPages(void)
invalid_page_tab = NULL;
}
/*
* XLogReadBufferForRedo
* Read a page during XLOG replay
*
* Reads a block referenced by a WAL record into shared buffer cache, and
* determines what needs to be done to redo the changes to it. If the WAL
* record includes a full-page image of the page, it is restored.
*
* 'lsn' is the LSN of the record being replayed. It is compared with the
* page's LSN to determine if the record has already been replayed.
* 'rnode' and 'blkno' point to the block being replayed (main fork number
* is implied, use XLogReadBufferForRedoExtended for other forks).
* 'block_index' identifies the backup block in the record for the page.
*
* Returns one of the following:
*
* BLK_NEEDS_REDO - changes from the WAL record need to be applied
* BLK_DONE - block doesn't need replaying
* BLK_RESTORED - block was restored from a full-page image included in
* the record
* BLK_NOTFOUND - block was not found (because it was truncated away by
* an operation later in the WAL stream)
*
* On return, the buffer is locked in exclusive mode, and returned in *buf.
* Note that the buffer is locked and returned even if it doesn't need
* replaying. (Getting the buffer lock is not really necessary during
* single-process crash recovery, but some subroutines such as MarkBufferDirty
* will complain if we don't have the lock. In hot standby mode it's
* definitely necessary.)
*/
XLogRedoAction
XLogReadBufferForRedo(XLogRecPtr lsn, XLogRecord *record, int block_index,
RelFileNode rnode, BlockNumber blkno,
Buffer *buf)
{
return XLogReadBufferForRedoExtended(lsn, record, block_index,
rnode, MAIN_FORKNUM, blkno,
RBM_NORMAL, false, buf);
}
/*
* XLogReadBufferForRedoExtended
* Like XLogReadBufferForRedo, but with extra options.
*
* In RBM_ZERO or RBM_ZERO_ON_ERROR mode, if the page doesn't exist, the
* relation is extended with all-zeroes pages up to the referenced block
* number. In RBM_ZERO mode, the return value is always BLK_NEEDS_REDO.
*
* If 'get_cleanup_lock' is true, a "cleanup lock" is acquired on the buffer
* using LockBufferForCleanup(), instead of a regular exclusive lock.
*/
XLogRedoAction
XLogReadBufferForRedoExtended(XLogRecPtr lsn, XLogRecord *record,
int block_index, RelFileNode rnode,
ForkNumber forkno, BlockNumber blkno,
ReadBufferMode mode, bool get_cleanup_lock,
Buffer *buf)
{
if (record->xl_info & XLR_BKP_BLOCK(block_index))
{
*buf = RestoreBackupBlock(lsn, record, block_index,
get_cleanup_lock, true);
return BLK_RESTORED;
}
else
{
*buf = XLogReadBufferExtended(rnode, forkno, blkno, mode);
if (BufferIsValid(*buf))
{
LockBuffer(*buf, BUFFER_LOCK_EXCLUSIVE);
if (lsn <= PageGetLSN(BufferGetPage(*buf)))
return BLK_DONE;
else
return BLK_NEEDS_REDO;
}
else
return BLK_NOTFOUND;
}
}
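/*
 * Illustrative use of the extended variant (a sketch, not part of the
 * original file; the xl_foo record fields here are hypothetical): a redo
 * routine that must take a cleanup lock, as vacuum replay does, could call
 *
 *	if (XLogReadBufferForRedoExtended(lsn, record, 0, xlrec->node,
 *									  MAIN_FORKNUM, xlrec->blkno,
 *									  RBM_NORMAL, true, &buffer)
 *		== BLK_NEEDS_REDO)
 *	{
 *		... apply the incremental change ...
 *	}
 *	if (BufferIsValid(buffer))
 *		UnlockReleaseBuffer(buffer);
 */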
/*
* XLogReadBuffer
* Read a page during XLOG replay.

View File

@ -1,7 +1,7 @@
/*
* xlogutils.h
*
* PostgreSQL transaction log manager utility routines
* Utilities for replaying WAL records.
*
* Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
@ -11,6 +11,7 @@
#ifndef XLOG_UTILS_H
#define XLOG_UTILS_H
#include "access/xlog.h"
#include "storage/bufmgr.h"
@ -22,6 +23,26 @@ extern void XLogDropDatabase(Oid dbid);
extern void XLogTruncateRelation(RelFileNode rnode, ForkNumber forkNum,
BlockNumber nblocks);
/* Result codes for XLogReadBufferForRedo[Extended] */
typedef enum
{
BLK_NEEDS_REDO, /* changes from WAL record need to be applied */
BLK_DONE, /* block is already up-to-date */
BLK_RESTORED, /* block was restored from a full-page image */
BLK_NOTFOUND /* block was not found (and hence does not need to be
* replayed) */
} XLogRedoAction;
extern XLogRedoAction XLogReadBufferForRedo(XLogRecPtr lsn, XLogRecord *record,
int block_index, RelFileNode rnode, BlockNumber blkno,
Buffer *buf);
extern XLogRedoAction XLogReadBufferForRedoExtended(XLogRecPtr lsn,
XLogRecord *record, int block_index,
RelFileNode rnode, ForkNumber forkno,
BlockNumber blkno,
ReadBufferMode mode, bool get_cleanup_lock,
Buffer *buf);
extern Buffer XLogReadBuffer(RelFileNode rnode, BlockNumber blkno, bool init);
extern Buffer XLogReadBufferExtended(RelFileNode rnode, ForkNumber forknum,
BlockNumber blkno, ReadBufferMode mode);