Patch VACUUM problem with moving a chain of updated tuples when the source
and destination of a tuple lie on the same page.
(Previously fixed in the REL7_0 branch; now applied to the current branch.)
This commit is contained in:
Tom Lane 2000-10-22 19:49:43 +00:00
parent cd57d64876
commit 5aeec4bbbc
1 changed file with 56 additions and 22 deletions

View File

@ -8,7 +8,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.168 2000/10/16 17:08:05 momjian Exp $ * $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.169 2000/10/22 19:49:43 tgl Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
@ -669,6 +669,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
tuple.t_data->t_cmin)) tuple.t_data->t_cmin))
{ {
tuple.t_data->t_infomask |= HEAP_XMIN_INVALID; tuple.t_data->t_infomask |= HEAP_XMIN_INVALID;
pgchanged = true;
tupgone = true; tupgone = true;
} }
else else
@ -683,6 +684,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
tuple.t_data->t_cmin)) tuple.t_data->t_cmin))
{ {
tuple.t_data->t_infomask |= HEAP_XMIN_INVALID; tuple.t_data->t_infomask |= HEAP_XMIN_INVALID;
pgchanged = true;
tupgone = true; tupgone = true;
} }
else else
@ -730,8 +732,10 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
{ {
if (tuple.t_data->t_infomask & HEAP_MARKED_FOR_UPDATE) if (tuple.t_data->t_infomask & HEAP_MARKED_FOR_UPDATE)
{ {
pgchanged = true;
tuple.t_data->t_infomask |= HEAP_XMAX_INVALID; tuple.t_data->t_infomask |= HEAP_XMAX_INVALID;
tuple.t_data->t_infomask &=
~(HEAP_XMAX_COMMITTED | HEAP_MARKED_FOR_UPDATE);
pgchanged = true;
} }
else else
tupgone = true; tupgone = true;
@ -746,6 +750,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
if (tuple.t_data->t_infomask & HEAP_MARKED_FOR_UPDATE) if (tuple.t_data->t_infomask & HEAP_MARKED_FOR_UPDATE)
{ {
tuple.t_data->t_infomask |= HEAP_XMAX_INVALID; tuple.t_data->t_infomask |= HEAP_XMAX_INVALID;
tuple.t_data->t_infomask &=
~(HEAP_XMAX_COMMITTED | HEAP_MARKED_FOR_UPDATE);
pgchanged = true; pgchanged = true;
} }
else else
@ -759,6 +765,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
* from crashed process. - vadim 06/02/97 * from crashed process. - vadim 06/02/97
*/ */
tuple.t_data->t_infomask |= HEAP_XMAX_INVALID; tuple.t_data->t_infomask |= HEAP_XMAX_INVALID;
tuple.t_data->t_infomask &=
~(HEAP_XMAX_COMMITTED | HEAP_MARKED_FOR_UPDATE);
pgchanged = true; pgchanged = true;
} }
else else
@ -818,6 +826,14 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
{ {
ItemId lpp; ItemId lpp;
/*
* Here we are building a temporary copy of the page with
* dead tuples removed. Below we will apply
* PageRepairFragmentation to the copy, so that we can
* determine how much space will be available after
* removal of dead tuples. But note we are NOT changing
* the real page yet...
*/
if (tempPage == (Page) NULL) if (tempPage == (Page) NULL)
{ {
Size pageSize; Size pageSize;
@ -827,14 +843,12 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
memmove(tempPage, page, pageSize); memmove(tempPage, page, pageSize);
} }
/* mark it unused on the temp page */
lpp = &(((PageHeader) tempPage)->pd_linp[offnum - 1]); lpp = &(((PageHeader) tempPage)->pd_linp[offnum - 1]);
/* mark it unused */
lpp->lp_flags &= ~LP_USED; lpp->lp_flags &= ~LP_USED;
vacpage->offsets[vacpage->offsets_free++] = offnum; vacpage->offsets[vacpage->offsets_free++] = offnum;
tups_vacuumed++; tups_vacuumed++;
} }
else else
{ {
@ -1397,20 +1411,45 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
tuple.t_datamcxt = NULL; tuple.t_datamcxt = NULL;
tuple.t_data = (HeapTupleHeader) PageGetItem(Cpage, Citemid); tuple.t_data = (HeapTupleHeader) PageGetItem(Cpage, Citemid);
tuple_len = tuple.t_len = ItemIdGetLength(Citemid); tuple_len = tuple.t_len = ItemIdGetLength(Citemid);
/* Get page to move in */
/*
* make a copy of the source tuple, and then mark the
* source tuple MOVED_OFF.
*/
heap_copytuple_with_tuple(&tuple, &newtup);
RelationInvalidateHeapTuple(onerel, &tuple);
TransactionIdStore(myXID, (TransactionId *) &(tuple.t_data->t_cmin));
tuple.t_data->t_infomask &=
~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_IN);
tuple.t_data->t_infomask |= HEAP_MOVED_OFF;
/* Get page to move to */
cur_buffer = ReadBuffer(onerel, destvacpage->blkno); cur_buffer = ReadBuffer(onerel, destvacpage->blkno);
/* /*
* We should LockBuffer(cur_buffer) but don't, at the * We should LockBuffer(cur_buffer) but don't, at the
* moment. If you'll do LockBuffer then UNLOCK it * moment. This should be safe enough, since we have
* before index_insert: unique btree-s call heap_fetch * exclusive lock on the whole relation.
* to get t_infomask of inserted heap tuple !!! * If you'll do LockBuffer then UNLOCK it before
* index_insert: unique btree-s call heap_fetch to get
* t_infomask of inserted heap tuple !!!
*/ */
ToPage = BufferGetPage(cur_buffer); ToPage = BufferGetPage(cur_buffer);
/* /*
* If this page was not used before - clean it. * If this page was not used before - clean it.
* *
* NOTE: a nasty bug used to lurk here. It is possible
* for the source and destination pages to be the same
* (since this tuple-chain member can be on a page lower
* than the one we're currently processing in the outer
* loop). If that's true, then after vacuum_page() the
* source tuple will have been moved, and tuple.t_data
* will be pointing at garbage. Therefore we must do
* everything that uses tuple.t_data BEFORE this step!!
*
* This path is different from the other callers of * This path is different from the other callers of
* vacuum_page, because we have already incremented the * vacuum_page, because we have already incremented the
* vacpage's offsets_used field to account for the * vacpage's offsets_used field to account for the
@ -1428,8 +1467,11 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
vacuum_page(ToPage, destvacpage); vacuum_page(ToPage, destvacpage);
destvacpage->offsets_used = sv_offsets_used; destvacpage->offsets_used = sv_offsets_used;
} }
heap_copytuple_with_tuple(&tuple, &newtup);
RelationInvalidateHeapTuple(onerel, &tuple); /*
* Update the state of the copied tuple, and store it
* on the destination page.
*/
TransactionIdStore(myXID, (TransactionId *) &(newtup.t_data->t_cmin)); TransactionIdStore(myXID, (TransactionId *) &(newtup.t_data->t_cmin));
newtup.t_data->t_infomask &= newtup.t_data->t_infomask &=
~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_OFF); ~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_OFF);
@ -1450,8 +1492,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
last_move_dest_block = destvacpage->blkno; last_move_dest_block = destvacpage->blkno;
/* /*
* Set t_ctid pointing to itself for last tuple in * Set new tuple's t_ctid pointing to itself for last
* chain and to next tuple in chain otherwise. * tuple in chain, and to next tuple in chain otherwise.
*/ */
if (!ItemPointerIsValid(&Ctid)) if (!ItemPointerIsValid(&Ctid))
newtup.t_data->t_ctid = newtup.t_self; newtup.t_data->t_ctid = newtup.t_self;
@ -1459,11 +1501,6 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
newtup.t_data->t_ctid = Ctid; newtup.t_data->t_ctid = Ctid;
Ctid = newtup.t_self; Ctid = newtup.t_self;
TransactionIdStore(myXID, (TransactionId *) &(tuple.t_data->t_cmin));
tuple.t_data->t_infomask &=
~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_IN);
tuple.t_data->t_infomask |= HEAP_MOVED_OFF;
num_moved++; num_moved++;
/* /*
@ -1504,10 +1541,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
} }
} }
WriteBuffer(cur_buffer); WriteBuffer(cur_buffer);
if (Cbuf == buf) WriteBuffer(Cbuf);
ReleaseBuffer(Cbuf);
else
WriteBuffer(Cbuf);
} }
cur_buffer = InvalidBuffer; cur_buffer = InvalidBuffer;
pfree(vtmove); pfree(vtmove);