Code cleanup for heap_freeze_tuple.

It used to be the case that lazy vacuum could call this function with only
a shared lock on the buffer, but neither lazy vacuum nor any other
code path does that any more.  Simplify the code accordingly and clean
up some related, obsolete comments.
Robert Haas 2012-03-26 11:03:06 -04:00
parent e8476f46fc
commit 7386089d23
4 changed files with 7 additions and 55 deletions
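
Before the per-file diffs, here is a minimal sketch (not part of the commit) of how the calling convention changes. buf, tuple, and cutoff_xid are hypothetical locals; LockBuffer() and the BUFFER_LOCK_* modes are the standard buffer-manager API.

    /* Before this commit: a caller holding only a share lock could pass
     * the buffer and let heap_freeze_tuple() trade up to exclusive. */
    LockBuffer(buf, BUFFER_LOCK_SHARE);
    (void) heap_freeze_tuple(tuple, cutoff_xid, buf);
    LockBuffer(buf, BUFFER_LOCK_UNLOCK);

    /* After this commit: the caller must already hold the exclusive lock
     * (or the tuple must not live in a shared buffer at all). */
    LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
    (void) heap_freeze_tuple(tuple, cutoff_xid);
    LockBuffer(buf, BUFFER_LOCK_UNLOCK);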

src/backend/access/heap/heapam.c

@@ -3947,10 +3947,8 @@ heap_inplace_update(Relation relation, HeapTuple tuple)
  * because this function is applied during WAL recovery, when we don't have
  * access to any such state, and can't depend on the hint bits to be set.)
  *
- * In lazy VACUUM, we call this while initially holding only a shared lock
- * on the tuple's buffer.  If any change is needed, we trade that in for an
- * exclusive lock before making the change.  Caller should pass the buffer ID
- * if shared lock is held, InvalidBuffer if exclusive lock is already held.
+ * If the tuple is in a shared buffer, caller must hold an exclusive lock on
+ * that buffer.
  *
  * Note: it might seem we could make the changes without exclusive lock, since
  * TransactionId read/write is assumed atomic anyway.  However there is a race
@@ -3962,8 +3960,7 @@ heap_inplace_update(Relation relation, HeapTuple tuple)
  * infomask bits.
  */
 bool
-heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
-                  Buffer buf)
+heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid)
 {
     bool        changed = false;
     TransactionId xid;
@@ -3972,13 +3969,6 @@ heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
     if (TransactionIdIsNormal(xid) &&
         TransactionIdPrecedes(xid, cutoff_xid))
     {
-        if (buf != InvalidBuffer)
-        {
-            /* trade in share lock for exclusive lock */
-            LockBuffer(buf, BUFFER_LOCK_UNLOCK);
-            LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
-            buf = InvalidBuffer;
-        }
         HeapTupleHeaderSetXmin(tuple, FrozenTransactionId);
 
         /*
@@ -3990,28 +3980,12 @@ heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
         changed = true;
     }
 
-    /*
-     * When we release shared lock, it's possible for someone else to change
-     * xmax before we get the lock back, so repeat the check after acquiring
-     * exclusive lock.  (We don't need this pushup for xmin, because only
-     * VACUUM could be interested in changing an existing tuple's xmin, and
-     * there's only one VACUUM allowed on a table at a time.)
-     */
-recheck_xmax:
     if (!(tuple->t_infomask & HEAP_XMAX_IS_MULTI))
     {
         xid = HeapTupleHeaderGetXmax(tuple);
         if (TransactionIdIsNormal(xid) &&
             TransactionIdPrecedes(xid, cutoff_xid))
         {
-            if (buf != InvalidBuffer)
-            {
-                /* trade in share lock for exclusive lock */
-                LockBuffer(buf, BUFFER_LOCK_UNLOCK);
-                LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
-                buf = InvalidBuffer;
-                goto recheck_xmax;      /* see comment above */
-            }
             HeapTupleHeaderSetXmax(tuple, InvalidTransactionId);
 
             /*
@@ -4046,30 +4020,15 @@ recheck_xmax:
     }
 
     /*
-     * Although xvac per se could only be set by old-style VACUUM FULL, it
-     * shares physical storage space with cmax, and so could be wiped out by
-     * someone setting xmax.  Hence recheck after changing lock, same as for
-     * xmax itself.
-     *
      * Old-style VACUUM FULL is gone, but we have to keep this code as long as
      * we support having MOVED_OFF/MOVED_IN tuples in the database.
      */
-recheck_xvac:
     if (tuple->t_infomask & HEAP_MOVED)
     {
         xid = HeapTupleHeaderGetXvac(tuple);
         if (TransactionIdIsNormal(xid) &&
             TransactionIdPrecedes(xid, cutoff_xid))
         {
-            if (buf != InvalidBuffer)
-            {
-                /* trade in share lock for exclusive lock */
-                LockBuffer(buf, BUFFER_LOCK_UNLOCK);
-                LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
-                buf = InvalidBuffer;
-                goto recheck_xvac;      /* see comment above */
-            }
             /*
              * If a MOVED_OFF tuple is not dead, the xvac transaction must
              * have failed; whereas a non-dead MOVED_IN tuple must mean the
@@ -4711,7 +4670,7 @@ heap_xlog_freeze(XLogRecPtr lsn, XLogRecord *record)
             ItemId      lp = PageGetItemId(page, *offsets);
             HeapTupleHeader tuple = (HeapTupleHeader) PageGetItem(page, lp);
 
-            (void) heap_freeze_tuple(tuple, cutoff_xid, InvalidBuffer);
+            (void) heap_freeze_tuple(tuple, cutoff_xid);
             offsets++;
         }
     }
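
With the lock-upgrade blocks and recheck labels removed, what survives is straight-line code. A condensed sketch of the post-patch function, paraphrased from the hunks above (the infomask hint-bit maintenance alluded to in the truncated comments is omitted, and the xvac branch body is reconstructed from context, so details may differ from the real source):

    bool
    heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid)
    {
        bool        changed = false;
        TransactionId xid;

        /* Freeze xmin if it is a normal XID older than the cutoff. */
        xid = HeapTupleHeaderGetXmin(tuple);
        if (TransactionIdIsNormal(xid) && TransactionIdPrecedes(xid, cutoff_xid))
        {
            HeapTupleHeaderSetXmin(tuple, FrozenTransactionId);
            changed = true;
        }

        /* Likewise xmax, unless it is a MultiXactId. */
        if (!(tuple->t_infomask & HEAP_XMAX_IS_MULTI))
        {
            xid = HeapTupleHeaderGetXmax(tuple);
            if (TransactionIdIsNormal(xid) && TransactionIdPrecedes(xid, cutoff_xid))
            {
                HeapTupleHeaderSetXmax(tuple, InvalidTransactionId);
                changed = true;
            }
        }

        /* xvac handling survives only for old MOVED_OFF/MOVED_IN tuples. */
        if (tuple->t_infomask & HEAP_MOVED)
        {
            xid = HeapTupleHeaderGetXvac(tuple);
            if (TransactionIdIsNormal(xid) && TransactionIdPrecedes(xid, cutoff_xid))
            {
                /* a MOVED_OFF tuple that is still live means the move failed */
                if (tuple->t_infomask & HEAP_MOVED_OFF)
                    HeapTupleHeaderSetXvac(tuple, InvalidTransactionId);
                else
                    HeapTupleHeaderSetXvac(tuple, FrozenTransactionId);
                changed = true;
            }
        }

        return changed;
    }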

src/backend/access/heap/rewriteheap.c

@@ -335,13 +335,8 @@ rewrite_heap_tuple(RewriteState state,
     /*
      * While we have our hands on the tuple, we may as well freeze any
      * very-old xmin or xmax, so that future VACUUM effort can be saved.
-     *
-     * Note we abuse heap_freeze_tuple() a bit here, since it's expecting to
-     * be given a pointer to a tuple in a disk buffer.  It happens though that
-     * we can get the right things to happen by passing InvalidBuffer for the
-     * buffer.
      */
-    heap_freeze_tuple(new_tuple->t_data, state->rs_freeze_xid, InvalidBuffer);
+    heap_freeze_tuple(new_tuple->t_data, state->rs_freeze_xid);
 
     /*
      * Invalid ctid means that ctid should point to the tuple itself. We'll
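
The paragraph deleted here existed only because the old signature implied a tuple sitting in a disk buffer. During a table rewrite the tuple being frozen is a backend-local copy, so no shared-buffer lock can apply and the exclusive-lock rule is trivially satisfied. A minimal illustration (hypothetical, not the commit's code; old_tuple is an assumed input):

    /* new_tuple lives in palloc'd backend memory (heap_copytuple),
     * not in a shared buffer, so no buffer lock is involved. */
    HeapTuple   new_tuple = heap_copytuple(old_tuple);

    heap_freeze_tuple(new_tuple->t_data, state->rs_freeze_xid);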

src/backend/commands/vacuumlazy.c

@@ -784,8 +784,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
              * Each non-removable tuple must be checked to see if it needs
              * freezing.  Note we already have exclusive buffer lock.
              */
-            if (heap_freeze_tuple(tuple.t_data, FreezeLimit,
-                                  InvalidBuffer))
+            if (heap_freeze_tuple(tuple.t_data, FreezeLimit))
                 frozen[nfrozen++] = offnum;
         }
     }                           /* scan along page */
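
The comment in this hunk ("we already have exclusive buffer lock") is the reason the recheck machinery could be deleted from heapam.c: the hazard it guarded against only arises while trading a share lock up to an exclusive one, roughly as in this illustrative timeline (not code from the commit):

    /* Old scheme: upgrading share -> exclusive is not atomic. */
    LockBuffer(buf, BUFFER_LOCK_UNLOCK);        /* drop the share lock... */
    /* ...window: another backend can grab the exclusive lock here and set
     * a new xmax, invalidating the check made under the share lock... */
    LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);     /* ...hence the old
                                                 * goto recheck_xmax */

Since every remaining caller now takes the exclusive lock before calling heap_freeze_tuple(), that window never opens.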

src/include/access/heapam.h

@@ -111,8 +111,7 @@ extern HTSU_Result heap_lock_tuple(Relation relation, HeapTuple tuple,
                TransactionId *update_xmax, CommandId cid,
                LockTupleMode mode, bool nowait);
 extern void heap_inplace_update(Relation relation, HeapTuple tuple);
-extern bool heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
-                  Buffer buf);
+extern bool heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid);
 extern bool heap_tuple_needs_freeze(HeapTupleHeader tuple, TransactionId cutoff_xid,
                     Buffer buf);