Fix VM buffer pin management in heap_lock_updated_tuple_rec().

Sloppy coding in this function could lead to leaking a VM buffer pin,
or to attempting to free the same pin twice.  Repair.  While at it,
reduce the code's tendency to free and reacquire the same page pin.
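
As a minimal standalone sketch of those two hazards -- hypothetical
code, not from heapam.c -- the "leaky" shape below loses a pin on an
early return, while the "tidy" shape funnels every exit through one
cleanup point, the same idiom this patch adopts with its out_locked/
out_unlocked labels:

    #include <stdio.h>

    static int pin_count = 0;           /* stand-in for a buffer refcount */

    static void pin_page(void)   { pin_count++; }
    static void unpin_page(void) { pin_count--; }

    /* Buggy shape: an early return skips the release, leaking the pin. */
    static int leaky(int early_exit)
    {
        pin_page();
        if (early_exit)
            return 0;                   /* oops: pin_count stays at 1 */
        unpin_page();
        return 1;
    }

    /* Fixed shape: all exits share one release point, so the pin is
     * freed exactly once and can never be freed twice. */
    static int tidy(int early_exit)
    {
        int result = 1;

        pin_page();
        if (early_exit)
        {
            result = 0;
            goto out;
        }
        /* ... normal work while the pin is held ... */
    out:
        unpin_page();
        return result;
    }

    int main(void)
    {
        leaky(1);
        printf("after leaky: %d pin(s) leaked\n", pin_count);   /* 1 */

        pin_count = 0;
        tidy(1);
        printf("after tidy:  %d pin(s) leaked\n", pin_count);   /* 0 */
        return 0;
    }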

Back-patch to 9.6; before that, this routine did not concern itself
with VM pages.

Amit Kapila and Tom Lane

Discussion: https://postgr.es/m/CAA4eK1KJKwhc=isgTQHjM76CAdVswzNeAuZkh_cx-6QgGkSEgA@mail.gmail.com
Tom Lane 2018-03-02 17:40:48 -05:00
parent e94f2bc809
commit 0b1d1a038b
1 changed file with 16 additions and 8 deletions

--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -5677,6 +5677,7 @@ heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid,
 				new_xmax;
 	TransactionId priorXmax = InvalidTransactionId;
 	bool		cleared_all_frozen = false;
+	bool		pinned_desired_page;
 	Buffer		vmbuffer = InvalidBuffer;
 	BlockNumber block;
 
@@ -5698,7 +5699,8 @@ heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid,
 			 * chain, and there's no further tuple to lock: return success to
 			 * caller.
 			 */
-			return HeapTupleMayBeUpdated;
+			result = HeapTupleMayBeUpdated;
+			goto out_unlocked;
 		}
 
 l4:
@@ -5711,9 +5713,12 @@ l4:
 		 * to recheck after we have the lock.
 		 */
 		if (PageIsAllVisible(BufferGetPage(buf)))
+		{
 			visibilitymap_pin(rel, block, &vmbuffer);
+			pinned_desired_page = true;
+		}
 		else
-			vmbuffer = InvalidBuffer;
+			pinned_desired_page = false;
 
 		LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
 
@@ -5722,8 +5727,13 @@ l4:
 		 * all visible while we were busy locking the buffer, we'll have to
 		 * unlock and re-lock, to avoid holding the buffer lock across I/O.
 		 * That's a bit unfortunate, but hopefully shouldn't happen often.
+		 *
+		 * Note: in some paths through this function, we will reach here
+		 * holding a pin on a vm page that may or may not be the one matching
+		 * this page.  If this page isn't all-visible, we won't use the vm
+		 * page, but we hold onto such a pin till the end of the function.
 		 */
-		if (vmbuffer == InvalidBuffer && PageIsAllVisible(BufferGetPage(buf)))
+		if (!pinned_desired_page && PageIsAllVisible(BufferGetPage(buf)))
 		{
 			LockBuffer(buf, BUFFER_LOCK_UNLOCK);
 			visibilitymap_pin(rel, block, &vmbuffer);
@@ -5749,8 +5759,8 @@ l4:
 			 */
 			if (TransactionIdDidAbort(HeapTupleHeaderGetXmin(mytup.t_data)))
 			{
-				UnlockReleaseBuffer(buf);
-				return HeapTupleMayBeUpdated;
+				result = HeapTupleMayBeUpdated;
+				goto out_locked;
 			}
 
 			old_infomask = mytup.t_data->t_infomask;
@@ -5957,8 +5967,6 @@ next:
 		priorXmax = HeapTupleHeaderGetUpdateXid(mytup.t_data);
 		ItemPointerCopy(&(mytup.t_data->t_ctid), &tupid);
 		UnlockReleaseBuffer(buf);
-		if (vmbuffer != InvalidBuffer)
-			ReleaseBuffer(vmbuffer);
 	}
 
 	result = HeapTupleMayBeUpdated;
@@ -5966,11 +5974,11 @@ next:
 out_locked:
 	UnlockReleaseBuffer(buf);
-
+out_unlocked:
 	if (vmbuffer != InvalidBuffer)
 		ReleaseBuffer(vmbuffer);
 
 	return result;
 }
 
 /*
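
The removal of the per-iteration ReleaseBuffer() call (second-to-last
hunk) is the "free and reacquire" half of the fix: the vm pin is now
carried from one loop iteration to the next, and visibilitymap_pin()
keeps an already-matching pin instead of swapping it.  A rough
standalone model of the effect -- hypothetical names and granularity,
not the real visibilitymap.c logic:

    #include <stdio.h>

    #define HEAP_PAGES_PER_VM_PAGE 4    /* pretend map granularity */
    #define NO_PIN (-1)

    static int pins_taken = 0;          /* counts real pin acquisitions */

    /* Pin the vm page covering "block", reusing *vm when it already
     * matches -- the short-circuit visibilitymap_pin() also performs. */
    static void vm_pin(int block, int *vm)
    {
        int needed = block / HEAP_PAGES_PER_VM_PAGE;

        if (*vm == needed)
            return;                     /* pin carried over: no work */
        *vm = needed;                   /* release old pin, take new one */
        pins_taken++;
    }

    int main(void)
    {
        int chain_blocks[] = {10, 10, 11, 40, 41};  /* heap pages visited */
        int vm = NO_PIN;

        for (int i = 0; i < 5; i++)
            vm_pin(chain_blocks[i], &vm);   /* pin kept across iterations */

        /* Pages 10, 10, 11 share one vm page and 40, 41 share another,
         * so this prints 2; releasing the pin every iteration would
         * have cost 5 acquisitions. */
        printf("%d vm-page pins for 5 lookups\n", pins_taken);
        return 0;
    }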