From 2dc2e4e31adb71502074c8c2bf9e0766347aa6e5 Mon Sep 17 00:00:00 2001
From: Tom Lane <tgl@sss.pgh.pa.us>
Date: Fri, 30 Sep 2022 19:36:46 -0400
Subject: [PATCH] Avoid improbable PANIC during heap_update, redux.

Commit 34f581c39 intended to ensure that RelationGetBufferForTuple
would acquire a visibility-map page pin in case the otherBuffer's
all-visible bit had become set since we last had lock on that page.
But I missed a case: when we're extending the relation, VM concerns
were dealt with only in the relatively-less-likely case that we fail
to conditionally lock the otherBuffer.  I think I'd believed that we
couldn't need to worry about it if the conditional lock succeeds,
which is true for the target buffer; but the otherBuffer was unlocked
for awhile so its bit might be set anyway.  So we need to do the
GetVisibilityMapPins dance, and then also recheck the page's free
space, in both cases.

Per report from Jaime Casanova.  Back-patch to v12 as the previous
patch was (although there's still no evidence that the bug is
reachable pre-v14).

Discussion: https://postgr.es/m/E1lWLjP-00006Y-Ml@gemulon.postgresql.org
---
 src/backend/access/heap/hio.c | 41 +++++++++++++++++++----------------
 1 file changed, 23 insertions(+), 18 deletions(-)

diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c
index ae2e2ce37a..b0ece66629 100644
--- a/src/backend/access/heap/hio.c
+++ b/src/backend/access/heap/hio.c
@@ -678,29 +678,34 @@ loop:
 			LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
 			LockBuffer(otherBuffer, BUFFER_LOCK_EXCLUSIVE);
 			LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+		}
 
-			/*
-			 * Because the buffers were unlocked for a while, it's possible,
-			 * although unlikely, that an all-visible flag became set or that
-			 * somebody used up the available space in the new page.  We can
-			 * use GetVisibilityMapPins to deal with the first case.  In the
-			 * second case, just retry from start.
-			 */
-			GetVisibilityMapPins(relation, otherBuffer, buffer,
-								 otherBlock, targetBlock, vmbuffer_other,
-								 vmbuffer);
+		/*
+		 * Because the buffers were unlocked for a while, it's possible,
+		 * although unlikely, that an all-visible flag became set or that
+		 * somebody used up the available space in the new page.  We can use
+		 * GetVisibilityMapPins to deal with the first case.  In the second
+		 * case, just retry from start.
+		 */
+		GetVisibilityMapPins(relation, otherBuffer, buffer,
+							 otherBlock, targetBlock, vmbuffer_other,
+							 vmbuffer);
 
-			if (len > PageGetHeapFreeSpace(page))
-			{
-				LockBuffer(otherBuffer, BUFFER_LOCK_UNLOCK);
-				UnlockReleaseBuffer(buffer);
+		/*
+		 * Note that we have to check the available space even if our
+		 * conditional lock succeeded, because GetVisibilityMapPins might've
+		 * transiently released lock on the target buffer to acquire a VM pin
+		 * for the otherBuffer.
+		 */
+		if (len > PageGetHeapFreeSpace(page))
+		{
+			LockBuffer(otherBuffer, BUFFER_LOCK_UNLOCK);
+			UnlockReleaseBuffer(buffer);
 
-				goto loop;
-			}
+			goto loop;
 		}
 	}
-
-	if (len > PageGetHeapFreeSpace(page))
+	else if (len > PageGetHeapFreeSpace(page))
 	{
 		/* We should not get here given the test at the top */
 		elog(PANIC, "tuple is too big: size %zu", len);