From bfa2ab56bb8c512dc8613ee3ff0936568a1c8418 Mon Sep 17 00:00:00 2001
From: Andres Freund
Date: Fri, 15 Jul 2016 17:49:48 -0700
Subject: [PATCH] Fix torn-page, unlogged xid and further risks from
 heap_update().

When heap_update needs to look for a page for the new tuple version,
because the current one doesn't have sufficient free space, or when
columns have to be processed by the tuple toaster, it has to release the
lock on the old page while doing so.  Otherwise there'd be lock ordering
and lock nesting issues.

To prevent concurrent sessions from trying to update / delete / lock the
tuple while the page's content lock is released, the tuple's xmax is set
to the current session's xid.

That unfortunately was done without any WAL logging, thereby violating
the rule that no XIDs may appear on disk without a corresponding WAL
record.  If the database were to crash / fail over while the page-level
lock is released, and some activity led to the page being written out to
disk, the xid could end up being reused; potentially leading to the row
becoming invisible.

There might be additional risks from having t_ctid not point at the
tuple itself while the appropriate lock infomask fields are not set.

To fix, compute the appropriate xmax/infomask combination for locking
the tuple, and perform WAL logging using the existing XLOG_HEAP_LOCK
record.  That allows the fix to be backpatched.

This issue has existed for a long time.  There appear to have been
partial attempts at preventing the danger, but these were never fully
implemented, and were removed a long time ago, in 11919160 (cf.
HEAP_XMAX_UNLOGGED).

In master / 9.6, there's an additional issue, namely that the visibility
map's freeze bit isn't reset at that point yet.  Since that's a new
issue, introduced only in a892234f830, it'll be fixed in a separate
commit.

Author: Masahiko Sawada and Andres Freund
Reported-By: Different aspects by Thomas Munro, Noah Misch, and others
Discussion: CAEepm=3fWAbWryVW9swHyLTY4sXVf0xbLvXqOwUoDiNCx9mBjQ@mail.gmail.com
Backpatch: 9.1/all supported versions
---
 src/backend/access/heap/heapam.c | 96 ++++++++++++++++++++++++--------
 1 file changed, 73 insertions(+), 23 deletions(-)

diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 41b56546ba..2815d916f3 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -3455,8 +3455,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
 				newbuf,
 				vmbuffer = InvalidBuffer,
 				vmbuffer_new = InvalidBuffer;
-	bool		need_toast,
-				already_marked;
+	bool		need_toast;
 	Size		newtupsize,
 				pagefree;
 	bool		have_tuple_lock = false;
@@ -3898,8 +3897,8 @@ l2:
 	 * on the same page as the old, then we need to release the content lock
 	 * (but not the pin!) on the old tuple's buffer while we are off doing
 	 * TOAST and/or table-file-extension work.  We must mark the old tuple to
-	 * show that it's already being updated, else other processes may try to
-	 * update it themselves.
+	 * show that it's locked, else other processes may try to update it
+	 * themselves.
 	 *
 	 * We need to invoke the toaster if there are already any out-of-line
 	 * toasted values present, or if the new tuple is over-threshold.
@@ -3923,19 +3922,73 @@ l2:
 
 	if (need_toast || newtupsize > pagefree)
 	{
+		TransactionId xmax_lock_old_tuple;
+		uint16		infomask_lock_old_tuple,
+					infomask2_lock_old_tuple;
+
+		/*
+		 * To prevent concurrent sessions from updating the tuple, we have to
+		 * temporarily mark it locked, while we release the page's content
+		 * lock.
+		 *
+		 * To satisfy the rule that any xid potentially appearing in a buffer
+		 * written out to disk has to be WAL-logged, we unfortunately have to
+		 * WAL log this temporary modification.  We can reuse xl_heap_lock
+		 * for this purpose.  If we crash/error before following through
+		 * with the actual update, xmax will be of an aborted transaction,
+		 * allowing other sessions to proceed.
+		 */
+
+		/*
+		 * Compute xmax / infomask appropriate for locking the tuple.  This
+		 * has to be done separately from the xmax/infomask used for the
+		 * update itself, because the potentially created multixact would
+		 * otherwise be wrong.
+		 */
+		compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(oldtup.t_data),
+								  oldtup.t_data->t_infomask,
+								  oldtup.t_data->t_infomask2,
+								  xid, *lockmode, false,
+								  &xmax_lock_old_tuple, &infomask_lock_old_tuple,
+								  &infomask2_lock_old_tuple);
+
+		Assert(HEAP_XMAX_IS_LOCKED_ONLY(infomask_lock_old_tuple));
+
+		START_CRIT_SECTION();
+
 		/* Clear obsolete visibility flags ... */
 		oldtup.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
 		oldtup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
 		HeapTupleClearHotUpdated(&oldtup);
 		/* ... and store info about transaction updating this tuple */
-		Assert(TransactionIdIsValid(xmax_old_tuple));
-		HeapTupleHeaderSetXmax(oldtup.t_data, xmax_old_tuple);
-		oldtup.t_data->t_infomask |= infomask_old_tuple;
-		oldtup.t_data->t_infomask2 |= infomask2_old_tuple;
+		Assert(TransactionIdIsValid(xmax_lock_old_tuple));
+		HeapTupleHeaderSetXmax(oldtup.t_data, xmax_lock_old_tuple);
+		oldtup.t_data->t_infomask |= infomask_lock_old_tuple;
+		oldtup.t_data->t_infomask2 |= infomask2_lock_old_tuple;
 		HeapTupleHeaderSetCmax(oldtup.t_data, cid, iscombo);
-		/* temporarily make it look not-updated */
+
+		/* temporarily make it look not-updated, but locked */
 		oldtup.t_data->t_ctid = oldtup.t_self;
-		already_marked = true;
+
+		MarkBufferDirty(buffer);
+
+		if (RelationNeedsWAL(relation))
+		{
+			xl_heap_lock xlrec;
+			XLogRecPtr	recptr;
+
+			XLogBeginInsert();
+			XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
+
+			xlrec.offnum = ItemPointerGetOffsetNumber(&oldtup.t_self);
+			xlrec.locking_xid = xmax_lock_old_tuple;
+			xlrec.infobits_set = compute_infobits(oldtup.t_data->t_infomask,
+												  oldtup.t_data->t_infomask2);
+			XLogRegisterData((char *) &xlrec, SizeOfHeapLock);
+			recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_LOCK);
+			PageSetLSN(page, recptr);
+		}
+
+		END_CRIT_SECTION();
+
 		LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
 
 		/*
@@ -4006,7 +4059,6 @@ l2:
 	else
 	{
 		/* No TOAST work needed, and it'll fit on same page */
-		already_marked = false;
 		newbuf = buffer;
 		heaptup = newtup;
 	}
@@ -4093,18 +4145,16 @@ l2:
 
 	RelationPutHeapTuple(relation, newbuf, heaptup, false);	/* insert new
																 * tuple */
 
-	if (!already_marked)
-	{
-		/* Clear obsolete visibility flags ... */
-		oldtup.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
-		oldtup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
-		/* ... and store info about transaction updating this tuple */
-		Assert(TransactionIdIsValid(xmax_old_tuple));
-		HeapTupleHeaderSetXmax(oldtup.t_data, xmax_old_tuple);
-		oldtup.t_data->t_infomask |= infomask_old_tuple;
-		oldtup.t_data->t_infomask2 |= infomask2_old_tuple;
-		HeapTupleHeaderSetCmax(oldtup.t_data, cid, iscombo);
-	}
+
+	/* Clear obsolete visibility flags, possibly set by ourselves above... */
+	oldtup.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
+	oldtup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
+	/* ... and store info about transaction updating this tuple */
+	Assert(TransactionIdIsValid(xmax_old_tuple));
+	HeapTupleHeaderSetXmax(oldtup.t_data, xmax_old_tuple);
+	oldtup.t_data->t_infomask |= infomask_old_tuple;
+	oldtup.t_data->t_infomask2 |= infomask2_old_tuple;
+	HeapTupleHeaderSetCmax(oldtup.t_data, cid, iscombo);
 
 	/* record address of new tuple in t_ctid of old one */
 	oldtup.t_data->t_ctid = heaptup->t_self;
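
For reviewers who want the shape of the change without wading through the
hunks: the sketch below condenses the sequence the patch establishes before
heap_update() releases the old page's content lock.  It is illustration
only.  The helper name mark_old_tuple_locked() and the parameter plumbing
are invented; in the patch this code runs inline in heap_update().  Every
call in the body is one the diff above uses, and the sketch assumes it
lives in heapam.c, where the static helpers compute_new_xmax_infomask()
and compute_infobits() are visible.  Header names are those of recent
branches; older back branches differ.

#include "postgres.h"

#include "access/heapam.h"			/* LockTupleMode */
#include "access/heapam_xlog.h"		/* xl_heap_lock, XLOG_HEAP_LOCK */
#include "access/htup_details.h"	/* HeapTupleHeaderSetXmax() etc. */
#include "access/xloginsert.h"		/* XLogBeginInsert() and friends */
#include "storage/bufmgr.h"			/* MarkBufferDirty(), LockBuffer() */
#include "utils/rel.h"				/* RelationNeedsWAL() */

static void
mark_old_tuple_locked(Relation relation, Buffer buffer,
					  HeapTupleData *oldtup, TransactionId xid,
					  LockTupleMode lockmode, CommandId cid, bool iscombo)
{
	Page		page = BufferGetPage(buffer);
	TransactionId xmax_lock_old_tuple;
	uint16		infomask_lock_old_tuple,
				infomask2_lock_old_tuple;

	/*
	 * Compute an xmax/infomask pair that merely locks the tuple; the
	 * combination for the eventual update is computed separately later.
	 */
	compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(oldtup->t_data),
							  oldtup->t_data->t_infomask,
							  oldtup->t_data->t_infomask2,
							  xid, lockmode, false,
							  &xmax_lock_old_tuple, &infomask_lock_old_tuple,
							  &infomask2_lock_old_tuple);
	Assert(HEAP_XMAX_IS_LOCKED_ONLY(infomask_lock_old_tuple));

	/* Apply the lock and WAL log it within a single critical section. */
	START_CRIT_SECTION();

	oldtup->t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
	oldtup->t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
	HeapTupleClearHotUpdated(oldtup);
	HeapTupleHeaderSetXmax(oldtup->t_data, xmax_lock_old_tuple);
	oldtup->t_data->t_infomask |= infomask_lock_old_tuple;
	oldtup->t_data->t_infomask2 |= infomask2_lock_old_tuple;
	HeapTupleHeaderSetCmax(oldtup->t_data, cid, iscombo);
	oldtup->t_data->t_ctid = oldtup->t_self;	/* locked, not updated */

	MarkBufferDirty(buffer);

	if (RelationNeedsWAL(relation))
	{
		xl_heap_lock xlrec;
		XLogRecPtr	recptr;

		xlrec.offnum = ItemPointerGetOffsetNumber(&oldtup->t_self);
		xlrec.locking_xid = xmax_lock_old_tuple;
		xlrec.infobits_set = compute_infobits(oldtup->t_data->t_infomask,
											  oldtup->t_data->t_infomask2);

		XLogBeginInsert();
		XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
		XLogRegisterData((char *) &xlrec, SizeOfHeapLock);
		recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_LOCK);
		PageSetLSN(page, recptr);
	}

	END_CRIT_SECTION();

	/* Only now is it safe to give up the page's content lock. */
	LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
}

The point of the ordering is that the buffer is dirtied and the WAL record
emitted inside one critical section, so the locking xid cannot reach disk
ahead of its WAL record; and because the infomask marks the tuple as locked
only, a crash before the real update merely leaves behind a lock held by an
aborted transaction.  Reusing the existing XLOG_HEAP_LOCK record, rather
than inventing a new record type, is what makes the fix backpatchable.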