From 2b65bf046d8a23be25502638da77a1592da2548d Mon Sep 17 00:00:00 2001
From: Alexander Korotkov
Date: Mon, 3 Apr 2023 16:54:31 +0300
Subject: [PATCH] Revert 11470f544e

Discussion: https://postgr.es/m/20230323003003.plgaxjqahjgkuxrk%40awork3.anarazel.de
---
 src/backend/access/heap/heapam_handler.c | 109 +--------
 src/backend/access/table/tableam.c       |   6 +-
 src/backend/executor/nodeModifyTable.c   | 290 +++++++++++++----------
 src/include/access/tableam.h             |  28 +--
 src/include/executor/tuptable.h          |  38 ---
 src/tools/pgindent/typedefs.list         |   2 -
 6 files changed, 187 insertions(+), 286 deletions(-)

diff --git a/src/backend/access/heap/heapam_handler.c b/src/backend/access/heap/heapam_handler.c
index 97b5daee92..e2e35b71ea 100644
--- a/src/backend/access/heap/heapam_handler.c
+++ b/src/backend/access/heap/heapam_handler.c
@@ -45,12 +45,6 @@
 #include "utils/builtins.h"
 #include "utils/rel.h"

-static TM_Result heapam_tuple_lock_internal(Relation relation, ItemPointer tid,
-                                            Snapshot snapshot, TupleTableSlot *slot,
-                                            CommandId cid, LockTupleMode mode,
-                                            LockWaitPolicy wait_policy, uint8 flags,
-                                            TM_FailureData *tmfd, bool updated);
-
 static void reform_and_rewrite_tuple(HeapTuple tuple,
                                      Relation OldHeap, Relation NewHeap,
                                      Datum *values, bool *isnull,
                                      RewriteState rwstate);
@@ -305,46 +299,14 @@ heapam_tuple_complete_speculative(Relation relation, TupleTableSlot *slot,
 static TM_Result
 heapam_tuple_delete(Relation relation, ItemPointer tid, CommandId cid,
                     Snapshot snapshot, Snapshot crosscheck, bool wait,
-                    TM_FailureData *tmfd, bool changingPart,
-                    LazyTupleTableSlot *lockedSlot)
+                    TM_FailureData *tmfd, bool changingPart)
 {
-    TM_Result   result;
-
     /*
      * Currently Deleting of index tuples are handled at vacuum, in case if
      * the storage itself is cleaning the dead tuples by itself, it is the
      * time to call the index tuple deletion also.
      */
-    result = heap_delete(relation, tid, cid, crosscheck, wait,
-                         tmfd, changingPart);
-
-    /*
-     * If the tuple has been concurrently updated, then get the lock on it.
-     * (Do this if the caller asked for that by providing a 'lockedSlot'.) With
-     * the lock held, a retry of the delete should succeed even if there are
-     * more concurrent update attempts.
-     */
-    if (result == TM_Updated && lockedSlot)
-    {
-        TupleTableSlot *evalSlot;
-
-        Assert(wait);
-
-        evalSlot = LAZY_TTS_EVAL(lockedSlot);
-        result = heapam_tuple_lock_internal(relation, tid, snapshot,
-                                            evalSlot, cid, LockTupleExclusive,
-                                            LockWaitBlock,
-                                            TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
-                                            tmfd, true);
-
-        if (result == TM_Ok)
-        {
-            tmfd->traversed = true;
-            return TM_Updated;
-        }
-    }
-
-    return result;
+    return heap_delete(relation, tid, cid, crosscheck, wait, tmfd, changingPart);
 }


@@ -352,8 +314,7 @@ static TM_Result
 heapam_tuple_update(Relation relation, ItemPointer otid, TupleTableSlot *slot,
                     CommandId cid, Snapshot snapshot, Snapshot crosscheck,
                     bool wait, TM_FailureData *tmfd,
-                    LockTupleMode *lockmode, TU_UpdateIndexes *update_indexes,
-                    LazyTupleTableSlot *lockedSlot)
+                    LockTupleMode *lockmode, TU_UpdateIndexes *update_indexes)
 {
     bool        shouldFree = true;
     HeapTuple   tuple = ExecFetchSlotHeapTuple(slot, true, &shouldFree);
@@ -391,32 +352,6 @@ heapam_tuple_update(Relation relation, ItemPointer otid, TupleTableSlot *slot,
     if (shouldFree)
         pfree(tuple);

-    /*
-     * If the tuple has been concurrently updated, then get the lock on it.
-     * (Do this if the caller asked for that by providing a 'lockedSlot'.) With
-     * the lock held, a retry of the update should succeed even if there are
-     * more concurrent update attempts.
-     */
-    if (result == TM_Updated && lockedSlot)
-    {
-        TupleTableSlot *evalSlot;
-
-        Assert(wait);
-
-        evalSlot = LAZY_TTS_EVAL(lockedSlot);
-        result = heapam_tuple_lock_internal(relation, otid, snapshot,
-                                            evalSlot, cid, *lockmode,
-                                            LockWaitBlock,
-                                            TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
-                                            tmfd, true);
-
-        if (result == TM_Ok)
-        {
-            tmfd->traversed = true;
-            return TM_Updated;
-        }
-    }
-
     return result;
 }

@@ -425,26 +360,10 @@ heapam_tuple_lock(Relation relation, ItemPointer tid, Snapshot snapshot,
                   TupleTableSlot *slot, CommandId cid, LockTupleMode mode,
                   LockWaitPolicy wait_policy, uint8 flags,
                   TM_FailureData *tmfd)
-{
-    return heapam_tuple_lock_internal(relation, tid, snapshot, slot, cid,
-                                      mode, wait_policy, flags, tmfd, false);
-}
-
-/*
- * This routine does the work for heapam_tuple_lock(), but also supports an
- * `updated` argument to re-use the work done by heapam_tuple_update() or
- * heapam_tuple_delete() on figuring out that the tuple was concurrently updated.
- */
-static TM_Result
-heapam_tuple_lock_internal(Relation relation, ItemPointer tid,
-                           Snapshot snapshot, TupleTableSlot *slot,
-                           CommandId cid, LockTupleMode mode,
-                           LockWaitPolicy wait_policy, uint8 flags,
-                           TM_FailureData *tmfd, bool updated)
 {
     BufferHeapTupleTableSlot *bslot = (BufferHeapTupleTableSlot *) slot;
     TM_Result   result;
-    Buffer      buffer = InvalidBuffer;
+    Buffer      buffer;
     HeapTuple   tuple = &bslot->base.tupdata;
     bool        follow_updates;

@@ -455,26 +374,16 @@ heapam_tuple_lock_internal(Relation relation, ItemPointer tid,
 tuple_lock_retry:
     tuple->t_self = *tid;
-    if (!updated)
-        result = heap_lock_tuple(relation, tuple, cid, mode, wait_policy,
-                                 follow_updates, &buffer, tmfd);
-    else
-        result = TM_Updated;
+    result = heap_lock_tuple(relation, tuple, cid, mode, wait_policy,
+                             follow_updates, &buffer, tmfd);

     if (result == TM_Updated &&
         (flags & TUPLE_LOCK_FLAG_FIND_LAST_VERSION))
     {
-        if (!updated)
-        {
-            /* Should not encounter speculative tuple on recheck */
-            Assert(!HeapTupleHeaderIsSpeculative(tuple->t_data));
+        /* Should not encounter speculative tuple on recheck */
+        Assert(!HeapTupleHeaderIsSpeculative(tuple->t_data));

-            ReleaseBuffer(buffer);
-        }
-        else
-        {
-            updated = false;
-        }
+        ReleaseBuffer(buffer);

         if (!ItemPointerEquals(&tmfd->ctid, &tuple->t_self))
         {
diff --git a/src/backend/access/table/tableam.c b/src/backend/access/table/tableam.c
index 2a1a6ced3c..a5e6c92f35 100644
--- a/src/backend/access/table/tableam.c
+++ b/src/backend/access/table/tableam.c
@@ -306,8 +306,7 @@ simple_table_tuple_delete(Relation rel, ItemPointer tid, Snapshot snapshot)
                             GetCurrentCommandId(true),
                             snapshot, InvalidSnapshot,
                             true /* wait for commit */ ,
-                            &tmfd, false /* changingPart */ ,
-                            NULL);
+                            &tmfd, false /* changingPart */ );

     switch (result)
     {
@@ -356,8 +355,7 @@ simple_table_tuple_update(Relation rel, ItemPointer otid,
                             GetCurrentCommandId(true),
                             snapshot, InvalidSnapshot,
                             true /* wait for commit */ ,
-                            &tmfd, &lockmode, update_indexes,
-                            NULL);
+                            &tmfd, &lockmode, update_indexes);

     switch (result)
     {
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index e350375681..93ebfdbb0d 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -1324,62 +1324,26 @@ ExecDeletePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
     return true;
 }

-/*
- * The implementation of a LazyTupleTableSlot wrapper for the EPQ slot, to be
- * passed to table_tuple_update()/table_tuple_delete().
- */
-typedef struct
-{
-    EPQState   *epqstate;
-    ResultRelInfo *resultRelInfo;
-} GetEPQSlotArg;
-
-static TupleTableSlot *
-GetEPQSlot(void *arg)
-{
-    GetEPQSlotArg *slotArg = (GetEPQSlotArg *) arg;
-
-    return EvalPlanQualSlot(slotArg->epqstate,
-                            slotArg->resultRelInfo->ri_RelationDesc,
-                            slotArg->resultRelInfo->ri_RangeTableIndex);
-}
-
 /*
  * ExecDeleteAct -- subroutine for ExecDelete
  *
  * Actually delete the tuple from a plain table.
  *
- * If the 'lockUpdated' flag is set and the target tuple is updated, then
- * the latest version gets locked and fetched into the EPQ slot.
- *
  * Caller is in charge of doing EvalPlanQual as necessary
  */
 static TM_Result
 ExecDeleteAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
-              ItemPointer tupleid, bool changingPart, bool lockUpdated)
+              ItemPointer tupleid, bool changingPart)
 {
     EState     *estate = context->estate;
-    GetEPQSlotArg slotArg = {context->epqstate, resultRelInfo};
-    LazyTupleTableSlot lazyEPQSlot,
-               *lazyEPQSlotPtr;

-    if (lockUpdated)
-    {
-        MAKE_LAZY_TTS(&lazyEPQSlot, GetEPQSlot, &slotArg);
-        lazyEPQSlotPtr = &lazyEPQSlot;
-    }
-    else
-    {
-        lazyEPQSlotPtr = NULL;
-    }
     return table_tuple_delete(resultRelInfo->ri_RelationDesc, tupleid,
                               estate->es_output_cid,
                               estate->es_snapshot,
                               estate->es_crosscheck_snapshot,
                               true /* wait for commit */ ,
                               &context->tmfd,
-                              changingPart,
-                              lazyEPQSlotPtr);
+                              changingPart);
 }

 /*
@@ -1524,8 +1488,7 @@ ExecDelete(ModifyTableContext *context,
      * transaction-snapshot mode transactions.
      */
 ldelete:
-    result = ExecDeleteAct(context, resultRelInfo, tupleid, changingPart,
-                           !IsolationUsesXactSnapshot());
+    result = ExecDeleteAct(context, resultRelInfo, tupleid, changingPart);

     switch (result)
     {
@@ -1578,49 +1541,103 @@ ldelete:
                              errmsg("could not serialize access due to concurrent update")));

                 /*
-                 * ExecDeleteAct() has already locked the old tuple for
-                 * us. Now we need to copy it to the right slot.
+                 * Already know that we're going to need to do EPQ, so
+                 * fetch tuple directly into the right slot.
                  */
                 EvalPlanQualBegin(context->epqstate);
                 inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
                                              resultRelInfo->ri_RangeTableIndex);

-                /*
-                 * Save locked tuple for further processing for RETURNING
-                 * clause.
-                 */
-                if (processReturning &&
-                    resultRelInfo->ri_projectReturning &&
-                    !resultRelInfo->ri_FdwRoutine)
-                {
-                    TupleTableSlot *returningSlot;
+                result = table_tuple_lock(resultRelationDesc, tupleid,
+                                          estate->es_snapshot,
+                                          inputslot, estate->es_output_cid,
+                                          LockTupleExclusive, LockWaitBlock,
+                                          TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
+                                          &context->tmfd);

-                    returningSlot = ExecGetReturningSlot(estate,
-                                                         resultRelInfo);
-                    ExecCopySlot(returningSlot, inputslot);
-                    ExecMaterializeSlot(returningSlot);
+                switch (result)
+                {
+                    case TM_Ok:
+                        Assert(context->tmfd.traversed);
+
+                        /*
+                         * Save locked tuple for further processing of
+                         * RETURNING clause.
+                         */
+                        if (processReturning &&
+                            resultRelInfo->ri_projectReturning &&
+                            !resultRelInfo->ri_FdwRoutine)
+                        {
+                            TupleTableSlot *returningSlot;
+
+                            returningSlot = ExecGetReturningSlot(estate, resultRelInfo);
+                            ExecCopySlot(returningSlot, inputslot);
+                            ExecMaterializeSlot(returningSlot);
+                        }
+
+                        epqslot = EvalPlanQual(context->epqstate,
+                                               resultRelationDesc,
+                                               resultRelInfo->ri_RangeTableIndex,
+                                               inputslot);
+                        if (TupIsNull(epqslot))
+                            /* Tuple not passing quals anymore, exiting... */
+                            return NULL;
+
+                        /*
+                         * If requested, skip delete and pass back the
+                         * updated row.
+                         */
+                        if (epqreturnslot)
+                        {
+                            *epqreturnslot = epqslot;
+                            return NULL;
+                        }
+                        else
+                            goto ldelete;
+
+                    case TM_SelfModified:
+
+                        /*
+                         * This can be reached when following an update
+                         * chain from a tuple updated by another session,
+                         * reaching a tuple that was already updated in
+                         * this transaction. If previously updated by this
+                         * command, ignore the delete, otherwise error
+                         * out.
+                         *
+                         * See also TM_SelfModified response to
+                         * table_tuple_delete() above.
+                         */
+                        if (context->tmfd.cmax != estate->es_output_cid)
+                            ereport(ERROR,
+                                    (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
+                                     errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
+                                     errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
+                        return NULL;
+
+                    case TM_Deleted:
+                        /* tuple already deleted; nothing to do */
+                        return NULL;
+
+                    default:
+
+                        /*
+                         * TM_Invisible should be impossible because we're
+                         * waiting for updated row versions, and would
+                         * already have errored out if the first version
+                         * is invisible.
+                         *
+                         * TM_Updated should be impossible, because we're
+                         * locking the latest version via
+                         * TUPLE_LOCK_FLAG_FIND_LAST_VERSION.
+                         */
+                        elog(ERROR, "unexpected table_tuple_lock status: %u",
+                             result);
+                        return NULL;
                 }

-                Assert(context->tmfd.traversed);
-                epqslot = EvalPlanQual(context->epqstate,
-                                       resultRelationDesc,
-                                       resultRelInfo->ri_RangeTableIndex,
-                                       inputslot);
-                if (TupIsNull(epqslot))
-                    /* Tuple not passing quals anymore, exiting... */
-                    return NULL;
-
-                /*
-                 * If requested, skip delete and pass back the updated
-                 * row.
-                 */
-                if (epqreturnslot)
-                {
-                    *epqreturnslot = epqslot;
-                    return NULL;
-                }
-                else
-                    goto ldelete;
+                Assert(false);
+                break;
             }

         case TM_Deleted:
@@ -1965,15 +1982,12 @@ ExecUpdatePrepareSlot(ResultRelInfo *resultRelInfo,
 static TM_Result
 ExecUpdateAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
               ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
-              bool canSetTag, bool lockUpdated, UpdateContext *updateCxt)
+              bool canSetTag, UpdateContext *updateCxt)
 {
     EState     *estate = context->estate;
     Relation    resultRelationDesc = resultRelInfo->ri_RelationDesc;
     bool        partition_constraint_failed;
     TM_Result   result;
-    GetEPQSlotArg slotArg = {context->epqstate, resultRelInfo};
-    LazyTupleTableSlot lazyEPQSlot,
-               *lazyEPQSlotPtr;

     updateCxt->crossPartUpdate = false;

@@ -2099,23 +2113,13 @@ lreplace:
      * for referential integrity updates in transaction-snapshot mode
      * transactions.
      */
-    if (lockUpdated)
-    {
-        MAKE_LAZY_TTS(&lazyEPQSlot, GetEPQSlot, &slotArg);
-        lazyEPQSlotPtr = &lazyEPQSlot;
-    }
-    else
-    {
-        lazyEPQSlotPtr = NULL;
-    }
     result = table_tuple_update(resultRelationDesc, tupleid, slot,
                                 estate->es_output_cid,
                                 estate->es_snapshot,
                                 estate->es_crosscheck_snapshot,
                                 true /* wait for commit */ ,
                                 &context->tmfd, &updateCxt->lockmode,
-                                &updateCxt->updateIndexes,
-                                lazyEPQSlotPtr);
+                                &updateCxt->updateIndexes);

     if (result == TM_Ok)
         updateCxt->updated = true;
@@ -2269,7 +2273,7 @@ ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context,
 static TupleTableSlot *
 ExecUpdate(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
            ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
-           bool canSetTag, bool locked)
+           bool canSetTag)
 {
     EState     *estate = context->estate;
     Relation    resultRelationDesc = resultRelInfo->ri_RelationDesc;
@@ -2331,8 +2335,7 @@ ExecUpdate(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
      */
 redo_act:
     result = ExecUpdateAct(context, resultRelInfo, tupleid, oldtuple, slot,
-                           canSetTag, !IsolationUsesXactSnapshot(),
-                           &updateCxt);
+                           canSetTag, &updateCxt);

     /*
      * If ExecUpdateAct reports that a cross-partition update was done,
@@ -2391,39 +2394,81 @@ redo_act:
                     ereport(ERROR,
                             (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
                              errmsg("could not serialize access due to concurrent update")));
-                Assert(!locked);

                 /*
-                 * ExecUpdateAct() has already locked the old tuple for
-                 * us. Now we need to copy it to the right slot.
+                 * Already know that we're going to need to do EPQ, so
+                 * fetch tuple directly into the right slot.
                  */
                 inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
                                              resultRelInfo->ri_RangeTableIndex);

-                /* Make sure ri_oldTupleSlot is initialized. */
-                if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
-                    ExecInitUpdateProjection(context->mtstate,
-                                             resultRelInfo);
+                result = table_tuple_lock(resultRelationDesc, tupleid,
+                                          estate->es_snapshot,
+                                          inputslot, estate->es_output_cid,
+                                          updateCxt.lockmode, LockWaitBlock,
+                                          TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
+                                          &context->tmfd);

-                /*
-                 * Save the locked tuple for further calculation of the
-                 * new tuple.
-                 */
-                oldSlot = resultRelInfo->ri_oldTupleSlot;
-                ExecCopySlot(oldSlot, inputslot);
-                ExecMaterializeSlot(oldSlot);
-                Assert(context->tmfd.traversed);
+                switch (result)
+                {
+                    case TM_Ok:
+                        Assert(context->tmfd.traversed);

-                epqslot = EvalPlanQual(context->epqstate,
-                                       resultRelationDesc,
-                                       resultRelInfo->ri_RangeTableIndex,
-                                       inputslot);
-                if (TupIsNull(epqslot))
-                    /* Tuple not passing quals anymore, exiting... */
-                    return NULL;
-                slot = ExecGetUpdateNewTuple(resultRelInfo,
-                                             epqslot, oldSlot);
-                goto redo_act;
+                        /* Make sure ri_oldTupleSlot is initialized. */
+                        if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
+                            ExecInitUpdateProjection(context->mtstate,
+                                                     resultRelInfo);
+
+                        /*
+                         * Save the locked tuple for further calculation
+                         * of the new tuple.
+                         */
+                        oldSlot = resultRelInfo->ri_oldTupleSlot;
+                        ExecCopySlot(oldSlot, inputslot);
+                        ExecMaterializeSlot(oldSlot);
+
+                        epqslot = EvalPlanQual(context->epqstate,
+                                               resultRelationDesc,
+                                               resultRelInfo->ri_RangeTableIndex,
+                                               inputslot);
+                        if (TupIsNull(epqslot))
+                            /* Tuple not passing quals anymore, exiting... */
+                            return NULL;
+
+                        slot = ExecGetUpdateNewTuple(resultRelInfo,
+                                                     epqslot, oldSlot);
+                        goto redo_act;
+
+                    case TM_Deleted:
+                        /* tuple already deleted; nothing to do */
+                        return NULL;
+
+                    case TM_SelfModified:
+
+                        /*
+                         * This can be reached when following an update
+                         * chain from a tuple updated by another session,
+                         * reaching a tuple that was already updated in
+                         * this transaction. If previously modified by
+                         * this command, ignore the redundant update,
+                         * otherwise error out.
+                         *
+                         * See also TM_SelfModified response to
+                         * table_tuple_update() above.
+                         */
+                        if (context->tmfd.cmax != estate->es_output_cid)
+                            ereport(ERROR,
+                                    (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
+                                     errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
+                                     errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
+                        return NULL;
+
+                    default:
+                        /* see table_tuple_lock call in ExecDelete() */
+                        elog(ERROR, "unexpected table_tuple_lock status: %u",
+                             result);
+                        return NULL;
+                }
             }
             break;

@@ -2665,7 +2710,7 @@ ExecOnConflictUpdate(ModifyTableContext *context,
     *returning = ExecUpdate(context, resultRelInfo,
                             conflictTid, NULL,
                             resultRelInfo->ri_onConflict->oc_ProjSlot,
-                            canSetTag, true);
+                            canSetTag);

     /*
      * Clear out existing tuple, as there might not be another conflict among
@@ -2868,7 +2913,7 @@ lmerge_matched:
                         break;  /* concurrent update/delete */
                 }
                 result = ExecUpdateAct(context, resultRelInfo, tupleid, NULL,
-                                       newslot, false, false, &updateCxt);
+                                       newslot, false, &updateCxt);
                 if (result == TM_Ok && updateCxt.updated)
                 {
                     ExecUpdateEpilogue(context, &updateCxt, resultRelInfo,
@@ -2886,8 +2931,7 @@ lmerge_matched:
                         return true;    /* "do nothing" */
                     break;  /* concurrent update/delete */
                 }
-                result = ExecDeleteAct(context, resultRelInfo, tupleid,
-                                       false, false);
+                result = ExecDeleteAct(context, resultRelInfo, tupleid, false);
                 if (result == TM_Ok)
                 {
                     ExecDeleteEpilogue(context, resultRelInfo, tupleid, NULL,
@@ -3793,7 +3837,7 @@ ExecModifyTable(PlanState *pstate)

                 /* Now apply the update. */
                 slot = ExecUpdate(&context, resultRelInfo, tupleid, oldtuple,
-                                  slot, node->canSetTag, false);
+                                  slot, node->canSetTag);
                 break;

             case CMD_DELETE:
diff --git a/src/include/access/tableam.h b/src/include/access/tableam.h
index 7159365e65..50ae053f46 100644
--- a/src/include/access/tableam.h
+++ b/src/include/access/tableam.h
@@ -530,8 +530,7 @@ typedef struct TableAmRoutine
                                  Snapshot crosscheck,
                                  bool wait,
                                  TM_FailureData *tmfd,
-                                 bool changingPart,
-                                 LazyTupleTableSlot *lockedSlot);
+                                 bool changingPart);

     /* see table_tuple_update() for reference about parameters */
     TM_Result   (*tuple_update) (Relation rel,
                                  TupleTableSlot *slot,
                                  CommandId cid,
                                  Snapshot snapshot,
                                  Snapshot crosscheck,
                                  bool wait,
                                  TM_FailureData *tmfd,
                                  LockTupleMode *lockmode,
-                                 TU_UpdateIndexes *update_indexes,
-                                 LazyTupleTableSlot *lockedSlot);
+                                 TU_UpdateIndexes *update_indexes);

     /* see table_tuple_lock() for reference about parameters */
     TM_Result   (*tuple_lock) (Relation rel,
@@ -1459,7 +1457,7 @@ table_multi_insert(Relation rel, TupleTableSlot **slots, int nslots,
 }

 /*
- * Delete a tuple (or lock last tuple version if lockedSlot is given).
+ * Delete a tuple.
  *
  * NB: do not call this directly unless prepared to deal with
  * concurrent-update conditions. Use simple_table_tuple_delete instead.
@@ -1475,8 +1473,6 @@ table_multi_insert(Relation rel, TupleTableSlot **slots, int nslots,
  * tmfd - filled in failure cases (see below)
  * changingPart - true iff the tuple is being moved to another partition
  *      table due to an update of the partition key. Otherwise, false.
- * lockedSlot - lazy slot to save the locked tuple if should lock the last
- *      row version during the concurrent update. NULL if not needed.
  *
  * Normal, successful return value is TM_Ok, which means we did actually
  * delete it. Failure return codes are TM_SelfModified, TM_Updated, and
  * TM_BeingModified (the last only possible if wait == false).
@@ -1489,17 +1485,15 @@
 static inline TM_Result
 table_tuple_delete(Relation rel, ItemPointer tid, CommandId cid,
                    Snapshot snapshot, Snapshot crosscheck, bool wait,
-                   TM_FailureData *tmfd, bool changingPart,
-                   LazyTupleTableSlot *lockedSlot)
+                   TM_FailureData *tmfd, bool changingPart)
 {
     return rel->rd_tableam->tuple_delete(rel, tid, cid, snapshot, crosscheck,
-                                         wait, tmfd, changingPart,
-                                         lockedSlot);
+                                         wait, tmfd, changingPart);
 }

 /*
- * Update a tuple (or lock last tuple version if lockedSlot is given).
+ * Update a tuple.
  *
  * NB: do not call this directly unless you are prepared to deal with
  * concurrent-update conditions. Use simple_table_tuple_update instead.
@@ -1517,9 +1511,7 @@ table_tuple_delete(Relation rel, ItemPointer tid, CommandId cid,
  * lockmode - filled with lock mode acquired on tuple
  * update_indexes - in success cases this is set to true if new index entries
  *      are required for this tuple
- * lockedSlot - lazy slot to save the locked tuple if should lock the last
- *      row version during the concurrent update. NULL if not needed.
-
+ *
  * Normal, successful return value is TM_Ok, which means we did actually
  * update it. Failure return codes are TM_SelfModified, TM_Updated, and
  * TM_BeingModified (the last only possible if wait == false).
@@ -1538,14 +1530,12 @@
 static inline TM_Result
 table_tuple_update(Relation rel, ItemPointer otid, TupleTableSlot *slot,
                    CommandId cid, Snapshot snapshot, Snapshot crosscheck,
                    bool wait, TM_FailureData *tmfd, LockTupleMode *lockmode,
-                   TU_UpdateIndexes *update_indexes,
-                   LazyTupleTableSlot *lockedSlot)
+                   TU_UpdateIndexes *update_indexes)
 {
     return rel->rd_tableam->tuple_update(rel, otid, slot, cid, snapshot,
                                          crosscheck, wait, tmfd,
-                                         lockmode, update_indexes,
-                                         lockedSlot);
+                                         lockmode, update_indexes);
 }

 /*
diff --git a/src/include/executor/tuptable.h b/src/include/executor/tuptable.h
index 2e13ecc3ff..ff64b7cb98 100644
--- a/src/include/executor/tuptable.h
+++ b/src/include/executor/tuptable.h
@@ -300,44 +300,6 @@ typedef struct MinimalTupleTableSlot
 #define TupIsNull(slot) \
     ((slot) == NULL || TTS_EMPTY(slot))

-/*----------
- * LazyTupleTableSlot -- a lazy version of TupleTableSlot.
- *
- * Sometimes a caller might need to pass a slot to a function, and that slot
- * will most likely remain undemanded. Preallocating such a slot would be a
- * waste of resources in the majority of cases. A lazy slot is aimed at
- * resolving this problem: it is basically a promise to allocate the slot
- * once it is needed. Once the callee needs the slot, it can get it using
- * the LAZY_TTS_EVAL(lazySlot) macro.
- */
-typedef struct
-{
-    TupleTableSlot *slot;       /* cached slot or NULL if not yet allocated */
-    TupleTableSlot *(*getSlot) (void *arg);     /* callback for slot allocation */
-    void       *getSlotArg;     /* argument for the callback above */
-} LazyTupleTableSlot;
-
-/*
- * A constructor for the lazy slot.
- */ -#define MAKE_LAZY_TTS(lazySlot, callback, arg) \ - do { \ - (lazySlot)->slot = NULL; \ - (lazySlot)->getSlot = callback; \ - (lazySlot)->getSlotArg = arg; \ - } while (false) - -/* - * Macro for lazy slot evaluation. NULL lazy slot evaluates to NULL slot. - * Cached version is used if present. Use the callback otherwise. - */ -#define LAZY_TTS_EVAL(lazySlot) \ - ((lazySlot) ? \ - ((lazySlot)->slot ? \ - (lazySlot)->slot : \ - ((lazySlot)->slot = (lazySlot)->getSlot((lazySlot)->getSlotArg))) : \ - NULL) - /* in executor/execTuples.c */ extern TupleTableSlot *MakeTupleTableSlot(TupleDesc tupleDesc, const TupleTableSlotOps *tts_ops); diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list index b97174d160..5c0410869f 100644 --- a/src/tools/pgindent/typedefs.list +++ b/src/tools/pgindent/typedefs.list @@ -956,7 +956,6 @@ GenerationPointer GenericCosts GenericXLogState GeqoPrivateData -GetEPQSlotArg GetForeignJoinPaths_function GetForeignModifyBatchSize_function GetForeignPaths_function @@ -1402,7 +1401,6 @@ LagTracker LargeObjectDesc LastAttnumInfo Latch -LazyTupleTableSlot LerpFunc LexDescr LexemeEntry