postgresql/src/backend/executor/nodeLockRows.c

/*-------------------------------------------------------------------------
 *
 * nodeLockRows.c
 *    Routines to handle FOR UPDATE/FOR SHARE row locking
 *
 * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    src/backend/executor/nodeLockRows.c
 *
 *-------------------------------------------------------------------------
 */

/*
 *   INTERFACE ROUTINES
 *      ExecLockRows        - fetch locked rows
 *      ExecInitLockRows    - initialize node and subnodes..
 *      ExecEndLockRows     - shutdown node and subnodes
 */

#include "postgres.h"
#include "access/htup_details.h"
#include "access/xact.h"
#include "executor/executor.h"
#include "executor/nodeLockRows.h"
#include "foreign/fdwapi.h"
#include "miscadmin.h"
#include "storage/bufmgr.h"
#include "utils/rel.h"
#include "utils/tqual.h"

/* ----------------------------------------------------------------
 *      ExecLockRows
 * ----------------------------------------------------------------
 */
static TupleTableSlot *         /* return: a tuple or NULL */
ExecLockRows(PlanState *pstate)
{
    LockRowsState *node = castNode(LockRowsState, pstate);
    TupleTableSlot *slot;
    EState     *estate;
    PlanState  *outerPlan;
    bool        epq_needed;
    ListCell   *lc;

    CHECK_FOR_INTERRUPTS();

    /*
     * get information from the node
     */
    estate = node->ps.state;
    outerPlan = outerPlanState(node);

    /*
     * Get next tuple from subplan, if any.
     */
lnext:
    slot = ExecProcNode(outerPlan);

    if (TupIsNull(slot))
        return NULL;

    /* We don't need EvalPlanQual unless we get updated tuple version(s) */
    epq_needed = false;

    /*
     * Attempt to lock the source tuple(s).  (Note we only have locking
     * rowmarks in lr_arowMarks.)
     */
    foreach(lc, node->lr_arowMarks)
    {
        ExecAuxRowMark *aerm = (ExecAuxRowMark *) lfirst(lc);
        ExecRowMark *erm = aerm->rowmark;
        HeapTuple  *testTuple;
        Datum       datum;
        bool        isNull;
        HeapTupleData tuple;
        Buffer      buffer;
        HeapUpdateFailureData hufd;
        LockTupleMode lockmode;
        HTSU_Result test;
        HeapTuple   copyTuple;

        /* clear any leftover test tuple for this rel */
        testTuple = &(node->lr_curtuples[erm->rti - 1]);
        if (*testTuple != NULL)
            heap_freetuple(*testTuple);
        *testTuple = NULL;

        /* if child rel, must check whether it produced this row */
        if (erm->rti != erm->prti)
        {
            Oid         tableoid;

            datum = ExecGetJunkAttribute(slot,
                                         aerm->toidAttNo,
                                         &isNull);
            /* shouldn't ever get a null result... */
            if (isNull)
                elog(ERROR, "tableoid is NULL");
            tableoid = DatumGetObjectId(datum);

            Assert(OidIsValid(erm->relid));
            if (tableoid != erm->relid)
            {
                /* this child is inactive right now */
                erm->ermActive = false;
                ItemPointerSetInvalid(&(erm->curCtid));
                continue;
            }
        }
        erm->ermActive = true;

        /* fetch the tuple's ctid */
        datum = ExecGetJunkAttribute(slot,
                                     aerm->ctidAttNo,
                                     &isNull);
        /* shouldn't ever get a null result... */
        if (isNull)
            elog(ERROR, "ctid is NULL");

        /* requests for foreign tables must be passed to their FDW */
        if (erm->relation->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
        {
            FdwRoutine *fdwroutine;
            bool        updated = false;

            fdwroutine = GetFdwRoutineForRelation(erm->relation, false);
            /* this should have been checked already, but let's be safe */
            if (fdwroutine->RefetchForeignRow == NULL)
                ereport(ERROR,
                        (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                         errmsg("cannot lock rows in foreign table \"%s\"",
                                RelationGetRelationName(erm->relation))));
            copyTuple = fdwroutine->RefetchForeignRow(estate,
                                                      erm,
                                                      datum,
                                                      &updated);
            if (copyTuple == NULL)
            {
                /* couldn't get the lock, so skip this row */
                goto lnext;
            }

            /* save locked tuple for possible EvalPlanQual testing below */
            *testTuple = copyTuple;

            /*
             * if FDW says tuple was updated before getting locked, we need to
             * perform EPQ testing to see if quals are still satisfied
             */
            if (updated)
                epq_needed = true;

            continue;
        }

        /* okay, try to lock the tuple */
        tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
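
        /*
         * Translate the rowmark type into the tuple-level lock mode it calls
         * for (these correspond to FOR UPDATE, FOR NO KEY UPDATE, FOR SHARE
         * and FOR KEY SHARE, respectively).
         */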
        switch (erm->markType)
        {
            case ROW_MARK_EXCLUSIVE:
                lockmode = LockTupleExclusive;
                break;
            case ROW_MARK_NOKEYEXCLUSIVE:
                lockmode = LockTupleNoKeyExclusive;
                break;
            case ROW_MARK_SHARE:
                lockmode = LockTupleShare;
                break;
            case ROW_MARK_KEYSHARE:
                lockmode = LockTupleKeyShare;
                break;
            default:
                elog(ERROR, "unsupported rowmark type");
                lockmode = LockTupleNoKeyExclusive; /* keep compiler quiet */
                break;
        }
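
        /*
         * Note: the "true" argument below is heap_lock_tuple's
         * follow_updates flag, asking it to also lock any later versions in
         * the tuple's update chain; details of a failed or contended lock
         * attempt come back in hufd.
         */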
        test = heap_lock_tuple(erm->relation, &tuple,
                               estate->es_output_cid,
                               lockmode, erm->waitPolicy, true,
                               &buffer, &hufd);
        ReleaseBuffer(buffer);
        switch (test)
        {
            case HeapTupleWouldBlock:
                /* couldn't lock tuple in SKIP LOCKED mode */
                goto lnext;

            case HeapTupleSelfUpdated:

                /*
                 * The target tuple was already updated or deleted by the
                 * current command, or by a later command in the current
                 * transaction.  We *must* ignore the tuple in the former
                 * case, so as to avoid the "Halloween problem" of repeated
                 * update attempts.  In the latter case it might be sensible
                 * to fetch the updated tuple instead, but doing so would
                 * require changing heap_update and heap_delete to not
                 * complain about updating "invisible" tuples, which seems
                 * pretty scary (heap_lock_tuple will not complain, but few
                 * callers expect HeapTupleInvisible, and we're not one of
                 * them).  So for now, treat the tuple as deleted and do not
                 * process.
                 */
                goto lnext;

            case HeapTupleMayBeUpdated:
                /* got the lock successfully */
                break;

            case HeapTupleUpdated:
                if (IsolationUsesXactSnapshot())
                    ereport(ERROR,
                            (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
                             errmsg("could not serialize access due to concurrent update")));
                if (ItemPointerEquals(&hufd.ctid, &tuple.t_self))
                {
                    /* Tuple was deleted, so don't return it */
                    goto lnext;
                }

                /* updated, so fetch and lock the updated version */
                copyTuple = EvalPlanQualFetch(estate, erm->relation,
                                              lockmode, erm->waitPolicy,
                                              &hufd.ctid, hufd.xmax);
                if (copyTuple == NULL)
                {
                    /*
                     * Tuple was deleted; or it's locked and we're under SKIP
                     * LOCKED policy, so don't return it
                     */
                    goto lnext;
                }
                /* remember the actually locked tuple's TID */
                tuple.t_self = copyTuple->t_self;

                /* Save locked tuple for EvalPlanQual testing below */
                *testTuple = copyTuple;

                /* Remember we need to do EPQ testing */
                epq_needed = true;

                /* Continue loop until we have all target tuples */
                break;

            case HeapTupleInvisible:
                elog(ERROR, "attempted to lock invisible tuple");

            default:
                elog(ERROR, "unrecognized heap_lock_tuple status: %u",
                     test);
        }

        /* Remember locked tuple's TID for EPQ testing and WHERE CURRENT OF */
        erm->curCtid = tuple.t_self;
    }

    /*
     * If we need to do EvalPlanQual testing, do so.
     */
    if (epq_needed)
    {
        /* Initialize EPQ machinery */
        EvalPlanQualBegin(&node->lr_epqstate, estate);

        /*
         * Transfer any already-fetched tuples into the EPQ state, and fetch a
         * copy of any rows that were successfully locked without any update
         * having occurred.  (We do this in a separate pass so as to avoid
         * overhead in the common case where there are no concurrent updates.)
         * Make sure any inactive child rels have NULL test tuples in EPQ.
         */
        foreach(lc, node->lr_arowMarks)
        {
            ExecAuxRowMark *aerm = (ExecAuxRowMark *) lfirst(lc);
            ExecRowMark *erm = aerm->rowmark;
            HeapTupleData tuple;
            Buffer      buffer;

            /* skip non-active child tables, but clear their test tuples */
            if (!erm->ermActive)
            {
                Assert(erm->rti != erm->prti);  /* check it's child table */
                EvalPlanQualSetTuple(&node->lr_epqstate, erm->rti, NULL);
                continue;
            }

            /* was tuple updated and fetched above? */
            if (node->lr_curtuples[erm->rti - 1] != NULL)
            {
                /* yes, so set it as the EPQ test tuple for this rel */
                EvalPlanQualSetTuple(&node->lr_epqstate,
                                     erm->rti,
                                     node->lr_curtuples[erm->rti - 1]);
                /* freeing this tuple is now the responsibility of EPQ */
                node->lr_curtuples[erm->rti - 1] = NULL;
                continue;
            }

            /* foreign tables should have been fetched above */
            Assert(erm->relation->rd_rel->relkind != RELKIND_FOREIGN_TABLE);
            Assert(ItemPointerIsValid(&(erm->curCtid)));

            /* okay, fetch the tuple */
            tuple.t_self = erm->curCtid;
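            /*
             * SnapshotAny is safe here: we locked this TID above, so the
             * tuple version it names cannot have been removed, even if it is
             * no longer visible to our snapshot.
             */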
            if (!heap_fetch(erm->relation, SnapshotAny, &tuple, &buffer,
                            false, NULL))
                elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");

            /* successful, copy and store tuple */
            EvalPlanQualSetTuple(&node->lr_epqstate, erm->rti,
                                 heap_copytuple(&tuple));
            ReleaseBuffer(buffer);
        }

        /*
         * Now fetch any non-locked source rows --- the EPQ logic knows how to
         * do that.
         */
        EvalPlanQualSetSlot(&node->lr_epqstate, slot);
        EvalPlanQualFetchRowMarks(&node->lr_epqstate);

        /*
         * And finally we can re-evaluate the tuple.
         */
        slot = EvalPlanQualNext(&node->lr_epqstate);
        if (TupIsNull(slot))
        {
            /* Updated tuple fails qual, so ignore it and go on */
            goto lnext;
        }
    }

    /* Got all locks, so return the current tuple */
    return slot;
}

/* ----------------------------------------------------------------
 *      ExecInitLockRows
 *
 *      This initializes the LockRows node state structures and
 *      the node's subplan.
 * ----------------------------------------------------------------
 */
LockRowsState *
ExecInitLockRows(LockRows *node, EState *estate, int eflags)
{
    LockRowsState *lrstate;
    Plan       *outerPlan = outerPlan(node);
    List       *epq_arowmarks;
    ListCell   *lc;

    /* check for unsupported flags */
    Assert(!(eflags & EXEC_FLAG_MARK));

    /*
     * create state structure
     */
    lrstate = makeNode(LockRowsState);
    lrstate->ps.plan = (Plan *) node;
    lrstate->ps.state = estate;
    lrstate->ps.ExecProcNode = ExecLockRows;

    /*
     * Miscellaneous initialization
     *
     * LockRows nodes never call ExecQual or ExecProject.
     */

    /*
     * Tuple table initialization (XXX not actually used...)
     */
    ExecInitResultTupleSlot(estate, &lrstate->ps);

    /*
     * then initialize outer plan
     */
    outerPlanState(lrstate) = ExecInitNode(outerPlan, estate, eflags);

    /*
     * LockRows nodes do no projections, so initialize projection info for
     * this node appropriately
     */
    ExecAssignResultTypeFromTL(&lrstate->ps);
    lrstate->ps.ps_ProjInfo = NULL;

    /*
     * Create workspace in which we can remember per-RTE locked tuples
     */
    lrstate->lr_ntables = list_length(estate->es_range_table);
    lrstate->lr_curtuples = (HeapTuple *)
        palloc0(lrstate->lr_ntables * sizeof(HeapTuple));

    /*
     * Locate the ExecRowMark(s) that this node is responsible for, and
     * construct ExecAuxRowMarks for them.  (InitPlan should already have
     * built the global list of ExecRowMarks.)
     */
    lrstate->lr_arowMarks = NIL;
    epq_arowmarks = NIL;
    foreach(lc, node->rowMarks)
    {
        PlanRowMark *rc = lfirst_node(PlanRowMark, lc);
        ExecRowMark *erm;
        ExecAuxRowMark *aerm;

        /* ignore "parent" rowmarks; they are irrelevant at runtime */
        if (rc->isParent)
            continue;

        /* safety check on size of lr_curtuples array */
        Assert(rc->rti > 0 && rc->rti <= lrstate->lr_ntables);

        /* find ExecRowMark and build ExecAuxRowMark */
        erm = ExecFindRowMark(estate, rc->rti, false);
        aerm = ExecBuildAuxRowMark(erm, outerPlan->targetlist);

        /*
         * Only locking rowmarks go into our own list.  Non-locking marks are
         * passed off to the EvalPlanQual machinery.  This is because we don't
         * want to bother fetching non-locked rows unless we actually have to
         * do an EPQ recheck.
         */
        if (RowMarkRequiresRowShareLock(erm->markType))
            lrstate->lr_arowMarks = lappend(lrstate->lr_arowMarks, aerm);
        else
            epq_arowmarks = lappend(epq_arowmarks, aerm);
    }

    /* Now we have the info needed to set up EPQ state */
    EvalPlanQualInit(&lrstate->lr_epqstate, estate,
                     outerPlan, epq_arowmarks, node->epqParam);

    return lrstate;
}

/* ----------------------------------------------------------------
 *      ExecEndLockRows
 *
 *      This shuts down the subplan and frees resources allocated
 *      to this node.
 * ----------------------------------------------------------------
 */
void
ExecEndLockRows(LockRowsState *node)
{
    EvalPlanQualEnd(&node->lr_epqstate);
    ExecEndNode(outerPlanState(node));
}

void
ExecReScanLockRows(LockRowsState *node)
{
    /*
     * if chgParam of subnode is not null then plan will be re-scanned by
     * first ExecProcNode.
     */
    if (node->ps.lefttree->chgParam == NULL)
        ExecReScan(node->ps.lefttree);
}