1996-07-09 08:22:35 +02:00
|
|
|
/*-------------------------------------------------------------------------
|
|
|
|
*
|
1999-02-14 00:22:53 +01:00
|
|
|
* execScan.c
|
1997-09-07 07:04:48 +02:00
|
|
|
* This code provides support for generalized relation scans. ExecScan
|
|
|
|
* is passed a node and a pointer to a function to "do the right thing"
|
|
|
|
* and return a tuple from the relation. ExecScan then does the tedious
|
|
|
|
* stuff - checking the qualification and projecting the tuple
|
|
|
|
* appropriately.
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
2014-01-07 22:05:30 +01:00
|
|
|
* Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
|
2000-01-26 06:58:53 +01:00
|
|
|
* Portions Copyright (c) 1994, Regents of the University of California
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
|
|
|
*
|
|
|
|
* IDENTIFICATION
|
2010-09-20 22:08:53 +02:00
|
|
|
* src/backend/executor/execScan.c
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
|
|
|
*-------------------------------------------------------------------------
|
|
|
|
*/
|
2002-01-06 01:37:44 +01:00
|
|
|
#include "postgres.h"
|
1996-07-09 08:22:35 +02:00
|
|
|
|
|
|
|
#include "executor/executor.h"
|
2002-01-06 01:37:44 +01:00
|
|
|
#include "miscadmin.h"
|
2000-07-17 05:05:41 +02:00
|
|
|
#include "utils/memutils.h"
|
|
|
|
|
1996-07-09 08:22:35 +02:00
|
|
|
|
2004-01-22 03:23:21 +01:00
|
|
|
static bool tlist_matches_tupdesc(PlanState *ps, List *tlist, Index varno, TupleDesc tupdesc);
|
2003-02-03 16:07:08 +01:00
|
|
|
|
|
|
|
|
Re-implement EvalPlanQual processing to improve its performance and eliminate
a lot of strange behaviors that occurred in join cases. We now identify the
"current" row for every joined relation in UPDATE, DELETE, and SELECT FOR
UPDATE/SHARE queries. If an EvalPlanQual recheck is necessary, we jam the
appropriate row into each scan node in the rechecking plan, forcing it to emit
only that one row. The former behavior could rescan the whole of each joined
relation for each recheck, which was terrible for performance, and what's much
worse could result in duplicated output tuples.
Also, the original implementation of EvalPlanQual could not re-use the recheck
execution tree --- it had to go through a full executor init and shutdown for
every row to be tested. To avoid this overhead, I've associated a special
runtime Param with each LockRows or ModifyTable plan node, and arranged to
make every scan node below such a node depend on that Param. Thus, by
signaling a change in that Param, the EPQ machinery can just rescan the
already-built test plan.
This patch also adds a prohibition on set-returning functions in the
targetlist of SELECT FOR UPDATE/SHARE. This is needed to avoid the
duplicate-output-tuple problem. It seems fairly reasonable since the
other restrictions on SELECT FOR UPDATE are meant to ensure that there
is a unique correspondence between source tuples and result tuples,
which an output SRF destroys as much as anything else does.
2009-10-26 03:26:45 +01:00
|
|
|
/*
|
|
|
|
* ExecScanFetch -- fetch next potential tuple
|
|
|
|
*
|
|
|
|
* This routine is concerned with substituting a test tuple if we are
|
2014-05-06 18:12:18 +02:00
|
|
|
* inside an EvalPlanQual recheck. If we aren't, just execute
|
Re-implement EvalPlanQual processing to improve its performance and eliminate
a lot of strange behaviors that occurred in join cases. We now identify the
"current" row for every joined relation in UPDATE, DELETE, and SELECT FOR
UPDATE/SHARE queries. If an EvalPlanQual recheck is necessary, we jam the
appropriate row into each scan node in the rechecking plan, forcing it to emit
only that one row. The former behavior could rescan the whole of each joined
relation for each recheck, which was terrible for performance, and what's much
worse could result in duplicated output tuples.
Also, the original implementation of EvalPlanQual could not re-use the recheck
execution tree --- it had to go through a full executor init and shutdown for
every row to be tested. To avoid this overhead, I've associated a special
runtime Param with each LockRows or ModifyTable plan node, and arranged to
make every scan node below such a node depend on that Param. Thus, by
signaling a change in that Param, the EPQ machinery can just rescan the
already-built test plan.
This patch also adds a prohibition on set-returning functions in the
targetlist of SELECT FOR UPDATE/SHARE. This is needed to avoid the
duplicate-output-tuple problem. It seems fairly reasonable since the
other restrictions on SELECT FOR UPDATE are meant to ensure that there
is a unique correspondence between source tuples and result tuples,
which an output SRF destroys as much as anything else does.
2009-10-26 03:26:45 +01:00
|
|
|
* the access method's next-tuple routine.
|
|
|
|
*/
|
|
|
|
static inline TupleTableSlot *
|
|
|
|
ExecScanFetch(ScanState *node,
|
|
|
|
ExecScanAccessMtd accessMtd,
|
|
|
|
ExecScanRecheckMtd recheckMtd)
|
|
|
|
{
|
|
|
|
EState *estate = node->ps.state;
|
|
|
|
|
|
|
|
if (estate->es_epqTuple != NULL)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* We are inside an EvalPlanQual recheck. Return the test tuple if
|
|
|
|
* one is available, after rechecking any access-method-specific
|
|
|
|
* conditions.
|
|
|
|
*/
|
|
|
|
Index scanrelid = ((Scan *) node->ps.plan)->scanrelid;
|
|
|
|
|
|
|
|
Assert(scanrelid > 0);
|
|
|
|
if (estate->es_epqTupleSet[scanrelid - 1])
|
|
|
|
{
|
|
|
|
TupleTableSlot *slot = node->ss_ScanTupleSlot;
|
|
|
|
|
|
|
|
/* Return empty slot if we already returned a tuple */
|
|
|
|
if (estate->es_epqScanDone[scanrelid - 1])
|
|
|
|
return ExecClearTuple(slot);
|
|
|
|
/* Else mark to remember that we shouldn't return more */
|
|
|
|
estate->es_epqScanDone[scanrelid - 1] = true;
|
|
|
|
|
|
|
|
/* Return empty slot if we haven't got a test tuple */
|
|
|
|
if (estate->es_epqTuple[scanrelid - 1] == NULL)
|
|
|
|
return ExecClearTuple(slot);
|
|
|
|
|
|
|
|
/* Store test tuple in the plan node's scan slot */
|
|
|
|
ExecStoreTuple(estate->es_epqTuple[scanrelid - 1],
|
|
|
|
slot, InvalidBuffer, false);
|
|
|
|
|
|
|
|
/* Check if it meets the access-method conditions */
|
|
|
|
if (!(*recheckMtd) (node, slot))
|
|
|
|
ExecClearTuple(slot); /* would not be returned by scan */
|
|
|
|
|
|
|
|
return slot;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Run the node-type-specific access method function to get the next tuple
|
|
|
|
*/
|
|
|
|
return (*accessMtd) (node);
|
|
|
|
}
|
|
|
|
|
1996-07-09 08:22:35 +02:00
|
|
|
/* ----------------------------------------------------------------
 *		ExecScan
 *
 *		Scans the relation using the 'access method' indicated and
 *		returns the next qualifying tuple in the direction specified
 *		in the global variable ExecDirection.
 *		The access method returns the next tuple and ExecScan() is
 *		responsible for checking the tuple returned against the qual-clause.
 *
 *		A 'recheck method' must also be provided that can check an
 *		arbitrary tuple of the relation against any qual conditions
 *		that are implemented internal to the access method.
 *
 *		Conditions:
 *		  -- the "cursor" maintained by the AMI is positioned at the tuple
 *			 returned previously.
 *
 *		Initial States:
 *		  -- the relation indicated is opened for scanning so that the
 *			 "cursor" is positioned before the first qualifying tuple.
 * ----------------------------------------------------------------
 */
TupleTableSlot *
ExecScan(ScanState *node,
		 ExecScanAccessMtd accessMtd,	/* function returning a tuple */
		 ExecScanRecheckMtd recheckMtd)
{
	ExprContext *econtext;
	List	   *qual;
	ProjectionInfo *projInfo;
	ExprDoneCond isDone;
	TupleTableSlot *resultSlot;

	/*
	 * Fetch data from node
	 */
	qual = node->ps.qual;
	projInfo = node->ps.ps_ProjInfo;
	econtext = node->ps.ps_ExprContext;

	/*
	 * If we have neither a qual to check nor a projection to do, just skip
	 * all the overhead and return the raw scan tuple.
	 */
	if (!qual && !projInfo)
	{
		ResetExprContext(econtext);
		return ExecScanFetch(node, accessMtd, recheckMtd);
	}

	/*
	 * Check to see if we're still projecting out tuples from a previous scan
	 * tuple (because there is a function-returning-set in the projection
	 * expressions).  If so, try to project another one.
	 */
	if (node->ps.ps_TupFromTlist)
	{
		Assert(projInfo);		/* can't get here if not projecting */
		resultSlot = ExecProject(projInfo, &isDone);
		if (isDone == ExprMultipleResult)
			return resultSlot;
		/* Done with that source tuple... */
		node->ps.ps_TupFromTlist = false;
	}

	/*
	 * Reset per-tuple memory context to free any expression evaluation
	 * storage allocated in the previous tuple cycle.  Note this can't happen
	 * until we're done projecting out tuples from a scan tuple: the SRF
	 * results above may still reference that storage.
	 */
	ResetExprContext(econtext);

	/*
	 * get a tuple from the access method.  Loop until we obtain a tuple that
	 * passes the qualification.
	 */
	for (;;)
	{
		TupleTableSlot *slot;

		/* Stay responsive to query cancel during long scans. */
		CHECK_FOR_INTERRUPTS();

		slot = ExecScanFetch(node, accessMtd, recheckMtd);

		/*
		 * if the slot returned by the accessMtd contains NULL, then it means
		 * there is nothing more to scan so we just return an empty slot,
		 * being careful to use the projection result slot so it has correct
		 * tupleDesc.
		 */
		if (TupIsNull(slot))
		{
			if (projInfo)
				return ExecClearTuple(projInfo->pi_slot);
			else
				return slot;
		}

		/*
		 * place the current tuple into the expr context
		 */
		econtext->ecxt_scantuple = slot;

		/*
		 * check that the current tuple satisfies the qual-clause
		 *
		 * check for non-nil qual here to avoid a function call to ExecQual()
		 * when the qual is nil ... saves only a few cycles, but they add up
		 * ...
		 */
		if (!qual || ExecQual(qual, econtext, false))
		{
			/*
			 * Found a satisfactory scan tuple.
			 */
			if (projInfo)
			{
				/*
				 * Form a projection tuple, store it in the result tuple slot
				 * and return it --- unless we find we can project no tuples
				 * from this scan tuple, in which case continue scan.
				 */
				resultSlot = ExecProject(projInfo, &isDone);
				if (isDone != ExprEndResult)
				{
					/* remember whether an SRF has more output pending */
					node->ps.ps_TupFromTlist = (isDone == ExprMultipleResult);
					return resultSlot;
				}
			}
			else
			{
				/*
				 * Here, we aren't projecting, so just return scan tuple.
				 */
				return slot;
			}
		}
		else
			InstrCountFiltered1(node, 1);

		/*
		 * Tuple fails qual, so free per-tuple memory and try again.
		 */
		ResetExprContext(econtext);
	}
}
|
2003-02-03 16:07:08 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* ExecAssignScanProjectionInfo
|
|
|
|
* Set up projection info for a scan node, if necessary.
|
|
|
|
*
|
|
|
|
* We can avoid a projection step if the requested tlist exactly matches
|
|
|
|
* the underlying tuple type. If so, we just set ps_ProjInfo to NULL.
|
|
|
|
* Note that this case occurs not only for simple "SELECT * FROM ...", but
|
|
|
|
* also in most cases where there are joins or other processing nodes above
|
|
|
|
* the scan node, because the planner will preferentially generate a matching
|
|
|
|
* tlist.
|
|
|
|
*
|
|
|
|
* ExecAssignScanType must have been called already.
|
|
|
|
*/
|
|
|
|
void
|
2003-08-08 23:42:59 +02:00
|
|
|
ExecAssignScanProjectionInfo(ScanState *node)
|
2003-02-03 16:07:08 +01:00
|
|
|
{
|
2003-08-04 02:43:34 +02:00
|
|
|
Scan *scan = (Scan *) node->ps.plan;
|
2011-10-11 20:20:06 +02:00
|
|
|
Index varno;
|
|
|
|
|
|
|
|
/* Vars in an index-only scan's tlist should be INDEX_VAR */
|
|
|
|
if (IsA(scan, IndexOnlyScan))
|
|
|
|
varno = INDEX_VAR;
|
|
|
|
else
|
|
|
|
varno = scan->scanrelid;
|
2003-02-03 16:07:08 +01:00
|
|
|
|
2004-01-22 03:23:21 +01:00
|
|
|
if (tlist_matches_tupdesc(&node->ps,
|
|
|
|
scan->plan.targetlist,
|
2011-10-11 20:20:06 +02:00
|
|
|
varno,
|
2005-03-16 22:38:10 +01:00
|
|
|
node->ss_ScanTupleSlot->tts_tupleDescriptor))
|
2003-02-03 16:07:08 +01:00
|
|
|
node->ps.ps_ProjInfo = NULL;
|
|
|
|
else
|
2007-02-02 01:07:03 +01:00
|
|
|
ExecAssignProjectionInfo(&node->ps,
|
|
|
|
node->ss_ScanTupleSlot->tts_tupleDescriptor);
|
2003-02-03 16:07:08 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
static bool
|
2004-01-22 03:23:21 +01:00
|
|
|
tlist_matches_tupdesc(PlanState *ps, List *tlist, Index varno, TupleDesc tupdesc)
|
2003-02-03 16:07:08 +01:00
|
|
|
{
|
2003-08-04 02:43:34 +02:00
|
|
|
int numattrs = tupdesc->natts;
|
|
|
|
int attrno;
|
2004-01-22 03:23:21 +01:00
|
|
|
bool hasoid;
|
2004-05-26 06:41:50 +02:00
|
|
|
ListCell *tlist_item = list_head(tlist);
|
2003-02-03 16:07:08 +01:00
|
|
|
|
2004-01-22 03:23:21 +01:00
|
|
|
/* Check the tlist attributes */
|
2003-02-03 16:07:08 +01:00
|
|
|
for (attrno = 1; attrno <= numattrs; attrno++)
|
|
|
|
{
|
|
|
|
Form_pg_attribute att_tup = tupdesc->attrs[attrno - 1];
|
2003-08-04 02:43:34 +02:00
|
|
|
Var *var;
|
2003-02-03 16:07:08 +01:00
|
|
|
|
2004-05-26 06:41:50 +02:00
|
|
|
if (tlist_item == NULL)
|
2003-02-03 16:07:08 +01:00
|
|
|
return false; /* tlist too short */
|
2004-05-26 06:41:50 +02:00
|
|
|
var = (Var *) ((TargetEntry *) lfirst(tlist_item))->expr;
|
2003-02-03 16:07:08 +01:00
|
|
|
if (!var || !IsA(var, Var))
|
|
|
|
return false; /* tlist item not a Var */
|
2007-02-02 01:07:03 +01:00
|
|
|
/* if these Asserts fail, planner messed up */
|
2003-02-03 16:07:08 +01:00
|
|
|
Assert(var->varno == varno);
|
2003-09-25 21:41:49 +02:00
|
|
|
Assert(var->varlevelsup == 0);
|
2003-02-03 16:07:08 +01:00
|
|
|
if (var->varattno != attrno)
|
|
|
|
return false; /* out of order */
|
2003-09-25 21:41:49 +02:00
|
|
|
if (att_tup->attisdropped)
|
|
|
|
return false; /* table contains dropped columns */
|
2007-11-15 22:14:46 +01:00
|
|
|
|
2007-01-24 02:25:47 +01:00
|
|
|
/*
|
2007-11-15 22:14:46 +01:00
|
|
|
* Note: usually the Var's type should match the tupdesc exactly, but
|
|
|
|
* in situations involving unions of columns that have different
|
2007-01-24 02:25:47 +01:00
|
|
|
* typmods, the Var may have come from above the union and hence have
|
|
|
|
* typmod -1. This is a legitimate situation since the Var still
|
2007-11-15 22:14:46 +01:00
|
|
|
* describes the column, just not as exactly as the tupdesc does. We
|
|
|
|
* could change the planner to prevent it, but it'd then insert
|
2007-01-24 02:25:47 +01:00
|
|
|
* projection steps just to convert from specific typmod to typmod -1,
|
|
|
|
* which is pretty silly.
|
|
|
|
*/
|
2007-02-02 01:07:03 +01:00
|
|
|
if (var->vartype != att_tup->atttypid ||
|
|
|
|
(var->vartypmod != att_tup->atttypmod &&
|
|
|
|
var->vartypmod != -1))
|
|
|
|
return false; /* type mismatch */
|
2003-02-03 16:07:08 +01:00
|
|
|
|
2004-05-26 06:41:50 +02:00
|
|
|
tlist_item = lnext(tlist_item);
|
2003-02-03 16:07:08 +01:00
|
|
|
}
|
|
|
|
|
2004-05-26 06:41:50 +02:00
|
|
|
if (tlist_item)
|
2003-02-03 16:07:08 +01:00
|
|
|
return false; /* tlist too long */
|
|
|
|
|
2004-01-22 03:23:21 +01:00
|
|
|
/*
|
2005-10-15 04:49:52 +02:00
|
|
|
* If the plan context requires a particular hasoid setting, then that has
|
|
|
|
* to match, too.
|
2004-01-22 03:23:21 +01:00
|
|
|
*/
|
|
|
|
if (ExecContextForcesOids(ps, &hasoid) &&
|
|
|
|
hasoid != tupdesc->tdhasoid)
|
|
|
|
return false;
|
|
|
|
|
2003-02-03 16:07:08 +01:00
|
|
|
return true;
|
|
|
|
}
|
Re-implement EvalPlanQual processing to improve its performance and eliminate
a lot of strange behaviors that occurred in join cases. We now identify the
"current" row for every joined relation in UPDATE, DELETE, and SELECT FOR
UPDATE/SHARE queries. If an EvalPlanQual recheck is necessary, we jam the
appropriate row into each scan node in the rechecking plan, forcing it to emit
only that one row. The former behavior could rescan the whole of each joined
relation for each recheck, which was terrible for performance, and what's much
worse could result in duplicated output tuples.
Also, the original implementation of EvalPlanQual could not re-use the recheck
execution tree --- it had to go through a full executor init and shutdown for
every row to be tested. To avoid this overhead, I've associated a special
runtime Param with each LockRows or ModifyTable plan node, and arranged to
make every scan node below such a node depend on that Param. Thus, by
signaling a change in that Param, the EPQ machinery can just rescan the
already-built test plan.
This patch also adds a prohibition on set-returning functions in the
targetlist of SELECT FOR UPDATE/SHARE. This is needed to avoid the
duplicate-output-tuple problem. It seems fairly reasonable since the
other restrictions on SELECT FOR UPDATE are meant to ensure that there
is a unique correspondence between source tuples and result tuples,
which an output SRF destroys as much as anything else does.
2009-10-26 03:26:45 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* ExecScanReScan
|
|
|
|
*
|
|
|
|
* This must be called within the ReScan function of any plan node type
|
|
|
|
* that uses ExecScan().
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
ExecScanReScan(ScanState *node)
|
|
|
|
{
|
|
|
|
EState *estate = node->ps.state;
|
|
|
|
|
|
|
|
/* Stop projecting any tuples from SRFs in the targetlist */
|
|
|
|
node->ps.ps_TupFromTlist = false;
|
|
|
|
|
|
|
|
/* Rescan EvalPlanQual tuple if we're inside an EvalPlanQual recheck */
|
|
|
|
if (estate->es_epqScanDone != NULL)
|
|
|
|
{
|
|
|
|
Index scanrelid = ((Scan *) node->ps.plan)->scanrelid;
|
|
|
|
|
|
|
|
Assert(scanrelid > 0);
|
|
|
|
|
|
|
|
estate->es_epqScanDone[scanrelid - 1] = false;
|
|
|
|
}
|
|
|
|
}
|