2006-08-02 03:59:48 +02:00
|
|
|
/*-------------------------------------------------------------------------
|
|
|
|
*
|
|
|
|
* nodeValuesscan.c
|
|
|
|
* Support routines for scanning Values lists
|
2006-10-04 02:30:14 +02:00
|
|
|
* ("VALUES (...), (...), ..." in rangetable).
|
2006-08-02 03:59:48 +02:00
|
|
|
*
|
2018-01-03 05:30:12 +01:00
|
|
|
* Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
|
2006-08-02 03:59:48 +02:00
|
|
|
* Portions Copyright (c) 1994, Regents of the University of California
|
|
|
|
*
|
|
|
|
*
|
|
|
|
* IDENTIFICATION
|
2010-09-20 22:08:53 +02:00
|
|
|
* src/backend/executor/nodeValuesscan.c
|
2006-08-02 03:59:48 +02:00
|
|
|
*
|
|
|
|
*-------------------------------------------------------------------------
|
|
|
|
*/
|
|
|
|
/*
|
|
|
|
* INTERFACE ROUTINES
|
|
|
|
* ExecValuesScan scans a values list.
|
|
|
|
* ExecValuesNext retrieve next tuple in sequential order.
|
|
|
|
* ExecInitValuesScan creates and initializes a valuesscan node.
|
|
|
|
* ExecEndValuesScan releases any storage allocated.
|
2010-07-12 19:01:06 +02:00
|
|
|
* ExecReScanValuesScan rescans the values list
|
2006-08-02 03:59:48 +02:00
|
|
|
*/
|
|
|
|
#include "postgres.h"
|
|
|
|
|
|
|
|
#include "executor/executor.h"
|
|
|
|
#include "executor/nodeValuesscan.h"
|
2016-06-04 00:07:14 +02:00
|
|
|
#include "utils/expandeddatum.h"
|
2006-08-02 03:59:48 +02:00
|
|
|
|
|
|
|
|
|
|
|
static TupleTableSlot *ValuesNext(ValuesScanState *node);
|
|
|
|
|
|
|
|
|
|
|
|
/* ----------------------------------------------------------------
|
|
|
|
* Scan Support
|
|
|
|
* ----------------------------------------------------------------
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* ----------------------------------------------------------------
|
|
|
|
* ValuesNext
|
|
|
|
*
|
|
|
|
* This is a workhorse for ExecValuesScan
|
|
|
|
* ----------------------------------------------------------------
|
|
|
|
*/
|
|
|
|
static TupleTableSlot *
|
|
|
|
ValuesNext(ValuesScanState *node)
|
|
|
|
{
|
|
|
|
TupleTableSlot *slot;
|
2006-10-04 02:30:14 +02:00
|
|
|
EState *estate;
|
|
|
|
ExprContext *econtext;
|
|
|
|
ScanDirection direction;
|
|
|
|
List *exprlist;
|
2006-08-02 03:59:48 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* get information from the estate and scan state
|
|
|
|
*/
|
|
|
|
estate = node->ss.ps.state;
|
|
|
|
direction = estate->es_direction;
|
|
|
|
slot = node->ss.ss_ScanTupleSlot;
|
2006-08-02 20:58:21 +02:00
|
|
|
econtext = node->rowcontext;
|
2006-08-02 03:59:48 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Get the next tuple. Return NULL if no more tuples.
|
|
|
|
*/
|
|
|
|
if (ScanDirectionIsForward(direction))
|
|
|
|
{
|
|
|
|
if (node->curr_idx < node->array_len)
|
|
|
|
node->curr_idx++;
|
|
|
|
if (node->curr_idx < node->array_len)
|
|
|
|
exprlist = node->exprlists[node->curr_idx];
|
|
|
|
else
|
|
|
|
exprlist = NIL;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
if (node->curr_idx >= 0)
|
|
|
|
node->curr_idx--;
|
|
|
|
if (node->curr_idx >= 0)
|
|
|
|
exprlist = node->exprlists[node->curr_idx];
|
|
|
|
else
|
|
|
|
exprlist = NIL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2006-10-04 02:30:14 +02:00
|
|
|
* Always clear the result slot; this is appropriate if we are at the end
|
|
|
|
* of the data, and if we're not, we still need it as the first step of
|
|
|
|
* the store-virtual-tuple protocol. It seems wise to clear the slot
|
2006-08-02 20:58:21 +02:00
|
|
|
* before we reset the context it might have pointers into.
|
2006-08-02 03:59:48 +02:00
|
|
|
*/
|
|
|
|
ExecClearTuple(slot);
|
|
|
|
|
2006-08-02 20:58:21 +02:00
|
|
|
if (exprlist)
|
2006-08-02 03:59:48 +02:00
|
|
|
{
|
2006-08-02 20:58:21 +02:00
|
|
|
MemoryContext oldContext;
|
2017-11-25 20:15:48 +01:00
|
|
|
List *oldsubplans;
|
2006-08-02 20:58:21 +02:00
|
|
|
List *exprstatelist;
|
|
|
|
Datum *values;
|
|
|
|
bool *isnull;
|
|
|
|
ListCell *lc;
|
|
|
|
int resind;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Get rid of any prior cycle's leftovers. We use ReScanExprContext
|
|
|
|
* not just ResetExprContext because we want any registered shutdown
|
|
|
|
* callbacks to be called.
|
|
|
|
*/
|
|
|
|
ReScanExprContext(econtext);
|
|
|
|
|
|
|
|
/*
|
2006-10-04 02:30:14 +02:00
|
|
|
* Build the expression eval state in the econtext's per-tuple memory.
|
|
|
|
* This is a tad unusual, but we want to delete the eval state again
|
|
|
|
* when we move to the next row, to avoid growth of memory
|
|
|
|
* requirements over a long values list.
|
2006-08-02 20:58:21 +02:00
|
|
|
*/
|
|
|
|
oldContext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
|
|
|
|
|
|
|
|
/*
|
2017-11-25 20:15:48 +01:00
|
|
|
* The expressions might contain SubPlans (this is currently only
|
|
|
|
* possible if there's a sub-select containing a LATERAL reference,
|
|
|
|
* otherwise sub-selects in a VALUES list should be InitPlans). Those
|
|
|
|
* subplans will want to hook themselves into our subPlan list, which
|
|
|
|
* would result in a corrupted list after we delete the eval state. We
|
|
|
|
* can work around this by saving and restoring the subPlan list.
|
|
|
|
* (There's no need for the functionality that would be enabled by
|
|
|
|
* having the list entries, since the SubPlans aren't going to be
|
|
|
|
* re-executed anyway.)
|
2006-08-02 20:58:21 +02:00
|
|
|
*/
|
2017-11-25 20:15:48 +01:00
|
|
|
oldsubplans = node->ss.ps.subPlan;
|
|
|
|
node->ss.ps.subPlan = NIL;
|
|
|
|
|
|
|
|
exprstatelist = ExecInitExprList(exprlist, &node->ss.ps);
|
|
|
|
|
|
|
|
node->ss.ps.subPlan = oldsubplans;
|
2006-08-02 20:58:21 +02:00
|
|
|
|
|
|
|
/* parser should have checked all sublists are the same length */
|
|
|
|
Assert(list_length(exprstatelist) == slot->tts_tupleDescriptor->natts);
|
|
|
|
|
|
|
|
/*
|
2006-10-04 02:30:14 +02:00
|
|
|
* Compute the expressions and build a virtual result tuple. We
|
|
|
|
* already did ExecClearTuple(slot).
|
2006-08-02 20:58:21 +02:00
|
|
|
*/
|
|
|
|
values = slot->tts_values;
|
|
|
|
isnull = slot->tts_isnull;
|
|
|
|
|
|
|
|
resind = 0;
|
|
|
|
foreach(lc, exprstatelist)
|
|
|
|
{
|
2006-10-04 02:30:14 +02:00
|
|
|
ExprState *estate = (ExprState *) lfirst(lc);
|
2017-08-20 20:19:07 +02:00
|
|
|
Form_pg_attribute attr = TupleDescAttr(slot->tts_tupleDescriptor,
|
|
|
|
resind);
|
2006-08-02 20:58:21 +02:00
|
|
|
|
|
|
|
values[resind] = ExecEvalExpr(estate,
|
|
|
|
econtext,
|
2017-01-19 23:12:38 +01:00
|
|
|
&isnull[resind]);
|
2016-06-04 00:07:14 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* We must force any R/W expanded datums to read-only state, in
|
|
|
|
* case they are multiply referenced in the plan node's output
|
|
|
|
* expressions, or in case we skip the output projection and the
|
|
|
|
* output column is multiply referenced in higher plan nodes.
|
|
|
|
*/
|
|
|
|
values[resind] = MakeExpandedObjectReadOnly(values[resind],
|
|
|
|
isnull[resind],
|
2017-08-20 20:19:07 +02:00
|
|
|
attr->attlen);
|
2016-06-04 00:07:14 +02:00
|
|
|
|
2006-08-02 20:58:21 +02:00
|
|
|
resind++;
|
|
|
|
}
|
|
|
|
|
|
|
|
MemoryContextSwitchTo(oldContext);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* And return the virtual tuple.
|
|
|
|
*/
|
|
|
|
ExecStoreVirtualTuple(slot);
|
2006-08-02 03:59:48 +02:00
|
|
|
}
|
|
|
|
|
2006-08-02 20:58:21 +02:00
|
|
|
return slot;
|
2006-08-02 03:59:48 +02:00
|
|
|
}
|
|
|
|
|
Re-implement EvalPlanQual processing to improve its performance and eliminate
a lot of strange behaviors that occurred in join cases. We now identify the
"current" row for every joined relation in UPDATE, DELETE, and SELECT FOR
UPDATE/SHARE queries. If an EvalPlanQual recheck is necessary, we jam the
appropriate row into each scan node in the rechecking plan, forcing it to emit
only that one row. The former behavior could rescan the whole of each joined
relation for each recheck, which was terrible for performance, and what's much
worse could result in duplicated output tuples.
Also, the original implementation of EvalPlanQual could not re-use the recheck
execution tree --- it had to go through a full executor init and shutdown for
every row to be tested. To avoid this overhead, I've associated a special
runtime Param with each LockRows or ModifyTable plan node, and arranged to
make every scan node below such a node depend on that Param. Thus, by
signaling a change in that Param, the EPQ machinery can just rescan the
already-built test plan.
This patch also adds a prohibition on set-returning functions in the
targetlist of SELECT FOR UPDATE/SHARE. This is needed to avoid the
duplicate-output-tuple problem. It seems fairly reasonable since the
other restrictions on SELECT FOR UPDATE are meant to ensure that there
is a unique correspondence between source tuples and result tuples,
which an output SRF destroys as much as anything else does.
2009-10-26 03:26:45 +01:00
|
|
|
/*
|
|
|
|
* ValuesRecheck -- access method routine to recheck a tuple in EvalPlanQual
|
|
|
|
*/
|
|
|
|
static bool
|
|
|
|
ValuesRecheck(ValuesScanState *node, TupleTableSlot *slot)
|
|
|
|
{
|
|
|
|
/* nothing to check */
|
|
|
|
return true;
|
|
|
|
}
|
2006-08-02 03:59:48 +02:00
|
|
|
|
|
|
|
/* ----------------------------------------------------------------
|
|
|
|
* ExecValuesScan(node)
|
|
|
|
*
|
|
|
|
* Scans the values lists sequentially and returns the next qualifying
|
|
|
|
* tuple.
|
Re-implement EvalPlanQual processing to improve its performance and eliminate
a lot of strange behaviors that occurred in join cases. We now identify the
"current" row for every joined relation in UPDATE, DELETE, and SELECT FOR
UPDATE/SHARE queries. If an EvalPlanQual recheck is necessary, we jam the
appropriate row into each scan node in the rechecking plan, forcing it to emit
only that one row. The former behavior could rescan the whole of each joined
relation for each recheck, which was terrible for performance, and what's much
worse could result in duplicated output tuples.
Also, the original implementation of EvalPlanQual could not re-use the recheck
execution tree --- it had to go through a full executor init and shutdown for
every row to be tested. To avoid this overhead, I've associated a special
runtime Param with each LockRows or ModifyTable plan node, and arranged to
make every scan node below such a node depend on that Param. Thus, by
signaling a change in that Param, the EPQ machinery can just rescan the
already-built test plan.
This patch also adds a prohibition on set-returning functions in the
targetlist of SELECT FOR UPDATE/SHARE. This is needed to avoid the
duplicate-output-tuple problem. It seems fairly reasonable since the
other restrictions on SELECT FOR UPDATE are meant to ensure that there
is a unique correspondence between source tuples and result tuples,
which an output SRF destroys as much as anything else does.
2009-10-26 03:26:45 +01:00
|
|
|
* We call the ExecScan() routine and pass it the appropriate
|
|
|
|
* access method functions.
|
2006-08-02 03:59:48 +02:00
|
|
|
* ----------------------------------------------------------------
|
|
|
|
*/
|
2017-07-17 09:33:49 +02:00
|
|
|
static TupleTableSlot *
|
|
|
|
ExecValuesScan(PlanState *pstate)
|
2006-08-02 03:59:48 +02:00
|
|
|
{
|
2017-07-17 09:33:49 +02:00
|
|
|
ValuesScanState *node = castNode(ValuesScanState, pstate);
|
|
|
|
|
Re-implement EvalPlanQual processing to improve its performance and eliminate
a lot of strange behaviors that occurred in join cases. We now identify the
"current" row for every joined relation in UPDATE, DELETE, and SELECT FOR
UPDATE/SHARE queries. If an EvalPlanQual recheck is necessary, we jam the
appropriate row into each scan node in the rechecking plan, forcing it to emit
only that one row. The former behavior could rescan the whole of each joined
relation for each recheck, which was terrible for performance, and what's much
worse could result in duplicated output tuples.
Also, the original implementation of EvalPlanQual could not re-use the recheck
execution tree --- it had to go through a full executor init and shutdown for
every row to be tested. To avoid this overhead, I've associated a special
runtime Param with each LockRows or ModifyTable plan node, and arranged to
make every scan node below such a node depend on that Param. Thus, by
signaling a change in that Param, the EPQ machinery can just rescan the
already-built test plan.
This patch also adds a prohibition on set-returning functions in the
targetlist of SELECT FOR UPDATE/SHARE. This is needed to avoid the
duplicate-output-tuple problem. It seems fairly reasonable since the
other restrictions on SELECT FOR UPDATE are meant to ensure that there
is a unique correspondence between source tuples and result tuples,
which an output SRF destroys as much as anything else does.
2009-10-26 03:26:45 +01:00
|
|
|
return ExecScan(&node->ss,
|
|
|
|
(ExecScanAccessMtd) ValuesNext,
|
|
|
|
(ExecScanRecheckMtd) ValuesRecheck);
|
2006-08-02 03:59:48 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* ----------------------------------------------------------------
|
|
|
|
* ExecInitValuesScan
|
|
|
|
* ----------------------------------------------------------------
|
|
|
|
*/
|
|
|
|
ValuesScanState *
|
|
|
|
ExecInitValuesScan(ValuesScan *node, EState *estate, int eflags)
|
|
|
|
{
|
2006-10-04 02:30:14 +02:00
|
|
|
ValuesScanState *scanstate;
|
|
|
|
TupleDesc tupdesc;
|
|
|
|
ListCell *vtl;
|
|
|
|
int i;
|
|
|
|
PlanState *planstate;
|
2006-08-02 03:59:48 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* ValuesScan should not have any children.
|
|
|
|
*/
|
|
|
|
Assert(outerPlan(node) == NULL);
|
|
|
|
Assert(innerPlan(node) == NULL);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* create new ScanState for node
|
|
|
|
*/
|
|
|
|
scanstate = makeNode(ValuesScanState);
|
|
|
|
scanstate->ss.ps.plan = (Plan *) node;
|
|
|
|
scanstate->ss.ps.state = estate;
|
2017-07-17 09:33:49 +02:00
|
|
|
scanstate->ss.ps.ExecProcNode = ExecValuesScan;
|
2006-08-02 03:59:48 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Miscellaneous initialization
|
|
|
|
*/
|
|
|
|
planstate = &scanstate->ss.ps;
|
2006-08-02 20:58:21 +02:00
|
|
|
|
|
|
|
/*
|
2014-05-06 18:12:18 +02:00
|
|
|
* Create expression contexts. We need two, one for per-sublist
|
2006-10-04 02:30:14 +02:00
|
|
|
* processing and one for execScan.c to use for quals and projections. We
|
|
|
|
* cheat a little by using ExecAssignExprContext() to build both.
|
2006-08-02 20:58:21 +02:00
|
|
|
*/
|
|
|
|
ExecAssignExprContext(estate, planstate);
|
|
|
|
scanstate->rowcontext = planstate->ps_ExprContext;
|
2006-08-02 03:59:48 +02:00
|
|
|
ExecAssignExprContext(estate, planstate);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* tuple table initialization
|
|
|
|
*/
|
|
|
|
ExecInitResultTupleSlot(estate, &scanstate->ss.ps);
|
|
|
|
ExecInitScanTupleSlot(estate, &scanstate->ss);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* initialize child expressions
|
|
|
|
*/
|
Faster expression evaluation and targetlist projection.
This replaces the old, recursive tree-walk based evaluation, with
non-recursive, opcode dispatch based, expression evaluation.
Projection is now implemented as part of expression evaluation.
This both leads to significant performance improvements, and makes
future just-in-time compilation of expressions easier.
The speed gains primarily come from:
- non-recursive implementation reduces stack usage / overhead
- simple sub-expressions are implemented with a single jump, without
function calls
- sharing some state between different sub-expressions
- reduced amount of indirect/hard to predict memory accesses by laying
out operation metadata sequentially; including the avoidance of
nearly all of the previously used linked lists
- more code has been moved to expression initialization, avoiding
constant re-checks at evaluation time
Future just-in-time compilation (JIT) has become easier, as
demonstrated by released patches intended to be merged in a later
release, for primarily two reasons: Firstly, due to a stricter split
between expression initialization and evaluation, less code has to be
handled by the JIT. Secondly, due to the non-recursive nature of the
generated "instructions", less performance-critical code-paths can
easily be shared between interpreted and compiled evaluation.
The new framework allows for significant future optimizations. E.g.:
- basic infrastructure for to later reduce the per executor-startup
overhead of expression evaluation, by caching state in prepared
statements. That'd be helpful in OLTPish scenarios where
initialization overhead is measurable.
- optimizing the generated "code". A number of proposals for potential
work has already been made.
- optimizing the interpreter. Similarly a number of proposals have
been made here too.
The move of logic into the expression initialization step leads to some
backward-incompatible changes:
- Function permission checks are now done during expression
initialization, whereas previously they were done during
execution. In edge cases this can lead to errors being raised that
previously wouldn't have been, e.g. a NULL array being coerced to a
different array type previously didn't perform checks.
- The set of domain constraints to be checked, is now evaluated once
during expression initialization, previously it was re-built
every time a domain check was evaluated. For normal queries this
doesn't change much, but e.g. for plpgsql functions, which caches
ExprStates, the old set could stick around longer. The behavior
around might still change.
Author: Andres Freund, with significant changes by Tom Lane,
changes by Heikki Linnakangas
Reviewed-By: Tom Lane, Heikki Linnakangas
Discussion: https://postgr.es/m/20161206034955.bh33paeralxbtluv@alap3.anarazel.de
2017-03-14 23:45:36 +01:00
|
|
|
scanstate->ss.ps.qual =
|
|
|
|
ExecInitQual(node->scan.plan.qual, (PlanState *) scanstate);
|
2006-08-02 03:59:48 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* get info about values list
|
|
|
|
*/
|
Ensure that RowExprs and whole-row Vars produce the expected column names.
At one time it wasn't terribly important what column names were associated
with the fields of a composite Datum, but since the introduction of
operations like row_to_json(), it's important that looking up the rowtype
ID embedded in the Datum returns the column names that users would expect.
That did not work terribly well before this patch: you could get the column
names of the underlying table, or column aliases from any level of the
query, depending on minor details of the plan tree. You could even get
totally empty field names, which is disastrous for cases like row_to_json().
To fix this for whole-row Vars, look to the RTE referenced by the Var, and
make sure its column aliases are applied to the rowtype associated with
the result Datums. This is a tad scary because we might have to return
a transient RECORD type even though the Var is declared as having some
named rowtype. In principle it should be all right because the record
type will still be physically compatible with the named rowtype; but
I had to weaken one Assert in ExecEvalConvertRowtype, and there might be
third-party code containing similar assumptions.
Similarly, RowExprs have to be willing to override the column names coming
from a named composite result type and produce a RECORD when the column
aliases visible at the site of the RowExpr differ from the underlying
table's column names.
In passing, revert the decision made in commit 398f70ec070fe601 to add
an alias-list argument to ExecTypeFromExprList: better to provide that
functionality in a separate function. This also reverts most of the code
changes in d68581483564ec0f, which we don't need because we're no longer
depending on the tupdesc found in the child plan node's result slot to be
blessed.
Back-patch to 9.4, but not earlier, since this solution changes the results
in some cases that users might not have realized were buggy. We'll apply a
more restricted form of this patch in older branches.
2014-11-10 21:21:09 +01:00
|
|
|
tupdesc = ExecTypeFromExprList((List *) linitial(node->values_lists));
|
2006-08-02 03:59:48 +02:00
|
|
|
|
|
|
|
ExecAssignScanType(&scanstate->ss, tupdesc);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Other node-specific setup
|
|
|
|
*/
|
|
|
|
scanstate->curr_idx = -1;
|
2007-02-19 03:23:12 +01:00
|
|
|
scanstate->array_len = list_length(node->values_lists);
|
2006-08-02 03:59:48 +02:00
|
|
|
|
|
|
|
/* convert list of sublists into array of sublists for easy addressing */
|
|
|
|
scanstate->exprlists = (List **)
|
|
|
|
palloc(scanstate->array_len * sizeof(List *));
|
|
|
|
i = 0;
|
2007-02-19 03:23:12 +01:00
|
|
|
foreach(vtl, node->values_lists)
|
2006-08-02 03:59:48 +02:00
|
|
|
{
|
|
|
|
scanstate->exprlists[i++] = (List *) lfirst(vtl);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Initialize result tuple type and projection info.
|
|
|
|
*/
|
|
|
|
ExecAssignResultTypeFromTL(&scanstate->ss.ps);
|
|
|
|
ExecAssignScanProjectionInfo(&scanstate->ss);
|
|
|
|
|
|
|
|
return scanstate;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* ----------------------------------------------------------------
 *		ExecEndValuesScan
 *
 *		frees any storage allocated through C routines.
 * ----------------------------------------------------------------
 */
void
ExecEndValuesScan(ValuesScanState *node)
{
	/*
	 * Free both exprcontexts.  ExecFreeExprContext operates on the context
	 * currently installed in ps_ExprContext, so free the qual/projection
	 * context first, then swap rowcontext into its place and free that too.
	 */
	ExecFreeExprContext(&node->ss.ps);
	node->ss.ps.ps_ExprContext = node->rowcontext;
	ExecFreeExprContext(&node->ss.ps);

	/*
	 * clean out the tuple table
	 */
	ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);
	ExecClearTuple(node->ss.ss_ScanTupleSlot);
}
|
|
|
|
|
|
|
|
/* ----------------------------------------------------------------
 *		ExecReScanValuesScan
 *
 *		Rescans the relation.
 * ----------------------------------------------------------------
 */
void
ExecReScanValuesScan(ValuesScanState *node)
{
	/* Discard any tuple left over from the previous scan cycle */
	ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);

	/* Let the generic scan machinery reset its rescan/EPQ bookkeeping */
	ExecScanReScan(&node->ss);

	/* Reposition before the first row; ValuesNext will advance from here */
	node->curr_idx = -1;
}
|