/*-------------------------------------------------------------------------
 *
 * nodeValuesscan.c
 *	  Support routines for scanning Values lists
 *	  ("VALUES (...), (...), ..." in rangetable).
 *
 * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  $PostgreSQL: pgsql/src/backend/executor/nodeValuesscan.c,v 1.13 2010/07/12 17:01:05 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
|
|
|
|
/*
 *	INTERFACE ROUTINES
 *		ExecValuesScan			scans a values list.
 *		ExecValuesNext			retrieve next tuple in sequential order.
 *		ExecInitValuesScan		creates and initializes a valuesscan node.
 *		ExecEndValuesScan		releases any storage allocated.
 *		ExecReScanValuesScan	rescans the values list
 */
|
|
|
|
#include "postgres.h"
|
|
|
|
|
|
|
|
#include "executor/executor.h"
|
|
|
|
#include "executor/nodeValuesscan.h"
|
|
|
|
#include "utils/memutils.h"
|
|
|
|
|
|
|
|
|
|
|
|
static TupleTableSlot *ValuesNext(ValuesScanState *node);
|
|
|
|
|
|
|
|
|
|
|
|
/* ----------------------------------------------------------------
 *						Scan Support
 * ----------------------------------------------------------------
 */
|
|
|
|
|
|
|
|
/* ----------------------------------------------------------------
|
|
|
|
* ValuesNext
|
|
|
|
*
|
|
|
|
* This is a workhorse for ExecValuesScan
|
|
|
|
* ----------------------------------------------------------------
|
|
|
|
*/
|
|
|
|
static TupleTableSlot *
|
|
|
|
ValuesNext(ValuesScanState *node)
|
|
|
|
{
|
|
|
|
TupleTableSlot *slot;
|
2006-10-04 02:30:14 +02:00
|
|
|
EState *estate;
|
|
|
|
ExprContext *econtext;
|
|
|
|
ScanDirection direction;
|
|
|
|
List *exprlist;
|
2006-08-02 03:59:48 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* get information from the estate and scan state
|
|
|
|
*/
|
|
|
|
estate = node->ss.ps.state;
|
|
|
|
direction = estate->es_direction;
|
|
|
|
slot = node->ss.ss_ScanTupleSlot;
|
2006-08-02 20:58:21 +02:00
|
|
|
econtext = node->rowcontext;
|
2006-08-02 03:59:48 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Get the next tuple. Return NULL if no more tuples.
|
|
|
|
*/
|
|
|
|
if (ScanDirectionIsForward(direction))
|
|
|
|
{
|
|
|
|
if (node->curr_idx < node->array_len)
|
|
|
|
node->curr_idx++;
|
|
|
|
if (node->curr_idx < node->array_len)
|
|
|
|
exprlist = node->exprlists[node->curr_idx];
|
|
|
|
else
|
|
|
|
exprlist = NIL;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
if (node->curr_idx >= 0)
|
|
|
|
node->curr_idx--;
|
|
|
|
if (node->curr_idx >= 0)
|
|
|
|
exprlist = node->exprlists[node->curr_idx];
|
|
|
|
else
|
|
|
|
exprlist = NIL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2006-10-04 02:30:14 +02:00
|
|
|
* Always clear the result slot; this is appropriate if we are at the end
|
|
|
|
* of the data, and if we're not, we still need it as the first step of
|
|
|
|
* the store-virtual-tuple protocol. It seems wise to clear the slot
|
2006-08-02 20:58:21 +02:00
|
|
|
* before we reset the context it might have pointers into.
|
2006-08-02 03:59:48 +02:00
|
|
|
*/
|
|
|
|
ExecClearTuple(slot);
|
|
|
|
|
2006-08-02 20:58:21 +02:00
|
|
|
if (exprlist)
|
2006-08-02 03:59:48 +02:00
|
|
|
{
|
2006-08-02 20:58:21 +02:00
|
|
|
MemoryContext oldContext;
|
|
|
|
List *exprstatelist;
|
|
|
|
Datum *values;
|
|
|
|
bool *isnull;
|
|
|
|
ListCell *lc;
|
|
|
|
int resind;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Get rid of any prior cycle's leftovers. We use ReScanExprContext
|
|
|
|
* not just ResetExprContext because we want any registered shutdown
|
|
|
|
* callbacks to be called.
|
|
|
|
*/
|
|
|
|
ReScanExprContext(econtext);
|
|
|
|
|
|
|
|
/*
|
2006-10-04 02:30:14 +02:00
|
|
|
* Build the expression eval state in the econtext's per-tuple memory.
|
|
|
|
* This is a tad unusual, but we want to delete the eval state again
|
|
|
|
* when we move to the next row, to avoid growth of memory
|
|
|
|
* requirements over a long values list.
|
2006-08-02 20:58:21 +02:00
|
|
|
*/
|
|
|
|
oldContext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
|
|
|
|
|
|
|
|
/*
|
2006-10-04 02:30:14 +02:00
|
|
|
* Pass NULL, not my plan node, because we don't want anything in this
|
|
|
|
* transient state linking into permanent state. The only possibility
|
|
|
|
* is a SubPlan, and there shouldn't be any (any subselects in the
|
|
|
|
* VALUES list should be InitPlans).
|
2006-08-02 20:58:21 +02:00
|
|
|
*/
|
|
|
|
exprstatelist = (List *) ExecInitExpr((Expr *) exprlist, NULL);
|
|
|
|
|
|
|
|
/* parser should have checked all sublists are the same length */
|
|
|
|
Assert(list_length(exprstatelist) == slot->tts_tupleDescriptor->natts);
|
|
|
|
|
|
|
|
/*
|
2006-10-04 02:30:14 +02:00
|
|
|
* Compute the expressions and build a virtual result tuple. We
|
|
|
|
* already did ExecClearTuple(slot).
|
2006-08-02 20:58:21 +02:00
|
|
|
*/
|
|
|
|
values = slot->tts_values;
|
|
|
|
isnull = slot->tts_isnull;
|
|
|
|
|
|
|
|
resind = 0;
|
|
|
|
foreach(lc, exprstatelist)
|
|
|
|
{
|
2006-10-04 02:30:14 +02:00
|
|
|
ExprState *estate = (ExprState *) lfirst(lc);
|
2006-08-02 20:58:21 +02:00
|
|
|
|
|
|
|
values[resind] = ExecEvalExpr(estate,
|
|
|
|
econtext,
|
|
|
|
&isnull[resind],
|
|
|
|
NULL);
|
|
|
|
resind++;
|
|
|
|
}
|
|
|
|
|
|
|
|
MemoryContextSwitchTo(oldContext);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* And return the virtual tuple.
|
|
|
|
*/
|
|
|
|
ExecStoreVirtualTuple(slot);
|
2006-08-02 03:59:48 +02:00
|
|
|
}
|
|
|
|
|
2006-08-02 20:58:21 +02:00
|
|
|
return slot;
|
2006-08-02 03:59:48 +02:00
|
|
|
}
|
|
|
|
|
Re-implement EvalPlanQual processing to improve its performance and eliminate
a lot of strange behaviors that occurred in join cases. We now identify the
"current" row for every joined relation in UPDATE, DELETE, and SELECT FOR
UPDATE/SHARE queries. If an EvalPlanQual recheck is necessary, we jam the
appropriate row into each scan node in the rechecking plan, forcing it to emit
only that one row. The former behavior could rescan the whole of each joined
relation for each recheck, which was terrible for performance, and what's much
worse could result in duplicated output tuples.
Also, the original implementation of EvalPlanQual could not re-use the recheck
execution tree --- it had to go through a full executor init and shutdown for
every row to be tested. To avoid this overhead, I've associated a special
runtime Param with each LockRows or ModifyTable plan node, and arranged to
make every scan node below such a node depend on that Param. Thus, by
signaling a change in that Param, the EPQ machinery can just rescan the
already-built test plan.
This patch also adds a prohibition on set-returning functions in the
targetlist of SELECT FOR UPDATE/SHARE. This is needed to avoid the
duplicate-output-tuple problem. It seems fairly reasonable since the
other restrictions on SELECT FOR UPDATE are meant to ensure that there
is a unique correspondence between source tuples and result tuples,
which an output SRF destroys as much as anything else does.
2009-10-26 03:26:45 +01:00
|
|
|
/*
 * ValuesRecheck -- access method routine to recheck a tuple in EvalPlanQual
 *
 * A VALUES scan reads no table data, so there is never any condition to
 * re-verify against an updated row; every tuple trivially passes.
 */
static bool
ValuesRecheck(ValuesScanState *node, TupleTableSlot *slot)
{
	/* nothing to check */
	return true;
}
|
2006-08-02 03:59:48 +02:00
|
|
|
|
|
|
|
/* ----------------------------------------------------------------
 *		ExecValuesScan(node)
 *
 *		Scans the values lists sequentially and returns the next qualifying
 *		tuple.
 *
 *		We call the ExecScan() routine and pass it the appropriate
 *		access method functions.  ExecScan handles qual checking and
 *		projection on top of the raw rows produced by ValuesNext.
 * ----------------------------------------------------------------
 */
TupleTableSlot *
ExecValuesScan(ValuesScanState *node)
{
	return ExecScan(&node->ss,
					(ExecScanAccessMtd) ValuesNext,
					(ExecScanRecheckMtd) ValuesRecheck);
}
|
|
|
|
|
|
|
|
/* ----------------------------------------------------------------
 *		ExecInitValuesScan
 *
 *		Creates the run-time state (ValuesScanState) for a ValuesScan plan
 *		node: builds the two expression contexts, the tuple slots, the
 *		scan tuple descriptor derived from the first VALUES sublist, and
 *		an array of sublists for O(1) positional access during the scan.
 *		The eflags parameter is currently not examined here.
 * ----------------------------------------------------------------
 */
ValuesScanState *
ExecInitValuesScan(ValuesScan *node, EState *estate, int eflags)
{
	ValuesScanState *scanstate;
	TupleDesc	tupdesc;
	ListCell   *vtl;
	int			i;
	PlanState  *planstate;

	/*
	 * ValuesScan should not have any children.
	 */
	Assert(outerPlan(node) == NULL);
	Assert(innerPlan(node) == NULL);

	/*
	 * create new ScanState for node
	 */
	scanstate = makeNode(ValuesScanState);
	scanstate->ss.ps.plan = (Plan *) node;
	scanstate->ss.ps.state = estate;

	/*
	 * Miscellaneous initialization
	 */
	planstate = &scanstate->ss.ps;

	/*
	 * Create expression contexts.  We need two, one for per-sublist
	 * processing and one for execScan.c to use for quals and projections. We
	 * cheat a little by using ExecAssignExprContext() to build both.
	 *
	 * The first call's context is saved as rowcontext; the second call
	 * overwrites ps_ExprContext, which is the one execScan.c will see.
	 */
	ExecAssignExprContext(estate, planstate);
	scanstate->rowcontext = planstate->ps_ExprContext;
	ExecAssignExprContext(estate, planstate);

	/*
	 * tuple table initialization
	 */
	ExecInitResultTupleSlot(estate, &scanstate->ss.ps);
	ExecInitScanTupleSlot(estate, &scanstate->ss);

	/*
	 * initialize child expressions
	 */
	scanstate->ss.ps.targetlist = (List *)
		ExecInitExpr((Expr *) node->scan.plan.targetlist,
					 (PlanState *) scanstate);
	scanstate->ss.ps.qual = (List *)
		ExecInitExpr((Expr *) node->scan.plan.qual,
					 (PlanState *) scanstate);

	/*
	 * get info about values list: the scan tuple type is derived from the
	 * expressions of the first sublist (the parser guarantees all sublists
	 * have matching length and types).
	 */
	tupdesc = ExecTypeFromExprList((List *) linitial(node->values_lists));

	ExecAssignScanType(&scanstate->ss, tupdesc);

	/*
	 * Other node-specific setup.  curr_idx = -1 means "before the first
	 * row", matching the pre-increment logic in ValuesNext.
	 */
	scanstate->marked_idx = -1;
	scanstate->curr_idx = -1;
	scanstate->array_len = list_length(node->values_lists);

	/* convert list of sublists into array of sublists for easy addressing */
	scanstate->exprlists = (List **)
		palloc(scanstate->array_len * sizeof(List *));
	i = 0;
	foreach(vtl, node->values_lists)
	{
		scanstate->exprlists[i++] = (List *) lfirst(vtl);
	}

	scanstate->ss.ps.ps_TupFromTlist = false;

	/*
	 * Initialize result tuple type and projection info.
	 */
	ExecAssignResultTypeFromTL(&scanstate->ss.ps);
	ExecAssignScanProjectionInfo(&scanstate->ss);

	return scanstate;
}
|
|
|
|
|
|
|
|
/* ----------------------------------------------------------------
 *		ExecEndValuesScan
 *
 *		frees any storage allocated through C routines.
 * ----------------------------------------------------------------
 */
void
ExecEndValuesScan(ValuesScanState *node)
{
	/*
	 * Free both exprcontexts.  ExecFreeExprContext only frees the context
	 * currently in ps_ExprContext, so after the first call we swap the
	 * per-row context into that slot and free it with a second call.
	 */
	ExecFreeExprContext(&node->ss.ps);
	node->ss.ps.ps_ExprContext = node->rowcontext;
	ExecFreeExprContext(&node->ss.ps);

	/*
	 * clean out the tuple table
	 */
	ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);
	ExecClearTuple(node->ss.ss_ScanTupleSlot);
}
|
|
|
|
|
|
|
|
/* ----------------------------------------------------------------
 *		ExecValuesMarkPos
 *
 *		Marks scan position by remembering the current list index.
 * ----------------------------------------------------------------
 */
void
ExecValuesMarkPos(ValuesScanState *node)
{
	node->marked_idx = node->curr_idx;
}
|
|
|
|
|
|
|
|
/* ----------------------------------------------------------------
 *		ExecValuesRestrPos
 *
 *		Restores scan position to the previously marked list index.
 * ----------------------------------------------------------------
 */
void
ExecValuesRestrPos(ValuesScanState *node)
{
	node->curr_idx = node->marked_idx;
}
|
|
|
|
|
|
|
|
/* ----------------------------------------------------------------
 *		ExecReScanValuesScan
 *
 *		Rescans the relation: clears any pending result, notifies the
 *		generic scan machinery (needed for EvalPlanQual bookkeeping),
 *		and resets the list position to "before the first row".
 * ----------------------------------------------------------------
 */
void
ExecReScanValuesScan(ValuesScanState *node)
{
	ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);

	ExecScanReScan(&node->ss);

	node->curr_idx = -1;
}
|