/*-------------------------------------------------------------------------
 *
 * nodeCtescan.c
 *    routines to handle CteScan nodes.
 *
 * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    src/backend/executor/nodeCtescan.c
 *
 *-------------------------------------------------------------------------
 */
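
/*
 * Usage sketch (an illustrative example, not taken from the original
 * comments): a query such as
 *
 *    WITH q AS (SELECT ...)
 *    SELECT * FROM q AS a JOIN q AS b ON ...
 *
 * typically plans each reference to the materialized CTE "q" as a CteScan
 * node.  All CteScan nodes for the same CTE share one tuplestore that
 * buffers the CTE query's output; each node reads it through its own read
 * pointer, as implemented below.
 */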

#include "postgres.h"

#include "executor/execdebug.h"
#include "executor/nodeCtescan.h"
#include "miscadmin.h"

static TupleTableSlot *CteScanNext(CteScanState *node);

/* ----------------------------------------------------------------
 *    CteScanNext
 *
 *    This is a workhorse for ExecCteScan
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
CteScanNext(CteScanState *node)
{
    EState     *estate;
    ScanDirection dir;
    bool        forward;
    Tuplestorestate *tuplestorestate;
    bool        eof_tuplestore;
    TupleTableSlot *slot;

    /*
     * get state info from node
     */
    estate = node->ss.ps.state;
    dir = estate->es_direction;
    forward = ScanDirectionIsForward(dir);
    tuplestorestate = node->leader->cte_table;
    tuplestore_select_read_pointer(tuplestorestate, node->readptr);
    slot = node->ss.ss_ScanTupleSlot;

    /*
     * If we are not at the end of the tuplestore, or are going backwards,
     * try to fetch a tuple from the tuplestore.
     */
    eof_tuplestore = tuplestore_ateof(tuplestorestate);

    if (!forward && eof_tuplestore)
    {
        if (!node->leader->eof_cte)
        {
            /*
             * When reversing direction at tuplestore EOF, the first
             * gettupleslot call will fetch the last-added tuple; but we want
             * to return the one before that, if possible.  So do an extra
             * fetch.
             */
            if (!tuplestore_advance(tuplestorestate, forward))
                return NULL;    /* the tuplestore must be empty */
        }
        eof_tuplestore = false;
    }

    /*
     * If we can fetch another tuple from the tuplestore, return it.
     *
     * Note: we have to use copy=true in the tuplestore_gettupleslot call,
     * because we are sharing the tuplestore with other nodes that might
     * write into the tuplestore before we get called again.
     */
    if (!eof_tuplestore)
    {
        if (tuplestore_gettupleslot(tuplestorestate, forward, true, slot))
            return slot;
        if (forward)
            eof_tuplestore = true;
    }

    /*
     * If necessary, try to fetch another row from the CTE query.
     *
     * Note: the eof_cte state variable exists to short-circuit further calls
     * of the CTE plan.  It's not optional, unfortunately, because some plan
     * node types are not robust about being called again when they've
     * already returned NULL.
     */
    if (eof_tuplestore && !node->leader->eof_cte)
    {
        TupleTableSlot *cteslot;

        /*
         * We can only get here with forward==true, so no need to worry about
         * which direction the subplan will go.
         */
        cteslot = ExecProcNode(node->cteplanstate);
        if (TupIsNull(cteslot))
        {
            node->leader->eof_cte = true;
            return NULL;
        }

        /*
         * There are corner cases where the subplan could change which
         * tuplestore read pointer is active, so be sure to reselect ours
         * before storing the tuple we got.
         */
        tuplestore_select_read_pointer(tuplestorestate, node->readptr);

        /*
         * Append a copy of the returned tuple to the tuplestore.  NOTE:
         * because our read pointer is certainly in EOF state, its read
         * position will move forward over the added tuple.  This is what we
         * want.  Also, any other readers will *not* move past the new tuple,
         * which is what they want.
         */
        tuplestore_puttupleslot(tuplestorestate, cteslot);

        /*
         * We MUST copy the CTE query's output tuple into our own slot.  This
         * is because other CteScan nodes might advance the CTE query before
         * we are called again, and our output tuple must stay stable over
         * that.
         */
        return ExecCopySlot(slot, cteslot);
    }

    /*
     * Nothing left ...
     */
    return ExecClearTuple(slot);
}

/*
 * CteScanRecheck -- access method routine to recheck a tuple in EvalPlanQual
 */
static bool
CteScanRecheck(CteScanState *node, TupleTableSlot *slot)
{
    /* nothing to check */
    return true;
}

/* ----------------------------------------------------------------
 *    ExecCteScan(node)
 *
 *    Scans the CTE sequentially and returns the next qualifying tuple.
 *    We call the ExecScan() routine and pass it the appropriate
 *    access method functions.
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
ExecCteScan(PlanState *pstate)
{
    CteScanState *node = castNode(CteScanState, pstate);

    return ExecScan(&node->ss,
                    (ExecScanAccessMtd) CteScanNext,
                    (ExecScanRecheckMtd) CteScanRecheck);
}

/* ----------------------------------------------------------------
 *    ExecInitCteScan
 * ----------------------------------------------------------------
 */
CteScanState *
ExecInitCteScan(CteScan *node, EState *estate, int eflags)
{
    CteScanState *scanstate;
    ParamExecData *prmdata;

    /* check for unsupported flags */
    Assert(!(eflags & EXEC_FLAG_MARK));

    /*
     * For the moment we have to force the tuplestore to allow REWIND,
     * because we might be asked to rescan the CTE even though upper levels
     * didn't tell us to be prepared to do it efficiently.  Annoying, since
     * this prevents truncation of the tuplestore.  XXX FIXME
     *
     * Note: if we are in an EPQ recheck plan tree, it's likely that no
     * access to the tuplestore is needed at all, making this even more
     * annoying.  It's not worth improving that as long as all the read
     * pointers would have REWIND anyway, but if we ever improve this logic
     * then that aspect should be considered too.
     */
    eflags |= EXEC_FLAG_REWIND;

    /*
     * CteScan should not have any children.
     */
    Assert(outerPlan(node) == NULL);
    Assert(innerPlan(node) == NULL);

    /*
     * create new CteScanState for node
     */
    scanstate = makeNode(CteScanState);
    scanstate->ss.ps.plan = (Plan *) node;
    scanstate->ss.ps.state = estate;
    scanstate->ss.ps.ExecProcNode = ExecCteScan;
    scanstate->eflags = eflags;
    scanstate->cte_table = NULL;
    scanstate->eof_cte = false;

    /*
     * Find the already-initialized plan for the CTE query.
     */
    scanstate->cteplanstate = (PlanState *) list_nth(estate->es_subplanstates,
                                                     node->ctePlanId - 1);

    /*
     * The Param slot associated with the CTE query is used to hold a pointer
     * to the CteScanState of the first CteScan node that initializes for
     * this CTE.  That node becomes the leader and holds the shared state for
     * all the CteScan nodes reading this CTE, particularly the shared
     * tuplestore.
     */
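    /*
     * Illustrative sketch: given "WITH q AS (...) SELECT ... FROM q a, q b",
     * whichever CteScan happens to initialize first finds the Param empty,
     * creates the tuplestore below, and becomes the leader; the other
     * CteScan then finds the leader's pointer in the Param and merely
     * allocates its own read pointer on the shared tuplestore.
     */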
    prmdata = &(estate->es_param_exec_vals[node->cteParam]);
    Assert(prmdata->execPlan == NULL);
    Assert(!prmdata->isnull);
    scanstate->leader = castNode(CteScanState, DatumGetPointer(prmdata->value));
    if (scanstate->leader == NULL)
    {
        /* I am the leader */
        prmdata->value = PointerGetDatum(scanstate);
        scanstate->leader = scanstate;
        scanstate->cte_table = tuplestore_begin_heap(true, false, work_mem);
        tuplestore_set_eflags(scanstate->cte_table, scanstate->eflags);
        scanstate->readptr = 0;
    }
    else
    {
        /* Not the leader */
        /* Create my own read pointer, and ensure it is at start */
        scanstate->readptr =
            tuplestore_alloc_read_pointer(scanstate->leader->cte_table,
                                          scanstate->eflags);
        tuplestore_select_read_pointer(scanstate->leader->cte_table,
                                       scanstate->readptr);
        tuplestore_rescan(scanstate->leader->cte_table);
    }

    /*
     * Miscellaneous initialization
     *
     * create expression context for node
     */
    ExecAssignExprContext(estate, &scanstate->ss.ps);

    /*
     * The scan tuple type (ie, the rowtype we expect to find in the work
     * table) is the same as the result rowtype of the CTE query.
     */
    ExecInitScanTupleSlot(estate, &scanstate->ss,
                          ExecGetResultType(scanstate->cteplanstate),
                          &TTSOpsMinimalTuple);

    /*
     * Initialize result type and projection.
     */
    ExecInitResultTypeTL(&scanstate->ss.ps);
    ExecAssignScanProjectionInfo(&scanstate->ss);

    /*
     * initialize child expressions
     */
    scanstate->ss.ps.qual =
        ExecInitQual(node->scan.plan.qual, (PlanState *) scanstate);

    return scanstate;
}

/* ----------------------------------------------------------------
 *    ExecEndCteScan
 *
 *    frees any storage allocated through C routines.
 * ----------------------------------------------------------------
 */
void
ExecEndCteScan(CteScanState *node)
{
    /*
     * Free exprcontext
     */
    ExecFreeExprContext(&node->ss.ps);

    /*
     * clean out the tuple table
     */
    if (node->ss.ps.ps_ResultTupleSlot)
        ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);
    ExecClearTuple(node->ss.ss_ScanTupleSlot);

    /*
     * If I am the leader, free the tuplestore.
     */
    if (node->leader == node)
    {
        tuplestore_end(node->cte_table);
        node->cte_table = NULL;
    }
}

/* ----------------------------------------------------------------
 *    ExecReScanCteScan
 *
 *    Rescans the relation.
 * ----------------------------------------------------------------
 */
void
ExecReScanCteScan(CteScanState *node)
{
    Tuplestorestate *tuplestorestate = node->leader->cte_table;

    if (node->ss.ps.ps_ResultTupleSlot)
        ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);

    ExecScanReScan(&node->ss);

    /*
     * Clear the tuplestore if a new scan of the underlying CTE is required.
     * This implicitly resets all the tuplestore's read pointers.  Note that
     * multiple CTE nodes might redundantly clear the tuplestore; that's OK,
     * and not unduly expensive.  We'll stop taking this path as soon as
     * somebody has attempted to read something from the underlying CTE
     * (thereby causing its chgParam to be cleared).
     */
    if (node->leader->cteplanstate->chgParam != NULL)
    {
        tuplestore_clear(tuplestorestate);
        node->leader->eof_cte = false;
    }
    else
    {
        /*
         * Else, just rewind my own pointer.  Either the underlying CTE
         * doesn't need a rescan (and we can re-read what's in the tuplestore
         * now), or somebody else already took care of it.
         */
        tuplestore_select_read_pointer(tuplestorestate, node->readptr);
        tuplestore_rescan(tuplestorestate);
    }
}