/*-------------------------------------------------------------------------
 *
 * nodeSeqscan.c
 *	  Support routines for sequential scans of relations.
 *
 * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/executor/nodeSeqscan.c
 *
 *-------------------------------------------------------------------------
 */
/*
 * INTERFACE ROUTINES
 *		ExecSeqScan				sequentially scans a relation.
 *		ExecSeqNext				retrieve next tuple in sequential order.
 *		ExecInitSeqScan			creates and initializes a seqscan node.
 *		ExecEndSeqScan			releases any storage allocated.
 *		ExecReScanSeqScan		rescans the relation
 *
 *		ExecSeqScanEstimate		estimates DSM space needed for parallel scan
 *		ExecSeqScanInitializeDSM	initialize DSM for parallel scan
 *		ExecSeqScanReInitializeDSM	reinitialize DSM for fresh parallel scan
 *		ExecSeqScanInitializeWorker	attach to DSM info in parallel worker
 */
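
/*
 * Rough calling sequence (a sketch for orientation, not a contract): the
 * executor calls ExecInitSeqScan once at startup, ExecSeqScan once per
 * returned tuple, ExecReScanSeqScan whenever the scan must restart (for
 * example, as the inner side of a nestloop), and ExecEndSeqScan at
 * shutdown.  For a parallel scan, the leader additionally calls
 * ExecSeqScanEstimate and ExecSeqScanInitializeDSM before launching
 * workers, and each worker calls ExecSeqScanInitializeWorker to attach to
 * the shared scan state.
 */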

#include "postgres.h"

#include "access/relscan.h"
#include "executor/execdebug.h"
#include "executor/nodeSeqscan.h"
#include "utils/rel.h"

static TupleTableSlot *SeqNext(SeqScanState *node);

/* ----------------------------------------------------------------
 *						Scan Support
 * ----------------------------------------------------------------
 */

/* ----------------------------------------------------------------
 *		SeqNext
 *
 *		This is a workhorse for ExecSeqScan
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
SeqNext(SeqScanState *node)
{
	HeapTuple	tuple;
	HeapScanDesc scandesc;
	EState	   *estate;
	ScanDirection direction;
	TupleTableSlot *slot;

	/*
	 * get information from the estate and scan state
	 */
	scandesc = node->ss.ss_currentScanDesc;
	estate = node->ss.ps.state;
	direction = estate->es_direction;
	slot = node->ss.ss_ScanTupleSlot;

	if (scandesc == NULL)
	{
		/*
		 * We reach here if the scan is not parallel, or if we're serially
		 * executing a scan that was planned to be parallel.
		 */
		scandesc = heap_beginscan(node->ss.ss_currentRelation,
								  estate->es_snapshot,
								  0, NULL);
		node->ss.ss_currentScanDesc = scandesc;
	}

	/*
	 * get the next tuple from the table
	 */
	tuple = heap_getnext(scandesc, direction);

	/*
	 * save the tuple and the buffer returned to us by the access methods in
	 * our scan tuple slot and return the slot.  Note: tuples returned by
	 * heap_getnext() are pointers onto disk pages and were not created with
	 * palloc(), so they must not be pfree()'d.  Note also that
	 * ExecStoreBufferHeapTuple will increment the refcount of the buffer;
	 * the refcount will not be dropped until the tuple table slot is
	 * cleared.
	 */
	if (tuple)
		ExecStoreBufferHeapTuple(tuple, /* tuple to store */
								 slot,	/* slot to store in */
								 scandesc->rs_cbuf);	/* buffer associated
														 * with this tuple */
	else
		ExecClearTuple(slot);

	return slot;
}
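
/*
 * During an EvalPlanQual recheck, the EPQ machinery substitutes the row to
 * be rechecked into each scan node, forcing it to emit only that one row;
 * a sequential scan has no access-method conditions of its own to
 * re-verify, which is why SeqRecheck below can simply return true.
 */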

/*
 * SeqRecheck -- access method routine to recheck a tuple in EvalPlanQual
 */
static bool
SeqRecheck(SeqScanState *node, TupleTableSlot *slot)
{
	/*
	 * Note that unlike IndexScan, SeqScan never uses keys in heap_beginscan
	 * (and this is very bad), so there are no scan keys to recheck here.
	 */
	return true;
}

/* ----------------------------------------------------------------
 *		ExecSeqScan(node)
 *
 *		Scans the relation sequentially and returns the next qualifying
 *		tuple.
 *		We call the ExecScan() routine and pass it the appropriate
 *		access method functions.
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
ExecSeqScan(PlanState *pstate)
{
	SeqScanState *node = castNode(SeqScanState, pstate);

	return ExecScan(&node->ss,
					(ExecScanAccessMtd) SeqNext,
					(ExecScanRecheckMtd) SeqRecheck);
}

/* ----------------------------------------------------------------
 *		ExecInitSeqScan
 * ----------------------------------------------------------------
 */
SeqScanState *
ExecInitSeqScan(SeqScan *node, EState *estate, int eflags)
{
	SeqScanState *scanstate;

	/*
	 * Once upon a time it was possible to have an outerPlan of a SeqScan,
	 * but not any more.
	 */
	Assert(outerPlan(node) == NULL);
	Assert(innerPlan(node) == NULL);

	/*
	 * create state structure
	 */
	scanstate = makeNode(SeqScanState);
	scanstate->ss.ps.plan = (Plan *) node;
	scanstate->ss.ps.state = estate;
	scanstate->ss.ps.ExecProcNode = ExecSeqScan;

	/*
	 * Miscellaneous initialization
	 *
	 * create expression context for node
	 */
	ExecAssignExprContext(estate, &scanstate->ss.ps);

	/*
	 * open the scan relation
	 */
	scanstate->ss.ss_currentRelation =
		ExecOpenScanRelation(estate,
							 node->scanrelid,
							 eflags);

	/* and create slot with the appropriate rowtype */
	ExecInitScanTupleSlot(estate, &scanstate->ss,
						  RelationGetDescr(scanstate->ss.ss_currentRelation));

	/*
	 * Initialize result type and projection.
	 */
	ExecInitResultTypeTL(&scanstate->ss.ps);
	ExecAssignScanProjectionInfo(&scanstate->ss);

	/*
	 * initialize child expressions
	 */
	scanstate->ss.ps.qual =
		ExecInitQual(node->plan.qual, (PlanState *) scanstate);

	return scanstate;
}

/* ----------------------------------------------------------------
 *		ExecEndSeqScan
 *
 *		frees any storage allocated through C routines.
 * ----------------------------------------------------------------
 */
void
ExecEndSeqScan(SeqScanState *node)
{
	HeapScanDesc scanDesc;

	/*
	 * get information from node
	 */
	scanDesc = node->ss.ss_currentScanDesc;

	/*
	 * Free the exprcontext
	 */
	ExecFreeExprContext(&node->ss.ps);

	/*
	 * clean out the tuple table
	 */
	if (node->ss.ps.ps_ResultTupleSlot)
		ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);
	ExecClearTuple(node->ss.ss_ScanTupleSlot);

	/*
	 * close heap scan
	 */
	if (scanDesc != NULL)
		heap_endscan(scanDesc);
}

/* ----------------------------------------------------------------
 *						Join Support
 * ----------------------------------------------------------------
 */

/* ----------------------------------------------------------------
 *		ExecReScanSeqScan
 *
 *		Rescans the relation.
 * ----------------------------------------------------------------
 */
void
ExecReScanSeqScan(SeqScanState *node)
{
	HeapScanDesc scan;

	scan = node->ss.ss_currentScanDesc;

	if (scan != NULL)
		heap_rescan(scan,		/* scan desc */
					NULL);		/* new scan keys */

	ExecScanReScan((ScanState *) node);
}

/* ----------------------------------------------------------------
 *						Parallel Scan Support
 * ----------------------------------------------------------------
 */
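
/*
 * A brief sketch of the shared-memory protocol used below (descriptive
 * only): the leader reserves space in the parallel-query DSM for a
 * ParallelHeapScanDesc sized via heap_parallelscan_estimate(), stores it
 * in the shm_toc keyed by this node's plan_node_id, and starts its own
 * scan with heap_beginscan_parallel().  Workers look the descriptor up by
 * the same key and join the scan the same way, so each block of the
 * relation is handed to exactly one participant.
 */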

/* ----------------------------------------------------------------
 *		ExecSeqScanEstimate
 *
 *		Compute the amount of space we'll need in the parallel
 *		query DSM, and inform pcxt->estimator about our needs.
 * ----------------------------------------------------------------
 */
void
ExecSeqScanEstimate(SeqScanState *node,
					ParallelContext *pcxt)
{
	EState	   *estate = node->ss.ps.state;

	node->pscan_len = heap_parallelscan_estimate(estate->es_snapshot);
	shm_toc_estimate_chunk(&pcxt->estimator, node->pscan_len);
	shm_toc_estimate_keys(&pcxt->estimator, 1);
}

/* ----------------------------------------------------------------
 *		ExecSeqScanInitializeDSM
 *
 *		Set up a parallel heap scan descriptor.
 * ----------------------------------------------------------------
 */
void
ExecSeqScanInitializeDSM(SeqScanState *node,
						 ParallelContext *pcxt)
{
	EState	   *estate = node->ss.ps.state;
	ParallelHeapScanDesc pscan;

	pscan = shm_toc_allocate(pcxt->toc, node->pscan_len);
	heap_parallelscan_initialize(pscan,
								 node->ss.ss_currentRelation,
								 estate->es_snapshot);
	shm_toc_insert(pcxt->toc, node->ss.ps.plan->plan_node_id, pscan);
	node->ss.ss_currentScanDesc =
		heap_beginscan_parallel(node->ss.ss_currentRelation, pscan);
}
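
/*
 * ExecSeqScanReInitializeDSM is called only in the leader, and is
 * guaranteed to run before any new workers are started; resetting
 * backend-local scan state is left to ExecReScanSeqScan above.
 */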

/* ----------------------------------------------------------------
 *		ExecSeqScanReInitializeDSM
 *
 *		Reset shared state before beginning a fresh scan.
 * ----------------------------------------------------------------
 */
void
ExecSeqScanReInitializeDSM(SeqScanState *node,
						   ParallelContext *pcxt)
{
	HeapScanDesc scan = node->ss.ss_currentScanDesc;

	heap_parallelscan_reinitialize(scan->rs_parallel);
}

/* ----------------------------------------------------------------
 *		ExecSeqScanInitializeWorker
 *
 *		Copy relevant information from TOC into planstate.
 * ----------------------------------------------------------------
 */
void
ExecSeqScanInitializeWorker(SeqScanState *node,
							ParallelWorkerContext *pwcxt)
{
	ParallelHeapScanDesc pscan;

	pscan = shm_toc_lookup(pwcxt->toc, node->ss.ps.plan->plan_node_id, false);
	node->ss.ss_currentScanDesc =
		heap_beginscan_parallel(node->ss.ss_currentRelation, pscan);
}