postgresql/src/backend/executor/nodeGroup.c

/*-------------------------------------------------------------------------
*
* nodeGroup.c
* Routines to handle group nodes (used for queries with GROUP BY clause).
*
* Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* DESCRIPTION
* The Group node is designed for handling queries with a GROUP BY clause.
* Its outer plan must deliver tuples that are sorted in the order
specified by the grouping columns (i.e. tuples from the same group are
* consecutive). That way, we just have to compare adjacent tuples to
* locate group boundaries.
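*
* Illustrative example (a sketch; the exact plan shape depends on the
* planner, which will often pick HashAggregate instead): a query such as
*
*     SELECT a, b FROM tab GROUP BY a, b;
*
* can be executed as a Group node reading from a Sort node ordered by
* (a, b), emitting one row per distinct (a, b) combination.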
*
* IDENTIFICATION
* src/backend/executor/nodeGroup.c
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "executor/executor.h"
#include "executor/nodeGroup.h"
#include "miscadmin.h"
#include "utils/memutils.h"
/*
* ExecGroup -
*
* Return one tuple for each group of matching input tuples.
*/
static TupleTableSlot *
ExecGroup(PlanState *pstate)
{
GroupState *node = castNode(GroupState, pstate);
ExprContext *econtext;
TupleTableSlot *firsttupleslot;
TupleTableSlot *outerslot;
CHECK_FOR_INTERRUPTS();
/*
* get state info from node
*/
if (node->grp_done)
return NULL;
econtext = node->ss.ps.ps_ExprContext;
/*
* The ScanTupleSlot holds the (copied) first tuple of each group.
*/
firsttupleslot = node->ss.ss_ScanTupleSlot;
/*
* We need not call ResetExprContext here because ExecQualAndReset() will
* reset the per-tuple memory context once per input tuple.
*/
/*
* If first time through, acquire first input tuple and determine whether
* to return it or not.
*/
if (TupIsNull(firsttupleslot))
{
outerslot = ExecProcNode(outerPlanState(node));
if (TupIsNull(outerslot))
{
/* empty input, so return nothing */
node->grp_done = true;
return NULL;
}
/* Copy tuple into firsttupleslot */
ExecCopySlot(firsttupleslot, outerslot);
/*
* Set it up as input for qual test and projection. The expressions
* will access the input tuple as varno OUTER.
*/
econtext->ecxt_outertuple = firsttupleslot;
/*
* Check the qual (HAVING clause); if the group does not match, ignore
* it and fall into scan loop.
*/
if (ExecQual(node->ss.ps.qual, econtext))
{
/*
* Form and return a projection tuple using the first input tuple.
*/
return ExecProject(node->ss.ps.ps_ProjInfo);
}
else
InstrCountFiltered1(node, 1);
}
/*
* This loop iterates once per input tuple group. At the head of the
* loop, we have finished processing the first tuple of the group and now
* need to scan over all the other group members.
*/
for (;;)
{
/*
* Scan over all remaining tuples that belong to this group
*/
for (;;)
{
outerslot = ExecProcNode(outerPlanState(node));
if (TupIsNull(outerslot))
{
/* no more groups, so we're done */
node->grp_done = true;
return NULL;
}
/*
* Compare with first tuple and see if this tuple is of the same
* group. If so, ignore it and keep scanning.
*/
econtext->ecxt_innertuple = firsttupleslot;
econtext->ecxt_outertuple = outerslot;
if (!ExecQualAndReset(node->eqfunction, econtext))
break;
}
/*
* We have the first tuple of the next input group. See if we want to
* return it.
*/
/* Copy tuple, set up as input for qual test and projection */
ExecCopySlot(firsttupleslot, outerslot);
econtext->ecxt_outertuple = firsttupleslot;
/*
* Check the qual (HAVING clause); if the group does not match, ignore
* it and loop back to scan the rest of the group.
*/
if (ExecQual(node->ss.ps.qual, econtext))
{
/*
* Form and return a projection tuple using the first input tuple.
*/
return ExecProject(node->ss.ps.ps_ProjInfo);
}
else
InstrCountFiltered1(node, 1);
}
}

/* -----------------
* ExecInitGroup
*
* Creates the run-time information for the group node produced by the
* planner and initializes its outer subtree
* -----------------
*/
GroupState *
ExecInitGroup(Group *node, EState *estate, int eflags)
{
GroupState *grpstate;
const TupleTableSlotOps *tts_ops;
/* check for unsupported flags */
Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
/*
* create state structure
*/
grpstate = makeNode(GroupState);
grpstate->ss.ps.plan = (Plan *) node;
grpstate->ss.ps.state = estate;
grpstate->ss.ps.ExecProcNode = ExecGroup;
grpstate->grp_done = false;
/*
* create expression context
*/
ExecAssignExprContext(estate, &grpstate->ss.ps);
/*
* initialize child nodes
*/
outerPlanState(grpstate) = ExecInitNode(outerPlan(node), estate, eflags);
/*
* Initialize scan slot and type.
*/
tts_ops = ExecGetResultSlotOps(outerPlanState(&grpstate->ss), NULL);
ExecCreateScanSlotFromOuterPlan(estate, &grpstate->ss, tts_ops);
/*
* Initialize result slot, type and projection.
*/
ExecInitResultTupleSlotTL(&grpstate->ss.ps, &TTSOpsVirtual);
ExecAssignProjectionInfo(&grpstate->ss.ps, NULL);
/*
* initialize child expressions
*/
grpstate->ss.ps.qual =
ExecInitQual(node->plan.qual, (PlanState *) grpstate);
/*
* Precompute fmgr lookup data for inner loop
*/
grpstate->eqfunction =
execTuplesMatchPrepare(ExecGetResultType(outerPlanState(grpstate)),
node->numCols,
node->grpColIdx,
node->grpOperators,
node->grpCollations,
&grpstate->ss.ps);
return grpstate;
}

/* ------------------------
* ExecEndGroup(node)
*
* -----------------------
*/
void
ExecEndGroup(GroupState *node)
{
PlanState *outerPlan;
ExecFreeExprContext(&node->ss.ps);
/* clean up tuple table */
ExecClearTuple(node->ss.ss_ScanTupleSlot);
outerPlan = outerPlanState(node);
ExecEndNode(outerPlan);
}

void
ExecReScanGroup(GroupState *node)
{
PlanState *outerPlan = outerPlanState(node);
node->grp_done = false;
/* must clear first tuple */
ExecClearTuple(node->ss.ss_ScanTupleSlot);
/*
* if chgParam of subnode is not null then plan will be re-scanned by
* first ExecProcNode.
*/
if (outerPlan->chgParam == NULL)
ExecReScan(outerPlan);
}