1996-07-09 08:22:35 +02:00
|
|
|
/*-------------------------------------------------------------------------
|
|
|
|
*
|
1999-02-14 00:22:53 +01:00
|
|
|
* explain.h
|
1996-07-09 08:22:35 +02:00
|
|
|
* prototypes for explain.c
|
|
|
|
*
|
2024-01-04 02:49:05 +01:00
|
|
|
* Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
|
2000-01-26 06:58:53 +01:00
|
|
|
* Portions Copyright (c) 1994-5, Regents of the University of California
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
2010-09-20 22:08:53 +02:00
|
|
|
* src/include/commands/explain.h
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
|
|
|
*-------------------------------------------------------------------------
|
|
|
|
*/
|
|
|
|
#ifndef EXPLAIN_H
|
|
|
|
#define EXPLAIN_H
|
|
|
|
|
2003-02-03 00:46:38 +01:00
|
|
|
#include "executor/executor.h"
|
2012-08-29 01:02:00 +02:00
|
|
|
#include "lib/stringinfo.h"
|
2016-09-06 18:00:00 +02:00
|
|
|
#include "parser/parse_node.h"
|
1997-11-26 02:14:33 +01:00
|
|
|
|
2009-08-10 07:46:50 +02:00
|
|
|
/*
 * Output formats selectable via EXPLAIN's FORMAT option.
 * EXPLAIN_FORMAT_TEXT is the traditional human-readable form; the others
 * are machine-readable structured formats.
 */
typedef enum ExplainFormat
{
	EXPLAIN_FORMAT_TEXT,
	EXPLAIN_FORMAT_XML,
	EXPLAIN_FORMAT_JSON,
	EXPLAIN_FORMAT_YAML,
} ExplainFormat;
|
|
|
|
|
Clean up EXPLAIN's handling of per-worker details.
Previously, it was possible for EXPLAIN ANALYZE of a parallel query
to produce several different "Workers" fields for a single plan node,
because different portions of explain.c independently generated
per-worker data and wrapped that output in separate fields. This
is pretty bogus, especially for the structured output formats: even
if it's not technically illegal, most programs would have a hard time
dealing with such data.
To improve matters, add infrastructure that allows redirecting
per-worker values into a side data structure, and then collect that
data into a single "Workers" field after we've finished running all
the relevant code for a given plan node.
There are a few visible side-effects:
* In text format, instead of something like
Sort Method: external merge Disk: 4920kB
Worker 0: Sort Method: external merge Disk: 5880kB
Worker 1: Sort Method: external merge Disk: 5920kB
Buffers: shared hit=682 read=10188, temp read=1415 written=2101
Worker 0: actual time=130.058..130.324 rows=1324 loops=1
Buffers: shared hit=337 read=3489, temp read=505 written=739
Worker 1: actual time=130.273..130.512 rows=1297 loops=1
Buffers: shared hit=345 read=3507, temp read=505 written=744
you get
Sort Method: external merge Disk: 4920kB
Buffers: shared hit=682 read=10188, temp read=1415 written=2101
Worker 0: actual time=130.058..130.324 rows=1324 loops=1
Sort Method: external merge Disk: 5880kB
Buffers: shared hit=337 read=3489, temp read=505 written=739
Worker 1: actual time=130.273..130.512 rows=1297 loops=1
Sort Method: external merge Disk: 5920kB
Buffers: shared hit=345 read=3507, temp read=505 written=744
* When JIT is enabled, any relevant per-worker JIT stats are attached
to the child node of the Gather or Gather Merge node, which is where
the other per-worker output has always been. Previously, that info
was attached directly to a Gather node, or missed entirely for Gather
Merge.
* A query's summary JIT data no longer includes a bogus
"Worker Number: -1" field.
A notable code-level change is that indenting for lines of text-format
output should now be handled by calling "ExplainIndentText(es)",
instead of hard-wiring how much space to emit. This seems a good deal
cleaner anyway.
This patch also adds a new "explain.sql" regression test script that's
dedicated to testing EXPLAIN. There is more that can be done in that
line, certainly, but for now it just adds some coverage of the XML and
YAML output formats, which had been completely untested.
Although this is surely a bug fix, it's not clear that people would
be happy with rearranging EXPLAIN output in a minor release, so apply
to HEAD only.
Maciek Sakrejda and Tom Lane, based on an idea of Andres Freund's;
reviewed by Georgios Kokolatos
Discussion: https://postgr.es/m/CAOtHd0AvAA8CLB9Xz0wnxu1U=zJCKrr1r4QwwXi_kcQsHDVU=Q@mail.gmail.com
2020-01-26 00:16:42 +01:00
|
|
|
/*
 * Per-plan-node state for collecting parallel workers' EXPLAIN output.
 *
 * Per-worker values emitted while explaining a plan node are redirected
 * into transient per-worker buffers here, then gathered into a single
 * "Workers" field once all code for the node has run.  prev_str holds the
 * main output buffer while output is being redirected to a worker buffer.
 */
typedef struct ExplainWorkersState
{
	int			num_workers;	/* # of worker processes the plan used */
	bool	   *worker_inited;	/* per-worker state-initialized flags */
	StringInfoData *worker_str; /* per-worker transient output buffers */
	int		   *worker_state_save;	/* per-worker grouping state save areas */
	StringInfo	prev_str;		/* saved output buffer while redirecting */
} ExplainWorkersState;
|
|
|
|
|
2009-07-27 01:34:18 +02:00
|
|
|
/*
 * Working state for EXPLAIN: the output buffer, the user-selected options,
 * formatting state that persists across plan trees, and per-plan-tree /
 * per-plan-node state.
 */
typedef struct ExplainState
{
	StringInfo	str;			/* output buffer */
	/* options */
	bool		verbose;		/* be verbose */
	bool		analyze;		/* print actual times */
	bool		costs;			/* print estimated costs */
	bool		buffers;		/* print buffer usage */
	bool		wal;			/* print WAL usage */
	bool		timing;			/* print detailed node timing */
	bool		summary;		/* print total planning and execution timing */
	bool		memory;			/* print planner's memory usage information */
	bool		settings;		/* print modified settings */
	bool		generic;		/* generate a generic plan */
	ExplainFormat format;		/* output format */
	/* state for output formatting --- not reset for each new plan tree */
	int			indent;			/* current indentation level */
	List	   *grouping_stack; /* format-specific grouping state */
	/* state related to the current plan tree (filled by ExplainPrintPlan) */
	PlannedStmt *pstmt;			/* top of plan */
	List	   *rtable;			/* range table */
	List	   *rtable_names;	/* alias names for RTEs */
	List	   *deparse_cxt;	/* context list for deparsing expressions */
	Bitmapset  *printed_subplans;	/* ids of SubPlans we've printed */
	bool		hide_workers;	/* set if we find an invisible Gather */
	/* state related to the current plan node */
	ExplainWorkersState *workers_state; /* needed if parallel plan */
} ExplainState;
|
|
|
|
|
2007-05-25 19:54:25 +02:00
|
|
|
/*
 * Hook for plugins to get control in ExplainOneQuery().  A plugin taking
 * this hook becomes responsible for planning and explaining the given
 * Query; it should ultimately emit output into es->str.
 */
typedef void (*ExplainOneQuery_hook_type) (Query *query,
										   int cursorOptions,
										   IntoClause *into,
										   ExplainState *es,
										   const char *queryString,
										   ParamListInfo params,
										   QueryEnvironment *queryEnv);
extern PGDLLIMPORT ExplainOneQuery_hook_type ExplainOneQuery_hook;
|
2007-05-25 19:54:25 +02:00
|
|
|
|
|
|
|
/*
 * Hook for plugins to get control in explain_get_index_name().  May return
 * a substitute index name to print, or NULL to use the default behavior.
 * (NOTE(review): NULL-means-default is the conventional contract for this
 * hook; confirm against explain.c.)
 */
typedef const char *(*explain_get_index_name_hook_type) (Oid indexId);
extern PGDLLIMPORT explain_get_index_name_hook_type explain_get_index_name_hook;
|
2007-05-25 19:54:25 +02:00
|
|
|
|
2003-02-03 00:46:38 +01:00
|
|
|
|
2020-01-04 11:56:58 +01:00
|
|
|
/* Entry point for the EXPLAIN utility command */
extern void ExplainQuery(ParseState *pstate, ExplainStmt *stmt,
						 ParamListInfo params, DestReceiver *dest);

/* Create a fresh, default-initialized ExplainState */
extern ExplainState *NewExplainState(void);

/* Result-tuple descriptor for an EXPLAIN command */
extern TupleDesc ExplainResultDesc(ExplainStmt *stmt);

/* Explain a utility statement */
extern void ExplainOneUtility(Node *utilityStmt, IntoClause *into,
							  ExplainState *es, const char *queryString,
							  ParamListInfo params, QueryEnvironment *queryEnv);

/* Explain a single planned query (running it first if es->analyze) */
extern void ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into,
						   ExplainState *es, const char *queryString,
						   ParamListInfo params, QueryEnvironment *queryEnv,
						   const instr_time *planduration,
						   const BufferUsage *bufusage,
						   const MemoryContextCounters *mem_counters);

/*
 * Emitters for the major sections of EXPLAIN output for an executed (or
 * to-be-executed) query; these also fill plan-tree-related ExplainState
 * fields such as pstmt and rtable (see ExplainPrintPlan).
 */
extern void ExplainPrintPlan(ExplainState *es, QueryDesc *queryDesc);
extern void ExplainPrintTriggers(ExplainState *es, QueryDesc *queryDesc);
extern void ExplainPrintJITSummary(ExplainState *es, QueryDesc *queryDesc);
extern void ExplainQueryText(ExplainState *es, QueryDesc *queryDesc);
extern void ExplainQueryParameters(ExplainState *es, ParamListInfo params, int maxlen);

/* Open/close the overall output structure, and separate multiple plans */
extern void ExplainBeginOutput(ExplainState *es);
extern void ExplainEndOutput(ExplainState *es);
extern void ExplainSeparatePlans(ExplainState *es);

/*
 * Format-independent property emitters.  These render a labeled value (or
 * list of values) appropriately for es->format; extensions emitting custom
 * EXPLAIN output should use these rather than writing to es->str directly.
 */
extern void ExplainPropertyList(const char *qlabel, List *data,
								ExplainState *es);
extern void ExplainPropertyListNested(const char *qlabel, List *data,
									  ExplainState *es);
extern void ExplainPropertyText(const char *qlabel, const char *value,
								ExplainState *es);
extern void ExplainPropertyInteger(const char *qlabel, const char *unit,
								   int64 value, ExplainState *es);
extern void ExplainPropertyUInteger(const char *qlabel, const char *unit,
									uint64 value, ExplainState *es);
extern void ExplainPropertyFloat(const char *qlabel, const char *unit,
								 double value, int ndigits, ExplainState *es);
extern void ExplainPropertyBool(const char *qlabel, bool value,
								ExplainState *es);

/* Open/close a group of related output items */
extern void ExplainOpenGroup(const char *objtype, const char *labelname,
							 bool labeled, ExplainState *es);
extern void ExplainCloseGroup(const char *objtype, const char *labelname,
							  bool labeled, ExplainState *es);
|
|
|
|
|
1996-07-09 08:22:35 +02:00
|
|
|
#endif /* EXPLAIN_H */
|