postgresql/src/test/regress/expected/aggregates.out

--
-- AGGREGATES
--
-- directory paths are passed to us in environment variables
\getenv abs_srcdir PG_ABS_SRCDIR
-- avoid bit-exact output here because operations may not be bit-exact.
SET extra_float_digits = 0;
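-- Illustrative aside (not part of the original test): with the current
-- default (extra_float_digits > 0) the server prints the shortest
-- decimal string that round-trips exactly, so a hypothetical
--   SELECT 0.1::float8 * 3;
-- shows 0.30000000000000004, while under extra_float_digits = 0 the
-- value is rounded to 15 significant digits and prints as 0.3.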
-- prepare some test data
CREATE TABLE aggtest (
a int2,
b float4
);
\set filename :abs_srcdir '/data/agg.data'
COPY aggtest FROM :'filename';
ANALYZE aggtest;
SELECT avg(four) AS avg_1 FROM onek;
avg_1
--------------------
1.5000000000000000
(1 row)
SELECT avg(a) AS avg_32 FROM aggtest WHERE a < 100;
avg_32
---------------------
32.6666666666666667
(1 row)
SELECT any_value(v) FROM (VALUES (1), (2), (3)) AS v (v);
any_value
-----------
1
(1 row)
SELECT any_value(v) FROM (VALUES (NULL)) AS v (v);
any_value
-----------
(1 row)
SELECT any_value(v) FROM (VALUES (NULL), (1), (2)) AS v (v);
any_value
-----------
1
(1 row)
SELECT any_value(v) FROM (VALUES (array['hello', 'world'])) AS v (v);
any_value
---------------
{hello,world}
(1 row)
-- In 7.1, avg(float4) is computed using float8 arithmetic.
-- Round the result to 3 digits to avoid platform-specific results.
SELECT avg(b)::numeric(10,3) AS avg_107_943 FROM aggtest;
avg_107_943
-------------
107.943
(1 row)
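-- Illustrative aside: the ::numeric(10,3) cast is what makes this
-- stable; a bare avg(b) is a float8 whose low-order digits can vary by
-- platform.  A hypothetical
--   SELECT round(avg(b)::numeric, 3) FROM aggtest;
-- pins the result to three fractional digits the same way.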
SELECT avg(gpa) AS avg_3_4 FROM ONLY student;
avg_3_4
---------
3.4
(1 row)
SELECT sum(four) AS sum_1500 FROM onek;
sum_1500
----------
1500
(1 row)
SELECT sum(a) AS sum_198 FROM aggtest;
sum_198
---------
198
(1 row)
SELECT sum(b) AS avg_431_773 FROM aggtest;
avg_431_773
-------------
431.773
(1 row)
SELECT sum(gpa) AS avg_6_8 FROM ONLY student;
avg_6_8
---------
6.8
(1 row)
SELECT max(four) AS max_3 FROM onek;
max_3
-------
3
(1 row)
SELECT max(a) AS max_100 FROM aggtest;
max_100
---------
100
(1 row)
SELECT max(aggtest.b) AS max_324_78 FROM aggtest;
max_324_78
------------
324.78
(1 row)
SELECT max(student.gpa) AS max_3_7 FROM student;
max_3_7
---------
3.7
(1 row)
SELECT stddev_pop(b) FROM aggtest;
stddev_pop
-----------------
131.10703231895
(1 row)
SELECT stddev_samp(b) FROM aggtest;
stddev_samp
------------------
151.389360803998
(1 row)
SELECT var_pop(b) FROM aggtest;
var_pop
------------------
17189.0539234823
(1 row)
SELECT var_samp(b) FROM aggtest;
var_samp
------------------
22918.7385646431
(1 row)
SELECT stddev_pop(b::numeric) FROM aggtest;
stddev_pop
------------------
131.107032862199
(1 row)
SELECT stddev_samp(b::numeric) FROM aggtest;
stddev_samp
------------------
151.389361431288
(1 row)
SELECT var_pop(b::numeric) FROM aggtest;
var_pop
--------------------
17189.054065929769
(1 row)
SELECT var_samp(b::numeric) FROM aggtest;
var_samp
--------------------
22918.738754573025
(1 row)
-- population variance is defined for a single tuple, sample variance
-- is not
SELECT var_pop(1.0::float8), var_samp(2.0::float8);
var_pop | var_samp
---------+----------
0 |
(1 row)
SELECT stddev_pop(3.0::float8), stddev_samp(4.0::float8);
stddev_pop | stddev_samp
------------+-------------
0 |
(1 row)
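-- Illustrative aside: for n = 1 the population forms divide the sum of
-- squared deviations by n, giving 0/1 = 0, while the sample forms
-- divide by n - 1 = 0 and are undefined, hence NULL.  With two rows
-- both are defined; a hypothetical
--   SELECT var_pop(x), var_samp(x) FROM (VALUES (1.0::float8), (3.0::float8)) v(x);
-- gives var_pop = 1 (deviations 1 + 1, divided by 2) and var_samp = 2
-- (divided by 1).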
SELECT var_pop('inf'::float8), var_samp('inf'::float8);
var_pop | var_samp
---------+----------
NaN |
(1 row)
SELECT stddev_pop('inf'::float8), stddev_samp('inf'::float8);
stddev_pop | stddev_samp
------------+-------------
NaN |
(1 row)
SELECT var_pop('nan'::float8), var_samp('nan'::float8);
var_pop | var_samp
---------+----------
NaN |
(1 row)
SELECT stddev_pop('nan'::float8), stddev_samp('nan'::float8);
stddev_pop | stddev_samp
------------+-------------
NaN |
(1 row)
SELECT var_pop(1.0::float4), var_samp(2.0::float4);
var_pop | var_samp
---------+----------
0 |
(1 row)
SELECT stddev_pop(3.0::float4), stddev_samp(4.0::float4);
stddev_pop | stddev_samp
------------+-------------
0 |
(1 row)
SELECT var_pop('inf'::float4), var_samp('inf'::float4);
var_pop | var_samp
---------+----------
NaN |
(1 row)
SELECT stddev_pop('inf'::float4), stddev_samp('inf'::float4);
stddev_pop | stddev_samp
------------+-------------
NaN |
(1 row)
SELECT var_pop('nan'::float4), var_samp('nan'::float4);
var_pop | var_samp
---------+----------
NaN |
(1 row)
SELECT stddev_pop('nan'::float4), stddev_samp('nan'::float4);
stddev_pop | stddev_samp
------------+-------------
NaN |
(1 row)
SELECT var_pop(1.0::numeric), var_samp(2.0::numeric);
var_pop | var_samp
---------+----------
0 |
(1 row)
SELECT stddev_pop(3.0::numeric), stddev_samp(4.0::numeric);
stddev_pop | stddev_samp
------------+-------------
0 |
(1 row)
SELECT var_pop('inf'::numeric), var_samp('inf'::numeric);
var_pop | var_samp
---------+----------
NaN |
(1 row)
SELECT stddev_pop('inf'::numeric), stddev_samp('inf'::numeric);
stddev_pop | stddev_samp
------------+-------------
NaN |
(1 row)
SELECT var_pop('nan'::numeric), var_samp('nan'::numeric);
var_pop | var_samp
---------+----------
NaN |
(1 row)
SELECT stddev_pop('nan'::numeric), stddev_samp('nan'::numeric);
stddev_pop | stddev_samp
------------+-------------
NaN |
(1 row)
-- verify correct results for null and NaN inputs
select sum(null::int4) from generate_series(1,3);
sum
-----
(1 row)
select sum(null::int8) from generate_series(1,3);
sum
-----
(1 row)
select sum(null::numeric) from generate_series(1,3);
sum
-----
(1 row)
select sum(null::float8) from generate_series(1,3);
sum
-----
(1 row)
select avg(null::int4) from generate_series(1,3);
avg
-----
(1 row)
select avg(null::int8) from generate_series(1,3);
avg
-----
(1 row)
select avg(null::numeric) from generate_series(1,3);
avg
-----
(1 row)
select avg(null::float8) from generate_series(1,3);
avg
-----
(1 row)
select sum('NaN'::numeric) from generate_series(1,3);
sum
-----
NaN
(1 row)
select avg('NaN'::numeric) from generate_series(1,3);
avg
-----
NaN
(1 row)
-- verify correct results for infinite inputs
SELECT sum(x::float8), avg(x::float8), var_pop(x::float8)
FROM (VALUES ('1'), ('infinity')) v(x);
sum | avg | var_pop
----------+----------+---------
Infinity | Infinity | NaN
(1 row)
SELECT sum(x::float8), avg(x::float8), var_pop(x::float8)
FROM (VALUES ('infinity'), ('1')) v(x);
sum | avg | var_pop
----------+----------+---------
Infinity | Infinity | NaN
(1 row)
SELECT sum(x::float8), avg(x::float8), var_pop(x::float8)
FROM (VALUES ('infinity'), ('infinity')) v(x);
sum | avg | var_pop
----------+----------+---------
Infinity | Infinity | NaN
(1 row)
SELECT sum(x::float8), avg(x::float8), var_pop(x::float8)
FROM (VALUES ('-infinity'), ('infinity')) v(x);
sum | avg | var_pop
-----+-----+---------
NaN | NaN | NaN
(1 row)
SELECT sum(x::float8), avg(x::float8), var_pop(x::float8)
FROM (VALUES ('-infinity'), ('-infinity')) v(x);
sum | avg | var_pop
-----------+-----------+---------
-Infinity | -Infinity | NaN
(1 row)
SELECT sum(x::numeric), avg(x::numeric), var_pop(x::numeric)
FROM (VALUES ('1'), ('infinity')) v(x);
sum | avg | var_pop
----------+----------+---------
Infinity | Infinity | NaN
(1 row)
SELECT sum(x::numeric), avg(x::numeric), var_pop(x::numeric)
FROM (VALUES ('infinity'), ('1')) v(x);
sum | avg | var_pop
----------+----------+---------
Infinity | Infinity | NaN
(1 row)
SELECT sum(x::numeric), avg(x::numeric), var_pop(x::numeric)
FROM (VALUES ('infinity'), ('infinity')) v(x);
sum | avg | var_pop
----------+----------+---------
Infinity | Infinity | NaN
(1 row)
SELECT sum(x::numeric), avg(x::numeric), var_pop(x::numeric)
FROM (VALUES ('-infinity'), ('infinity')) v(x);
sum | avg | var_pop
-----+-----+---------
NaN | NaN | NaN
(1 row)
SELECT sum(x::numeric), avg(x::numeric), var_pop(x::numeric)
FROM (VALUES ('-infinity'), ('-infinity')) v(x);
sum | avg | var_pop
-----------+-----------+---------
-Infinity | -Infinity | NaN
(1 row)
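-- Illustrative aside: these follow IEEE 754 semantics: same-signed
-- infinities sum to that infinity, Infinity + -Infinity is NaN, and any
-- variance involving an infinite input is NaN because the deviation
-- infinity - infinity is itself NaN.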
-- test accuracy with a large input offset
SELECT avg(x::float8), var_pop(x::float8)
FROM (VALUES (100000003), (100000004), (100000006), (100000007)) v(x);
avg | var_pop
-----------+---------
100000005 | 2.5
(1 row)
SELECT avg(x::float8), var_pop(x::float8)
FROM (VALUES (7000000000005), (7000000000007)) v(x);
avg | var_pop
---------------+---------
7000000000006 | 1
(1 row)
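-- Illustrative aside: the point of the large offset is numerical
-- stability.  A textbook sum(x^2) - sum(x)^2/n formulation squares
-- values near 1e8 (or 7e12), leaving almost no mantissa for a variance
-- of 2.5 (or 1); a hypothetical
--   SELECT sum(x*x) - sum(x) * sum(x) / count(*)::float8 AS naive_sxx
--   FROM (VALUES (7000000000005::float8), (7000000000007::float8)) v(x);
-- can cancel catastrophically, whereas accumulating deviations from the
-- running mean, as float8_accum does, keeps the small answer exact.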
-- SQL2003 binary aggregates
SELECT regr_count(b, a) FROM aggtest;
regr_count
------------
4
(1 row)
SELECT regr_sxx(b, a) FROM aggtest;
regr_sxx
----------
5099
(1 row)
SELECT regr_syy(b, a) FROM aggtest;
regr_syy
------------------
68756.2156939293
(1 row)
SELECT regr_sxy(b, a) FROM aggtest;
regr_sxy
------------------
2614.51582155004
(1 row)
SELECT regr_avgx(b, a), regr_avgy(b, a) FROM aggtest;
regr_avgx | regr_avgy
-----------+------------------
49.5 | 107.943152273074
(1 row)
SELECT regr_r2(b, a) FROM aggtest;
regr_r2
--------------------
0.0194977982031803
(1 row)
SELECT regr_slope(b, a), regr_intercept(b, a) FROM aggtest;
regr_slope | regr_intercept
-------------------+------------------
0.512750700441271 | 82.5619926012309
(1 row)
SELECT covar_pop(b, a), covar_samp(b, a) FROM aggtest;
covar_pop | covar_samp
-----------------+------------------
653.62895538751 | 871.505273850014
(1 row)
SELECT corr(b, a) FROM aggtest;
corr
-------------------
0.139634516517873
(1 row)
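-- Illustrative aside: the slope, intercept, and r2 above all follow
-- from the sums-of-squares aggregates: regr_slope = regr_sxy/regr_sxx,
-- regr_intercept = regr_avgy - regr_slope * regr_avgx, and
-- regr_r2 = regr_sxy^2 / (regr_sxx * regr_syy).  A hypothetical
-- cross-check:
--   SELECT regr_sxy(b, a) / regr_sxx(b, a) AS slope FROM aggtest;
-- reproduces regr_slope(b, a) above.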
-- check single-tuple behavior
SELECT covar_pop(1::float8,2::float8), covar_samp(3::float8,4::float8);
covar_pop | covar_samp
-----------+------------
0 |
(1 row)
SELECT covar_pop(1::float8,'inf'::float8), covar_samp(3::float8,'inf'::float8);
covar_pop | covar_samp
-----------+------------
NaN |
(1 row)
SELECT covar_pop(1::float8,'nan'::float8), covar_samp(3::float8,'nan'::float8);
covar_pop | covar_samp
-----------+------------
NaN |
(1 row)
-- test accum and combine functions directly
CREATE TABLE regr_test (x float8, y float8);
INSERT INTO regr_test VALUES (10,150),(20,250),(30,350),(80,540),(100,200);
SELECT count(*), sum(x), regr_sxx(y,x), sum(y),regr_syy(y,x), regr_sxy(y,x)
FROM regr_test WHERE x IN (10,20,30,80);
count | sum | regr_sxx | sum | regr_syy | regr_sxy
-------+-----+----------+------+----------+----------
4 | 140 | 2900 | 1290 | 83075 | 15050
(1 row)
SELECT count(*), sum(x), regr_sxx(y,x), sum(y),regr_syy(y,x), regr_sxy(y,x)
FROM regr_test;
count | sum | regr_sxx | sum | regr_syy | regr_sxy
-------+-----+----------+------+----------+----------
5 | 240 | 6280 | 1490 | 95080 | 8680
(1 row)
SELECT float8_accum('{4,140,2900}'::float8[], 100);
float8_accum
--------------
{5,240,6280}
(1 row)
SELECT float8_regr_accum('{4,140,2900,1290,83075,15050}'::float8[], 200, 100);
float8_regr_accum
------------------------------
{5,240,6280,1490,95080,8680}
(1 row)
SELECT count(*), sum(x), regr_sxx(y,x), sum(y),regr_syy(y,x), regr_sxy(y,x)
FROM regr_test WHERE x IN (10,20,30);
count | sum | regr_sxx | sum | regr_syy | regr_sxy
-------+-----+----------+-----+----------+----------
3 | 60 | 200 | 750 | 20000 | 2000
(1 row)
SELECT count(*), sum(x), regr_sxx(y,x), sum(y),regr_syy(y,x), regr_sxy(y,x)
FROM regr_test WHERE x IN (80,100);
count | sum | regr_sxx | sum | regr_syy | regr_sxy
-------+-----+----------+-----+----------+----------
2 | 180 | 200 | 740 | 57800 | -3400
(1 row)
SELECT float8_combine('{3,60,200}'::float8[], '{0,0,0}'::float8[]);
float8_combine
----------------
{3,60,200}
(1 row)
SELECT float8_combine('{0,0,0}'::float8[], '{2,180,200}'::float8[]);
float8_combine
----------------
{2,180,200}
(1 row)
SELECT float8_combine('{3,60,200}'::float8[], '{2,180,200}'::float8[]);
float8_combine
----------------
{5,240,6280}
(1 row)
SELECT float8_regr_combine('{3,60,200,750,20000,2000}'::float8[],
'{0,0,0,0,0,0}'::float8[]);
float8_regr_combine
---------------------------
{3,60,200,750,20000,2000}
(1 row)
SELECT float8_regr_combine('{0,0,0,0,0,0}'::float8[],
'{2,180,200,740,57800,-3400}'::float8[]);
float8_regr_combine
-----------------------------
{2,180,200,740,57800,-3400}
(1 row)
SELECT float8_regr_combine('{3,60,200,750,20000,2000}'::float8[],
'{2,180,200,740,57800,-3400}'::float8[]);
float8_regr_combine
------------------------------
{5,240,6280,1490,95080,8680}
(1 row)
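-- Illustrative aside: the transition state is the array {N, sum(X),
-- Sxx} for the unary moment aggregates and {N, sum(X), Sxx, sum(Y),
-- Syy, Sxy} for the binary ones; the combine functions merge two such
-- partial states, which is how per-worker partial aggregates are folded
-- together under parallel query.  Combining is order-independent; a
-- hypothetical
--   SELECT float8_combine('{2,180,200}'::float8[], '{3,60,200}'::float8[]);
-- also yields {5,240,6280}.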
DROP TABLE regr_test;
-- test count, distinct
SELECT count(four) AS cnt_1000 FROM onek;
cnt_1000
----------
1000
(1 row)
SELECT count(DISTINCT four) AS cnt_4 FROM onek;
cnt_4
-------
4
(1 row)
select ten, count(*), sum(four) from onek
group by ten order by ten;
ten | count | sum
-----+-------+-----
0 | 100 | 100
1 | 100 | 200
2 | 100 | 100
3 | 100 | 200
4 | 100 | 100
5 | 100 | 200
6 | 100 | 100
7 | 100 | 200
8 | 100 | 100
9 | 100 | 200
(10 rows)
select ten, count(four), sum(DISTINCT four) from onek
group by ten order by ten;
ten | count | sum
-----+-------+-----
0 | 100 | 2
1 | 100 | 4
2 | 100 | 2
3 | 100 | 4
4 | 100 | 2
5 | 100 | 4
6 | 100 | 2
7 | 100 | 4
8 | 100 | 2
9 | 100 | 4
(10 rows)
-- user-defined aggregates
SELECT newavg(four) AS avg_1 FROM onek;
avg_1
--------------------
1.5000000000000000
(1 row)
SELECT newsum(four) AS sum_1500 FROM onek;
sum_1500
----------
1500
(1 row)
SELECT newcnt(four) AS cnt_1000 FROM onek;
cnt_1000
----------
1000
(1 row)
SELECT newcnt(*) AS cnt_1000 FROM onek;
cnt_1000
----------
1000
(1 row)
SELECT oldcnt(*) AS cnt_1000 FROM onek;
cnt_1000
----------
1000
(1 row)
SELECT sum2(q1,q2) FROM int8_tbl;
sum2
-------------------
18271560493827981
(1 row)
-- test for outer-level aggregates
-- this should work
select ten, sum(distinct four) from onek a
group by ten
having exists (select 1 from onek b where sum(distinct a.four) = b.four);
ten | sum
-----+-----
0 | 2
2 | 2
4 | 2
6 | 2
8 | 2
(5 rows)
-- this should fail because subquery has an agg of its own in WHERE
select ten, sum(distinct four) from onek a
group by ten
having exists (select 1 from onek b
where sum(distinct a.four + b.four) = b.four);
ERROR: aggregate functions are not allowed in WHERE
LINE 4: where sum(distinct a.four + b.four) = b.four)...
^
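-- Illustrative aside: what matters is which query level owns the
-- aggregate.  sum(distinct a.four) references only outer-level columns,
-- so it is an outer-level aggregate, evaluated in the outer HAVING and
-- merely referenced from the subquery.  sum(distinct a.four + b.four)
-- also uses b.four, so it belongs to the subquery itself, where it
-- appears in WHERE and is rejected.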
-- Test handling of sublinks within outer-level aggregates.
-- Per bug report from Daniel Grace.
select
(select max((select i.unique2 from tenk1 i where i.unique1 = o.unique1)))
from tenk1 o;
max
------
9999
(1 row)
-- Test handling of Params within aggregate arguments in hashed aggregation.
-- Per bug report from Jeevan Chalke.
explain (verbose, costs off)
select s1, s2, sm
from generate_series(1, 3) s1,
lateral (select s2, sum(s1 + s2) sm
from generate_series(1, 3) s2 group by s2) ss
order by 1, 2;
QUERY PLAN
------------------------------------------------------------------
Sort
Output: s1.s1, s2.s2, (sum((s1.s1 + s2.s2)))
Sort Key: s1.s1, s2.s2
-> Nested Loop
Output: s1.s1, s2.s2, (sum((s1.s1 + s2.s2)))
-> Function Scan on pg_catalog.generate_series s1
Output: s1.s1
Function Call: generate_series(1, 3)
-> HashAggregate
Output: s2.s2, sum((s1.s1 + s2.s2))
Group Key: s2.s2
-> Function Scan on pg_catalog.generate_series s2
Output: s2.s2
Function Call: generate_series(1, 3)
(14 rows)
select s1, s2, sm
from generate_series(1, 3) s1,
lateral (select s2, sum(s1 + s2) sm
from generate_series(1, 3) s2 group by s2) ss
order by 1, 2;
s1 | s2 | sm
----+----+----
1 | 1 | 2
1 | 2 | 3
1 | 3 | 4
2 | 1 | 3
2 | 2 | 4
2 | 3 | 5
3 | 1 | 4
3 | 2 | 5
3 | 3 | 6
(9 rows)
explain (verbose, costs off)
select array(select sum(x+y) s
from generate_series(1,3) y group by y order by s)
from generate_series(1,3) x;
QUERY PLAN
-------------------------------------------------------------------
Function Scan on pg_catalog.generate_series x
Output: ARRAY(SubPlan 1)
Function Call: generate_series(1, 3)
SubPlan 1
-> Sort
Output: (sum((x.x + y.y))), y.y
Sort Key: (sum((x.x + y.y)))
-> HashAggregate
Output: sum((x.x + y.y)), y.y
Group Key: y.y
-> Function Scan on pg_catalog.generate_series y
Output: y.y
Function Call: generate_series(1, 3)
(13 rows)
select array(select sum(x+y) s
from generate_series(1,3) y group by y order by s)
from generate_series(1,3) x;
array
---------
{2,3,4}
{3,4,5}
{4,5,6}
(3 rows)
--
-- test for bitwise integer aggregates
--
CREATE TEMPORARY TABLE bitwise_test(
i2 INT2,
i4 INT4,
i8 INT8,
i INTEGER,
x INT2,
y BIT(4)
);
-- empty case
SELECT
BIT_AND(i2) AS "?",
BIT_OR(i4) AS "?",
BIT_XOR(i8) AS "?"
FROM bitwise_test;
? | ? | ?
---+---+---
| |
(1 row)
COPY bitwise_test FROM STDIN NULL 'null';
SELECT
BIT_AND(i2) AS "1",
BIT_AND(i4) AS "1",
BIT_AND(i8) AS "1",
BIT_AND(i) AS "?",
BIT_AND(x) AS "0",
BIT_AND(y) AS "0100",
BIT_OR(i2) AS "7",
BIT_OR(i4) AS "7",
BIT_OR(i8) AS "7",
BIT_OR(i) AS "?",
BIT_OR(x) AS "7",
BIT_OR(y) AS "1101",
BIT_XOR(i2) AS "5",
BIT_XOR(i4) AS "5",
BIT_XOR(i8) AS "5",
BIT_XOR(i) AS "?",
BIT_XOR(x) AS "7",
BIT_XOR(y) AS "1101"
FROM bitwise_test;
1 | 1 | 1 | ? | 0 | 0100 | 7 | 7 | 7 | ? | 7 | 1101 | 5 | 5 | 5 | ? | 7 | 1101
---+---+---+---+---+------+---+---+---+---+---+------+---+---+---+---+---+------
1 | 1 | 1 | 1 | 0 | 0100 | 7 | 7 | 7 | 3 | 7 | 1101 | 5 | 5 | 5 | 2 | 7 | 1101
(1 row)
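-- Illustrative aside (hypothetical values, not the elided COPY data):
-- BIT_XOR folds its inputs with the # operator, so
--   SELECT bit_xor(v) FROM (VALUES (1), (3), (7)) t(v);
-- gives 5 (1 # 3 = 2, 2 # 7 = 5); BIT_AND and BIT_OR fold with & and |
-- the same way, and NULL inputs are skipped by the strict transition
-- functions.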
--
-- test boolean aggregates
--
-- first test all possible transition and final states
SELECT
-- boolean and transitions
-- null because strict
booland_statefunc(NULL, NULL) IS NULL AS "t",
booland_statefunc(TRUE, NULL) IS NULL AS "t",
booland_statefunc(FALSE, NULL) IS NULL AS "t",
booland_statefunc(NULL, TRUE) IS NULL AS "t",
booland_statefunc(NULL, FALSE) IS NULL AS "t",
-- and actual computations
booland_statefunc(TRUE, TRUE) AS "t",
NOT booland_statefunc(TRUE, FALSE) AS "t",
NOT booland_statefunc(FALSE, TRUE) AS "t",
NOT booland_statefunc(FALSE, FALSE) AS "t";
t | t | t | t | t | t | t | t | t
---+---+---+---+---+---+---+---+---
t | t | t | t | t | t | t | t | t
(1 row)
SELECT
-- boolean or transitions
-- null because strict
boolor_statefunc(NULL, NULL) IS NULL AS "t",
boolor_statefunc(TRUE, NULL) IS NULL AS "t",
boolor_statefunc(FALSE, NULL) IS NULL AS "t",
boolor_statefunc(NULL, TRUE) IS NULL AS "t",
boolor_statefunc(NULL, FALSE) IS NULL AS "t",
-- actual computations
boolor_statefunc(TRUE, TRUE) AS "t",
boolor_statefunc(TRUE, FALSE) AS "t",
boolor_statefunc(FALSE, TRUE) AS "t",
NOT boolor_statefunc(FALSE, FALSE) AS "t";
t | t | t | t | t | t | t | t | t
---+---+---+---+---+---+---+---+---
t | t | t | t | t | t | t | t | t
(1 row)
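-- Illustrative aside: because the transition functions are strict, the
-- aggregate machinery skips NULL inputs rather than feeding them in, so
-- a hypothetical
--   SELECT bool_and(x) FROM (VALUES (true), (NULL::bool)) v(x);
-- returns true; only an input with no non-null rows (like b4 below)
-- yields NULL.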
CREATE TEMPORARY TABLE bool_test(
b1 BOOL,
b2 BOOL,
b3 BOOL,
b4 BOOL);
-- empty case
SELECT
BOOL_AND(b1) AS "n",
BOOL_OR(b3) AS "n"
FROM bool_test;
n | n
---+---
|
(1 row)
COPY bool_test FROM STDIN NULL 'null';
SELECT
BOOL_AND(b1) AS "f",
BOOL_AND(b2) AS "t",
BOOL_AND(b3) AS "f",
BOOL_AND(b4) AS "n",
BOOL_AND(NOT b2) AS "f",
BOOL_AND(NOT b3) AS "t"
FROM bool_test;
f | t | f | n | f | t
---+---+---+---+---+---
f | t | f | | f | t
(1 row)
SELECT
EVERY(b1) AS "f",
EVERY(b2) AS "t",
EVERY(b3) AS "f",
EVERY(b4) AS "n",
EVERY(NOT b2) AS "f",
EVERY(NOT b3) AS "t"
FROM bool_test;
f | t | f | n | f | t
---+---+---+---+---+---
f | t | f | | f | t
(1 row)
SELECT
BOOL_OR(b1) AS "t",
BOOL_OR(b2) AS "t",
BOOL_OR(b3) AS "f",
BOOL_OR(b4) AS "n",
BOOL_OR(NOT b2) AS "f",
BOOL_OR(NOT b3) AS "t"
FROM bool_test;
t | t | f | n | f | t
---+---+---+---+---+---
t | t | f | | f | t
(1 row)
--
-- Test cases that should be optimized into indexscans instead of
-- the generic aggregate implementation.
--
-- Basic cases
explain (costs off)
select min(unique1) from tenk1;
QUERY PLAN
------------------------------------------------------------
Result
InitPlan 1
-> Limit
-> Index Only Scan using tenk1_unique1 on tenk1
Index Cond: (unique1 IS NOT NULL)
(5 rows)
select min(unique1) from tenk1;
min
-----
0
(1 row)
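-- Illustrative aside: the InitPlan is the planner's rewrite of
-- min(unique1) into an equivalent ordered index probe, roughly the
-- hypothetical
--   SELECT unique1 FROM tenk1
--   WHERE unique1 IS NOT NULL
--   ORDER BY unique1 ASC LIMIT 1;
-- max() gets the same shape with a backward scan, as the next plans
-- show.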
explain (costs off)
select max(unique1) from tenk1;
QUERY PLAN
---------------------------------------------------------------------
Result
InitPlan 1
-> Limit
-> Index Only Scan Backward using tenk1_unique1 on tenk1
Index Cond: (unique1 IS NOT NULL)
(5 rows)
select max(unique1) from tenk1;
max
------
9999
(1 row)
explain (costs off)
select max(unique1) from tenk1 where unique1 < 42;
QUERY PLAN
------------------------------------------------------------------------
Result
InitPlan 1
-> Limit
-> Index Only Scan Backward using tenk1_unique1 on tenk1
Index Cond: ((unique1 IS NOT NULL) AND (unique1 < 42))
(5 rows)
select max(unique1) from tenk1 where unique1 < 42;
max
-----
41
(1 row)
explain (costs off)
select max(unique1) from tenk1 where unique1 > 42;
QUERY PLAN
------------------------------------------------------------------------
Result
InitPlan 1
-> Limit
-> Index Only Scan Backward using tenk1_unique1 on tenk1
Index Cond: ((unique1 IS NOT NULL) AND (unique1 > 42))
(5 rows)
select max(unique1) from tenk1 where unique1 > 42;
max
------
9999
(1 row)
-- the planner may choose a generic aggregate here if parallel query is
-- enabled, since that plan will be parallel safe and the "optimized"
-- plan, which has almost identical cost, will not be. we want to test
-- the optimized plan, so temporarily disable parallel query.
begin;
set local max_parallel_workers_per_gather = 0;
explain (costs off)
select max(unique1) from tenk1 where unique1 > 42000;
QUERY PLAN
---------------------------------------------------------------------------
Result
InitPlan 1
-> Limit
-> Index Only Scan Backward using tenk1_unique1 on tenk1
Index Cond: ((unique1 IS NOT NULL) AND (unique1 > 42000))
(5 rows)
select max(unique1) from tenk1 where unique1 > 42000;
max
-----
(1 row)
rollback;
-- multi-column index (uses tenk1_thous_tenthous)
explain (costs off)
select max(tenthous) from tenk1 where thousand = 33;
QUERY PLAN
----------------------------------------------------------------------------
Result
InitPlan 1
-> Limit
-> Index Only Scan Backward using tenk1_thous_tenthous on tenk1
Index Cond: ((thousand = 33) AND (tenthous IS NOT NULL))
(5 rows)
select max(tenthous) from tenk1 where thousand = 33;
max
------
9033
(1 row)
explain (costs off)
select min(tenthous) from tenk1 where thousand = 33;
QUERY PLAN
--------------------------------------------------------------------------
Result
InitPlan 1
-> Limit
-> Index Only Scan using tenk1_thous_tenthous on tenk1
Index Cond: ((thousand = 33) AND (tenthous IS NOT NULL))
(5 rows)
select min(tenthous) from tenk1 where thousand = 33;
min
-----
33
(1 row)
-- check parameter propagation into an indexscan subquery
explain (costs off)
select f1, (select min(unique1) from tenk1 where unique1 > f1) AS gt
from int4_tbl;
QUERY PLAN
-----------------------------------------------------------------------------------------
Seq Scan on int4_tbl
SubPlan 2
-> Result
InitPlan 1
-> Limit
-> Index Only Scan using tenk1_unique1 on tenk1
Index Cond: ((unique1 IS NOT NULL) AND (unique1 > int4_tbl.f1))
(7 rows)
select f1, (select min(unique1) from tenk1 where unique1 > f1) AS gt
from int4_tbl;
f1 | gt
-------------+----
0 | 1
123456 |
-123456 | 0
2147483647 |
-2147483647 | 0
(5 rows)
-- check some cases that were handled incorrectly in 8.3.0
explain (costs off)
select distinct max(unique2) from tenk1;
QUERY PLAN
---------------------------------------------------------------------
HashAggregate
Group Key: (InitPlan 1).col1
InitPlan 1
-> Limit
-> Index Only Scan Backward using tenk1_unique2 on tenk1
Index Cond: (unique2 IS NOT NULL)
-> Result
(7 rows)
select distinct max(unique2) from tenk1;
max
------
9999
(1 row)
explain (costs off)
select max(unique2) from tenk1 order by 1;
QUERY PLAN
---------------------------------------------------------------------
Sort
Sort Key: ((InitPlan 1).col1)
InitPlan 1
-> Limit
-> Index Only Scan Backward using tenk1_unique2 on tenk1
Index Cond: (unique2 IS NOT NULL)
-> Result
(7 rows)
select max(unique2) from tenk1 order by 1;
max
------
9999
(1 row)
explain (costs off)
select max(unique2) from tenk1 order by max(unique2);
QUERY PLAN
---------------------------------------------------------------------
Sort
Sort Key: ((InitPlan 1).col1)
InitPlan 1
-> Limit
-> Index Only Scan Backward using tenk1_unique2 on tenk1
Index Cond: (unique2 IS NOT NULL)
-> Result
(7 rows)
select max(unique2) from tenk1 order by max(unique2);
max
------
9999
(1 row)
explain (costs off)
select max(unique2) from tenk1 order by max(unique2)+1;
QUERY PLAN
---------------------------------------------------------------------
Sort
Sort Key: (((InitPlan 1).col1 + 1))
InitPlan 1
-> Limit
-> Index Only Scan Backward using tenk1_unique2 on tenk1
Index Cond: (unique2 IS NOT NULL)
-> Result
(7 rows)
select max(unique2) from tenk1 order by max(unique2)+1;
max
------
9999
(1 row)
explain (costs off)
select max(unique2), generate_series(1,3) as g from tenk1 order by g desc;
QUERY PLAN
---------------------------------------------------------------------
Sort
Sort Key: (generate_series(1, 3)) DESC
InitPlan 1
-> Limit
-> Index Only Scan Backward using tenk1_unique2 on tenk1
Index Cond: (unique2 IS NOT NULL)
-> ProjectSet
-> Result
(8 rows)
select max(unique2), generate_series(1,3) as g from tenk1 order by g desc;
max | g
------+---
9999 | 3
9999 | 2
9999 | 1
(3 rows)
-- interesting corner case: constant gets optimized into a seqscan
explain (costs off)
select max(100) from tenk1;
QUERY PLAN
----------------------------------------------------
Result
InitPlan 1
-> Limit
-> Result
One-Time Filter: (100 IS NOT NULL)
-> Seq Scan on tenk1
(6 rows)
select max(100) from tenk1;
max
-----
100
(1 row)
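-- Illustrative aside: with a constant argument every row yields the
-- same value, so the probe degenerates to roughly the hypothetical
--   SELECT 100 FROM tenk1 WHERE 100 IS NOT NULL LIMIT 1;
-- the seqscan under the Limit only has to prove the table is nonempty
-- (over an empty table, max(100) is still NULL).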
-- try it on an inheritance tree
create table minmaxtest(f1 int);
create table minmaxtest1() inherits (minmaxtest);
create table minmaxtest2() inherits (minmaxtest);
create table minmaxtest3() inherits (minmaxtest);
create index minmaxtesti on minmaxtest(f1);
create index minmaxtest1i on minmaxtest1(f1);
create index minmaxtest2i on minmaxtest2(f1 desc);
create index minmaxtest3i on minmaxtest3(f1) where f1 is not null;
insert into minmaxtest values(11), (12);
insert into minmaxtest1 values(13), (14);
insert into minmaxtest2 values(15), (16);
insert into minmaxtest3 values(17), (18);
explain (costs off)
select min(f1), max(f1) from minmaxtest;
QUERY PLAN
---------------------------------------------------------------------------------------------
Result
InitPlan 1
-> Limit
-> Merge Append
Sort Key: minmaxtest.f1
-> Index Only Scan using minmaxtesti on minmaxtest minmaxtest_1
Index Cond: (f1 IS NOT NULL)
-> Index Only Scan using minmaxtest1i on minmaxtest1 minmaxtest_2
Index Cond: (f1 IS NOT NULL)
-> Index Only Scan Backward using minmaxtest2i on minmaxtest2 minmaxtest_3
Index Cond: (f1 IS NOT NULL)
-> Index Only Scan using minmaxtest3i on minmaxtest3 minmaxtest_4
InitPlan 2
-> Limit
-> Merge Append
Sort Key: minmaxtest_5.f1 DESC
-> Index Only Scan Backward using minmaxtesti on minmaxtest minmaxtest_6
Index Cond: (f1 IS NOT NULL)
-> Index Only Scan Backward using minmaxtest1i on minmaxtest1 minmaxtest_7
Index Cond: (f1 IS NOT NULL)
-> Index Only Scan using minmaxtest2i on minmaxtest2 minmaxtest_8
Index Cond: (f1 IS NOT NULL)
-> Index Only Scan Backward using minmaxtest3i on minmaxtest3 minmaxtest_9
(23 rows)
select min(f1), max(f1) from minmaxtest;
min | max
-----+-----
11 | 18
(1 row)
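-- Illustrative aside: minmaxtest3i is a partial index built WHERE f1 IS
-- NOT NULL, so its scans in the plan above need no Index Cond; the
-- predicate already guarantees it.  minmaxtest2i is a DESC index, which
-- is why the min() side scans it backward while the max() side scans it
-- forward.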
-- DISTINCT doesn't do anything useful here, but it shouldn't fail
explain (costs off)
select distinct min(f1), max(f1) from minmaxtest;
QUERY PLAN
---------------------------------------------------------------------------------------------
Revert "Optimize order of GROUP BY keys". This reverts commit db0d67db2401eb6238ccc04c6407a4fd4f985832 and several follow-on fixes. The idea of making a cost-based choice of the order of the sorting columns is not fundamentally unsound, but it requires cost information and data statistics that we don't really have. For example, relying on procost to distinguish the relative costs of different sort comparators is pretty pointless so long as most such comparator functions are labeled with cost 1.0. Moreover, estimating the number of comparisons done by Quicksort requires more than just an estimate of the number of distinct values in the input: you also need some idea of the sizes of the larger groups, if you want an estimate that's good to better than a factor of three or so. That's data that's often unknown or not very reliable. Worse, to arrive at estimates of the number of calls made to the lower-order-column comparison functions, the code needs to make estimates of the numbers of distinct values of multiple columns, which are necessarily even less trustworthy than per-column stats. Even if all the inputs are perfectly reliable, the cost algorithm as-implemented cannot offer useful information about how to order sorting columns beyond the point at which the average group size is estimated to drop to 1. Close inspection of the code added by db0d67db2 shows that there are also multiple small bugs. These could have been fixed, but there's not much point if we don't trust the estimates to be accurate in-principle. Finally, the changes in cost_sort's behavior made for very large changes (often a factor of 2 or so) in the cost estimates for all sorting operations, not only those for multi-column GROUP BY. That naturally changes plan choices in many situations, and there's precious little evidence to show that the changes are for the better. Given the above doubts about whether the new estimates are really trustworthy, it's hard to summon much confidence that these changes are better on the average. Since we're hard up against the release deadline for v15, let's revert these changes for now. We can always try again later. Note: in v15, I left T_PathKeyInfo in place in nodes.h even though it's unreferenced. Removing it would be an ABI break, and it seems a bit late in the release cycle for that. Discussion: https://postgr.es/m/TYAPR01MB586665EB5FB2C3807E893941F5579@TYAPR01MB5866.jpnprd01.prod.outlook.com
2022-10-03 16:56:16 +02:00
Unique
InitPlan 1
-> Limit
-> Merge Append
Sort Key: minmaxtest.f1
-> Index Only Scan using minmaxtesti on minmaxtest minmaxtest_1
Index Cond: (f1 IS NOT NULL)
-> Index Only Scan using minmaxtest1i on minmaxtest1 minmaxtest_2
Index Cond: (f1 IS NOT NULL)
-> Index Only Scan Backward using minmaxtest2i on minmaxtest2 minmaxtest_3
Index Cond: (f1 IS NOT NULL)
-> Index Only Scan using minmaxtest3i on minmaxtest3 minmaxtest_4
InitPlan 2
-> Limit
-> Merge Append
Sort Key: minmaxtest_5.f1 DESC
-> Index Only Scan Backward using minmaxtesti on minmaxtest minmaxtest_6
Index Cond: (f1 IS NOT NULL)
-> Index Only Scan Backward using minmaxtest1i on minmaxtest1 minmaxtest_7
Index Cond: (f1 IS NOT NULL)
-> Index Only Scan using minmaxtest2i on minmaxtest2 minmaxtest_8
Index Cond: (f1 IS NOT NULL)
-> Index Only Scan Backward using minmaxtest3i on minmaxtest3 minmaxtest_9
Revert "Optimize order of GROUP BY keys". This reverts commit db0d67db2401eb6238ccc04c6407a4fd4f985832 and several follow-on fixes. The idea of making a cost-based choice of the order of the sorting columns is not fundamentally unsound, but it requires cost information and data statistics that we don't really have. For example, relying on procost to distinguish the relative costs of different sort comparators is pretty pointless so long as most such comparator functions are labeled with cost 1.0. Moreover, estimating the number of comparisons done by Quicksort requires more than just an estimate of the number of distinct values in the input: you also need some idea of the sizes of the larger groups, if you want an estimate that's good to better than a factor of three or so. That's data that's often unknown or not very reliable. Worse, to arrive at estimates of the number of calls made to the lower-order-column comparison functions, the code needs to make estimates of the numbers of distinct values of multiple columns, which are necessarily even less trustworthy than per-column stats. Even if all the inputs are perfectly reliable, the cost algorithm as-implemented cannot offer useful information about how to order sorting columns beyond the point at which the average group size is estimated to drop to 1. Close inspection of the code added by db0d67db2 shows that there are also multiple small bugs. These could have been fixed, but there's not much point if we don't trust the estimates to be accurate in-principle. Finally, the changes in cost_sort's behavior made for very large changes (often a factor of 2 or so) in the cost estimates for all sorting operations, not only those for multi-column GROUP BY. That naturally changes plan choices in many situations, and there's precious little evidence to show that the changes are for the better. Given the above doubts about whether the new estimates are really trustworthy, it's hard to summon much confidence that these changes are better on the average. Since we're hard up against the release deadline for v15, let's revert these changes for now. We can always try again later. Note: in v15, I left T_PathKeyInfo in place in nodes.h even though it's unreferenced. Removing it would be an ABI break, and it seems a bit late in the release cycle for that. Discussion: https://postgr.es/m/TYAPR01MB586665EB5FB2C3807E893941F5579@TYAPR01MB5866.jpnprd01.prod.outlook.com
2022-10-03 16:56:16 +02:00
-> Sort
Sort Key: ((InitPlan 1).col1), ((InitPlan 2).col1)
Revert "Optimize order of GROUP BY keys". This reverts commit db0d67db2401eb6238ccc04c6407a4fd4f985832 and several follow-on fixes. The idea of making a cost-based choice of the order of the sorting columns is not fundamentally unsound, but it requires cost information and data statistics that we don't really have. For example, relying on procost to distinguish the relative costs of different sort comparators is pretty pointless so long as most such comparator functions are labeled with cost 1.0. Moreover, estimating the number of comparisons done by Quicksort requires more than just an estimate of the number of distinct values in the input: you also need some idea of the sizes of the larger groups, if you want an estimate that's good to better than a factor of three or so. That's data that's often unknown or not very reliable. Worse, to arrive at estimates of the number of calls made to the lower-order-column comparison functions, the code needs to make estimates of the numbers of distinct values of multiple columns, which are necessarily even less trustworthy than per-column stats. Even if all the inputs are perfectly reliable, the cost algorithm as-implemented cannot offer useful information about how to order sorting columns beyond the point at which the average group size is estimated to drop to 1. Close inspection of the code added by db0d67db2 shows that there are also multiple small bugs. These could have been fixed, but there's not much point if we don't trust the estimates to be accurate in-principle. Finally, the changes in cost_sort's behavior made for very large changes (often a factor of 2 or so) in the cost estimates for all sorting operations, not only those for multi-column GROUP BY. That naturally changes plan choices in many situations, and there's precious little evidence to show that the changes are for the better. Given the above doubts about whether the new estimates are really trustworthy, it's hard to summon much confidence that these changes are better on the average. Since we're hard up against the release deadline for v15, let's revert these changes for now. We can always try again later. Note: in v15, I left T_PathKeyInfo in place in nodes.h even though it's unreferenced. Removing it would be an ABI break, and it seems a bit late in the release cycle for that. Discussion: https://postgr.es/m/TYAPR01MB586665EB5FB2C3807E893941F5579@TYAPR01MB5866.jpnprd01.prod.outlook.com
2022-10-03 16:56:16 +02:00
-> Result
(26 rows)
select distinct min(f1), max(f1) from minmaxtest;
min | max
-----+-----
11 | 18
(1 row)
drop table minmaxtest cascade;
NOTICE: drop cascades to 3 other objects
DETAIL: drop cascades to table minmaxtest1
drop cascades to table minmaxtest2
drop cascades to table minmaxtest3
-- check for correct detection of nested-aggregate errors
select max(min(unique1)) from tenk1;
ERROR: aggregate function calls cannot be nested
LINE 1: select max(min(unique1)) from tenk1;
^
select (select max(min(unique1)) from int8_tbl) from tenk1;
ERROR: aggregate function calls cannot be nested
LINE 1: select (select max(min(unique1)) from int8_tbl) from tenk1;
^
select avg((select avg(a1.col1 order by (select avg(a2.col2) from tenk1 a3))
from tenk1 a1(col1)))
from tenk1 a2(col2);
ERROR: aggregate function calls cannot be nested
LINE 1: select avg((select avg(a1.col1 order by (select avg(a2.col2)...
^
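-- For reference (illustrative sketch, not part of the original test suite):
-- the legal way to express a max-of-min is a subquery. This assumes tenk1's
-- thousand column is unique1 mod 1000, so each group's min is its thousand
-- value and the overall max is 999.
select max(m) from (select min(unique1) as m from tenk1 group by thousand) s;
 max 
-----
 999
(1 row)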
--
-- Test removal of redundant GROUP BY columns
--
create temp table t1 (a int, b int, c int, d int, primary key (a, b));
create temp table t2 (x int, y int, z int, primary key (x, y));
create temp table t3 (a int, b int, c int, primary key(a, b) deferrable);
-- Non-primary-key columns can be removed from GROUP BY
explain (costs off) select * from t1 group by a,b,c,d;
QUERY PLAN
----------------------
HashAggregate
Group Key: a, b
-> Seq Scan on t1
(3 rows)
-- No removal can happen if the complete PK is not present in GROUP BY
explain (costs off) select a,c from t1 group by a,c,d;
QUERY PLAN
----------------------
HashAggregate
Group Key: a, c, d
-> Seq Scan on t1
(3 rows)
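-- For contrast (illustrative sketch, not part of the original test suite):
-- once the complete PK is grouped, functionally dependent columns may be
-- selected without aggregating or grouping them.
select a, b, c, d from t1 group by a, b;
 a | b | c | d 
---+---+---+---
(0 rows)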
-- Test removal across multiple relations
explain (costs off) select *
from t1 inner join t2 on t1.a = t2.x and t1.b = t2.y
group by t1.a,t1.b,t1.c,t1.d,t2.x,t2.y,t2.z;
QUERY PLAN
------------------------------------------------------
HashAggregate
Group Key: t1.a, t1.b
-> Hash Join
Hash Cond: ((t2.x = t1.a) AND (t2.y = t1.b))
-> Seq Scan on t2
-> Hash
-> Seq Scan on t1
(7 rows)
-- Test case where t1 can be optimized but not t2
explain (costs off) select t1.*,t2.x,t2.z
from t1 inner join t2 on t1.a = t2.x and t1.b = t2.y
group by t1.a,t1.b,t1.c,t1.d,t2.x,t2.z;
QUERY PLAN
------------------------------------------------------
HashAggregate
Group Key: t1.a, t1.b, t2.z
-> Hash Join
Hash Cond: ((t2.x = t1.a) AND (t2.y = t1.b))
-> Seq Scan on t2
-> Hash
-> Seq Scan on t1
(7 rows)
-- Cannot optimize when the PK is deferrable: a deferrable constraint need not
-- hold at every instant, so it cannot prove functional dependency
explain (costs off) select * from t3 group by a,b,c;
QUERY PLAN
----------------------
HashAggregate
Group Key: a, b, c
-> Seq Scan on t3
(3 rows)
create temp table t1c () inherits (t1);
-- Ensure we don't remove any columns when t1 has a child table
explain (costs off) select * from t1 group by a,b,c,d;
QUERY PLAN
-------------------------------------
HashAggregate
Group Key: t1.a, t1.b, t1.c, t1.d
-> Append
-> Seq Scan on t1 t1_1
-> Seq Scan on t1c t1_2
(5 rows)
-- Okay to remove columns if we're only querying the parent.
explain (costs off) select * from only t1 group by a,b,c,d;
QUERY PLAN
----------------------
HashAggregate
Group Key: a, b
-> Seq Scan on t1
(3 rows)
create temp table p_t1 (
a int,
b int,
c int,
d int,
primary key(a,b)
) partition by list(a);
create temp table p_t1_1 partition of p_t1 for values in(1);
create temp table p_t1_2 partition of p_t1 for values in(2);
-- Ensure we can remove non-PK columns for partitioned tables.
explain (costs off) select * from p_t1 group by a,b,c,d;
QUERY PLAN
--------------------------------
HashAggregate
Group Key: p_t1.a, p_t1.b
-> Append
-> Seq Scan on p_t1_1
-> Seq Scan on p_t1_2
(5 rows)
drop table t1 cascade;
NOTICE: drop cascades to table t1c
drop table t2;
drop table t3;
drop table p_t1;
--
-- Test GROUP BY matching of join columns that are type-coerced due to USING
--
create temp table t1(f1 int, f2 int);
create temp table t2(f1 bigint, f2 oid);
select f1 from t1 left join t2 using (f1) group by f1;
f1
----
(0 rows)
select f1 from t1 left join t2 using (f1) group by t1.f1;
f1
----
(0 rows)
select t1.f1 from t1 left join t2 using (f1) group by t1.f1;
f1
----
(0 rows)
-- only this one should fail: the parser treats the join's merged f1 column
-- and t1.f1 as different expressions
select t1.f1 from t1 left join t2 using (f1) group by f1;
ERROR: column "t1.f1" must appear in the GROUP BY clause or be used in an aggregate function
LINE 1: select t1.f1 from t1 left join t2 using (f1) group by f1;
^
-- check case where we have to inject nullingrels into coerced join alias
select f1, count(*) from
t1 x(x0,x1) left join (t1 left join t2 using(f1)) on (x0 = 0)
group by f1;
f1 | count
----+-------
(0 rows)
-- same, for a RelabelType coercion
select f2, count(*) from
t1 x(x0,x1) left join (t1 left join t2 using(f2)) on (x0 = 0)
group by f2;
f2 | count
----+-------
(0 rows)
drop table t1, t2;
--
-- Test planner's selection of pathkeys for ORDER BY aggregates
--
-- Ensure we order by four. This suits the largest number of aggregate functions.
explain (costs off)
select sum(two order by two),max(four order by four), min(four order by four)
from tenk1;
QUERY PLAN
-------------------------------
Aggregate
-> Sort
Sort Key: four
-> Seq Scan on tenk1
(4 rows)
-- Ensure we order by two. It's a tie between ordering by two and by four,
-- but we break the tie on the aggregates' positions in the target list.
explain (costs off)
select
sum(two order by two), max(four order by four),
min(four order by four), max(two order by two)
from tenk1;
QUERY PLAN
-------------------------------
Aggregate
-> Sort
Sort Key: two
-> Seq Scan on tenk1
(4 rows)
-- Similar to above, but the tie now breaks in favor of ordering by four
explain (costs off)
select
max(four order by four), sum(two order by two),
min(four order by four), max(two order by two)
from tenk1;
QUERY PLAN
-------------------------------
Aggregate
-> Sort
Sort Key: four
-> Seq Scan on tenk1
(4 rows)
-- Ensure this one orders by ten, since three aggregates require ten versus
-- two each that suit two and four.
explain (costs off)
select
max(four order by four), sum(two order by two),
min(four order by four), max(two order by two),
sum(ten order by ten), min(ten order by ten), max(ten order by ten)
from tenk1;
QUERY PLAN
-------------------------------
Aggregate
-> Sort
Sort Key: ten
-> Seq Scan on tenk1
(4 rows)
-- Try a case involving a GROUP BY clause where the GROUP BY column is also
-- part of an aggregate's ORDER BY clause. We want a sort order that works
-- for the GROUP BY along with the first and the last aggregate.
explain (costs off)
select
sum(unique1 order by ten, two), sum(unique1 order by four),
sum(unique1 order by two, four)
from tenk1
group by ten;
QUERY PLAN
----------------------------------
GroupAggregate
Group Key: ten
-> Sort
Sort Key: ten, two, four
-> Seq Scan on tenk1
(5 rows)
-- Ensure that we never choose to provide presorted input to an Aggref with
-- a volatile function in the ORDER BY / DISTINCT clause. We want to ensure
-- these sorts are performed individually rather than at the query level.
explain (costs off)
select
sum(unique1 order by two), sum(unique1 order by four),
sum(unique1 order by four, two), sum(unique1 order by two, random()),
sum(unique1 order by two, random(), random() + 1)
from tenk1
group by ten;
QUERY PLAN
----------------------------------
GroupAggregate
Group Key: ten
-> Sort
Sort Key: ten, four, two
-> Seq Scan on tenk1
(5 rows)
-- Ensure consecutive NULLs are properly collapsed: DISTINCT treats NULLs as
-- not distinct from each other
select array_agg(distinct val)
from (select null as val from generate_series(1, 2));
array_agg
-----------
{NULL}
(1 row)
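-- For contrast (illustrative sketch, not part of the original test suite):
-- count(distinct ...) ignores NULL inputs entirely, unlike array_agg.
select count(distinct val)
from (select null as val from generate_series(1, 2));
 count 
-------
     0
(1 row)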
-- Ensure no ordering is requested when enable_presorted_aggregate is off
set enable_presorted_aggregate to off;
explain (costs off)
select sum(two order by two) from tenk1;
QUERY PLAN
-------------------------
Aggregate
-> Seq Scan on tenk1
(2 rows)
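-- The query result is unaffected; only the plan changes. An illustrative
-- sketch (not part of the original test suite), assuming tenk1.two is
-- unique1 mod 2 over 10000 rows:
select sum(two order by two) from tenk1;
 sum  
------
 5000
(1 row)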
reset enable_presorted_aggregate;
--
-- Test combinations of DISTINCT and/or ORDER BY
--
select array_agg(a order by b)
from (values (1,4),(2,3),(3,1),(4,2)) v(a,b);
array_agg
-----------
{3,4,2,1}
(1 row)
select array_agg(a order by a)
from (values (1,4),(2,3),(3,1),(4,2)) v(a,b);
array_agg
-----------
{1,2,3,4}
(1 row)
select array_agg(a order by a desc)
from (values (1,4),(2,3),(3,1),(4,2)) v(a,b);
array_agg
-----------
{4,3,2,1}
(1 row)
select array_agg(b order by a desc)
from (values (1,4),(2,3),(3,1),(4,2)) v(a,b);
array_agg
-----------
{2,1,3,4}
(1 row)
select array_agg(distinct a)
from (values (1),(2),(1),(3),(null),(2)) v(a);
array_agg
--------------
{1,2,3,NULL}
(1 row)
select array_agg(distinct a order by a)
from (values (1),(2),(1),(3),(null),(2)) v(a);
array_agg
--------------
{1,2,3,NULL}
(1 row)
select array_agg(distinct a order by a desc)
from (values (1),(2),(1),(3),(null),(2)) v(a);
array_agg
--------------
{NULL,3,2,1}
(1 row)
select array_agg(distinct a order by a desc nulls last)
from (values (1),(2),(1),(3),(null),(2)) v(a);
array_agg
--------------
{3,2,1,NULL}
(1 row)
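-- For completeness (illustrative sketch, not part of the original test
-- suite): DESC implies NULLS FIRST by default, matching the plain DESC case.
select array_agg(distinct a order by a desc nulls first)
from (values (1),(2),(1),(3),(null),(2)) v(a);
  array_agg   
--------------
 {NULL,3,2,1}
(1 row)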
-- multi-arg aggs, strict/nonstrict, distinct/order by
select aggfstr(a,b,c)
from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c);
aggfstr
---------------------------------------
{"(1,3,foo)","(2,2,bar)","(3,1,baz)"}
(1 row)
select aggfns(a,b,c)
from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c);
aggfns
-----------------------------------------------
{"(1,3,foo)","(0,,)","(2,2,bar)","(3,1,baz)"}
(1 row)
select aggfstr(distinct a,b,c)
from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
generate_series(1,3) i;
aggfstr
---------------------------------------
{"(1,3,foo)","(2,2,bar)","(3,1,baz)"}
(1 row)
select aggfns(distinct a,b,c)
from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
generate_series(1,3) i;
aggfns
-----------------------------------------------
{"(0,,)","(1,3,foo)","(2,2,bar)","(3,1,baz)"}
(1 row)
select aggfstr(distinct a,b,c order by b)
from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
generate_series(1,3) i;
aggfstr
---------------------------------------
{"(3,1,baz)","(2,2,bar)","(1,3,foo)"}
(1 row)
select aggfns(distinct a,b,c order by b)
from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
generate_series(1,3) i;
aggfns
-----------------------------------------------
{"(3,1,baz)","(2,2,bar)","(1,3,foo)","(0,,)"}
(1 row)
-- test specific code paths
select aggfns(distinct a,a,c order by c using ~<~,a)
from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
generate_series(1,2) i;
aggfns
------------------------------------------------
{"(2,2,bar)","(3,3,baz)","(1,1,foo)","(0,0,)"}
(1 row)
select aggfns(distinct a,a,c order by c using ~<~)
from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
generate_series(1,2) i;
aggfns
------------------------------------------------
{"(2,2,bar)","(3,3,baz)","(1,1,foo)","(0,0,)"}
(1 row)
select aggfns(distinct a,a,c order by a)
from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
generate_series(1,2) i;
aggfns
------------------------------------------------
{"(0,0,)","(1,1,foo)","(2,2,bar)","(3,3,baz)"}
(1 row)
select aggfns(distinct a,b,c order by a,c using ~<~,b)
from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
generate_series(1,2) i;
aggfns
-----------------------------------------------
{"(0,,)","(1,3,foo)","(2,2,bar)","(3,1,baz)"}
(1 row)
-- test a more complex permutation that has previously caused issues
select
string_agg(distinct 'a', ','),
sum((
select sum(1)
from (values(1)) b(id)
where a.id = b.id
)) from unnest(array[1]) a(id);
string_agg | sum
------------+-----
a | 1
(1 row)
-- check node I/O via view creation and usage, also deparsing logic
create view agg_view1 as
select aggfns(a,b,c)
from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c);
select * from agg_view1;
aggfns
-----------------------------------------------
{"(1,3,foo)","(0,,)","(2,2,bar)","(3,1,baz)"}
(1 row)
select pg_get_viewdef('agg_view1'::regclass);
pg_get_viewdef
---------------------------------------------------------------------------------------------------------------------
SELECT aggfns(a, b, c) AS aggfns +
FROM ( VALUES (1,3,'foo'::text), (0,NULL::integer,NULL::text), (2,2,'bar'::text), (3,1,'baz'::text)) v(a, b, c);
(1 row)
create or replace view agg_view1 as
select aggfns(distinct a,b,c)
from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
generate_series(1,3) i;
select * from agg_view1;
aggfns
-----------------------------------------------
{"(0,,)","(1,3,foo)","(2,2,bar)","(3,1,baz)"}
(1 row)
select pg_get_viewdef('agg_view1'::regclass);
pg_get_viewdef
---------------------------------------------------------------------------------------------------------------------
SELECT aggfns(DISTINCT v.a, v.b, v.c) AS aggfns +
FROM ( VALUES (1,3,'foo'::text), (0,NULL::integer,NULL::text), (2,2,'bar'::text), (3,1,'baz'::text)) v(a, b, c),+
generate_series(1, 3) i(i);
(1 row)
create or replace view agg_view1 as
select aggfns(distinct a,b,c order by b)
from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
generate_series(1,3) i;
select * from agg_view1;
aggfns
-----------------------------------------------
{"(3,1,baz)","(2,2,bar)","(1,3,foo)","(0,,)"}
(1 row)
select pg_get_viewdef('agg_view1'::regclass);
pg_get_viewdef
---------------------------------------------------------------------------------------------------------------------
SELECT aggfns(DISTINCT v.a, v.b, v.c ORDER BY v.b) AS aggfns +
FROM ( VALUES (1,3,'foo'::text), (0,NULL::integer,NULL::text), (2,2,'bar'::text), (3,1,'baz'::text)) v(a, b, c),+
generate_series(1, 3) i(i);
(1 row)
create or replace view agg_view1 as
select aggfns(a,b,c order by b+1)
from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c);
select * from agg_view1;
aggfns
-----------------------------------------------
{"(3,1,baz)","(2,2,bar)","(1,3,foo)","(0,,)"}
(1 row)
select pg_get_viewdef('agg_view1'::regclass);
pg_get_viewdef
---------------------------------------------------------------------------------------------------------------------
SELECT aggfns(a, b, c ORDER BY (b + 1)) AS aggfns +
FROM ( VALUES (1,3,'foo'::text), (0,NULL::integer,NULL::text), (2,2,'bar'::text), (3,1,'baz'::text)) v(a, b, c);
(1 row)
create or replace view agg_view1 as
select aggfns(a,a,c order by b)
from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c);
select * from agg_view1;
aggfns
------------------------------------------------
{"(3,3,baz)","(2,2,bar)","(1,1,foo)","(0,0,)"}
(1 row)
select pg_get_viewdef('agg_view1'::regclass);
pg_get_viewdef
---------------------------------------------------------------------------------------------------------------------
SELECT aggfns(a, a, c ORDER BY b) AS aggfns +
FROM ( VALUES (1,3,'foo'::text), (0,NULL::integer,NULL::text), (2,2,'bar'::text), (3,1,'baz'::text)) v(a, b, c);
(1 row)
create or replace view agg_view1 as
select aggfns(a,b,c order by c using ~<~)
from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c);
select * from agg_view1;
aggfns
-----------------------------------------------
{"(2,2,bar)","(3,1,baz)","(1,3,foo)","(0,,)"}
(1 row)
select pg_get_viewdef('agg_view1'::regclass);
pg_get_viewdef
---------------------------------------------------------------------------------------------------------------------
SELECT aggfns(a, b, c ORDER BY c USING ~<~ NULLS LAST) AS aggfns +
FROM ( VALUES (1,3,'foo'::text), (0,NULL::integer,NULL::text), (2,2,'bar'::text), (3,1,'baz'::text)) v(a, b, c);
(1 row)
create or replace view agg_view1 as
select aggfns(distinct a,b,c order by a,c using ~<~,b)
from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
generate_series(1,2) i;
select * from agg_view1;
aggfns
-----------------------------------------------
{"(0,,)","(1,3,foo)","(2,2,bar)","(3,1,baz)"}
(1 row)
select pg_get_viewdef('agg_view1'::regclass);
pg_get_viewdef
---------------------------------------------------------------------------------------------------------------------
SELECT aggfns(DISTINCT v.a, v.b, v.c ORDER BY v.a, v.c USING ~<~ NULLS LAST, v.b) AS aggfns +
FROM ( VALUES (1,3,'foo'::text), (0,NULL::integer,NULL::text), (2,2,'bar'::text), (3,1,'baz'::text)) v(a, b, c),+
generate_series(1, 2) i(i);
(1 row)
drop view agg_view1;
-- incorrect DISTINCT usage errors
select aggfns(distinct a,b,c order by i)
from (values (1,1,'foo')) v(a,b,c), generate_series(1,2) i;
ERROR: in an aggregate with DISTINCT, ORDER BY expressions must appear in argument list
LINE 1: select aggfns(distinct a,b,c order by i)
^
select aggfns(distinct a,b,c order by a,b+1)
from (values (1,1,'foo')) v(a,b,c), generate_series(1,2) i;
ERROR: in an aggregate with DISTINCT, ORDER BY expressions must appear in argument list
LINE 1: select aggfns(distinct a,b,c order by a,b+1)
^
select aggfns(distinct a,b,c order by a,b,i,c)
from (values (1,1,'foo')) v(a,b,c), generate_series(1,2) i;
ERROR: in an aggregate with DISTINCT, ORDER BY expressions must appear in argument list
LINE 1: select aggfns(distinct a,b,c order by a,b,i,c)
^
select aggfns(distinct a,a,c order by a,b)
from (values (1,1,'foo')) v(a,b,c), generate_series(1,2) i;
ERROR: in an aggregate with DISTINCT, ORDER BY expressions must appear in argument list
LINE 1: select aggfns(distinct a,a,c order by a,b)
^
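-- (for reference, a sketch of a legal rewrite: every ORDER BY expression
-- must appear verbatim among the DISTINCT arguments, e.g.
--   select aggfns(distinct a,b,c order by a,b)
--   from (values (1,1,'foo')) v(a,b,c), generate_series(1,2) i;
-- sketch only, not executed as part of this test)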
-- string_agg tests
select string_agg(a,',') from (values('aaaa'),('bbbb'),('cccc')) g(a);
string_agg
----------------
aaaa,bbbb,cccc
(1 row)
select string_agg(a,',') from (values('aaaa'),(null),('bbbb'),('cccc')) g(a);
string_agg
----------------
aaaa,bbbb,cccc
(1 row)
select string_agg(a,'AB') from (values(null),(null),('bbbb'),('cccc')) g(a);
string_agg
------------
bbbbABcccc
(1 row)
select string_agg(a,',') from (values(null),(null)) g(a);
string_agg
------------
(1 row)
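-- (string_agg skips NULL inputs entirely; with only NULL inputs the
-- result above is NULL, shown as an empty line)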
-- check some implicit casting cases, as per bug #5564
select string_agg(distinct f1, ',' order by f1) from varchar_tbl; -- ok
string_agg
------------
a,ab,abcd
(1 row)
select string_agg(distinct f1::text, ',' order by f1) from varchar_tbl; -- not ok
ERROR: in an aggregate with DISTINCT, ORDER BY expressions must appear in argument list
LINE 1: select string_agg(distinct f1::text, ',' order by f1) from v...
^
select string_agg(distinct f1, ',' order by f1::text) from varchar_tbl; -- not ok
ERROR: in an aggregate with DISTINCT, ORDER BY expressions must appear in argument list
LINE 1: select string_agg(distinct f1, ',' order by f1::text) from v...
^
select string_agg(distinct f1::text, ',' order by f1::text) from varchar_tbl; -- ok
string_agg
------------
a,ab,abcd
(1 row)
-- string_agg bytea tests
create table bytea_test_table(v bytea);
select string_agg(v, '') from bytea_test_table;
string_agg
------------
(1 row)
insert into bytea_test_table values(decode('ff','hex'));
select string_agg(v, '') from bytea_test_table;
string_agg
------------
\xff
(1 row)
insert into bytea_test_table values(decode('aa','hex'));
select string_agg(v, '') from bytea_test_table;
string_agg
------------
\xffaa
(1 row)
select string_agg(v, NULL) from bytea_test_table;
string_agg
------------
\xffaa
(1 row)
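-- (a NULL delimiter contributes nothing between values, so the result
-- matches the empty-delimiter case above)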
select string_agg(v, decode('ee', 'hex')) from bytea_test_table;
string_agg
------------
\xffeeaa
(1 row)
drop table bytea_test_table;
-- Test parallel string_agg and array_agg
create table pagg_test (x int, y int) with (autovacuum_enabled = off);
insert into pagg_test
select (case x % 4 when 1 then null else x end), x % 10
from generate_series(1,5000) x;
set parallel_setup_cost TO 0;
set parallel_tuple_cost TO 0;
set parallel_leader_participation TO 0;
set min_parallel_table_scan_size = 0;
set bytea_output = 'escape';
set max_parallel_workers_per_gather = 2;
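-- Zero parallel setup/tuple costs and a zero table-size threshold force
-- the planner to pick a parallel plan even for this small table; leader
-- participation is disabled so only the two workers feed the Gather.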
-- create a view as we otherwise have to repeat this query a few times.
create view v_pagg_test AS
select
y,
min(t) AS tmin,max(t) AS tmax,count(distinct t) AS tndistinct,
min(b) AS bmin,max(b) AS bmax,count(distinct b) AS bndistinct,
min(a) AS amin,max(a) AS amax,count(distinct a) AS andistinct,
min(aa) AS aamin,max(aa) AS aamax,count(distinct aa) AS aandistinct
from (
select
y,
unnest(regexp_split_to_array(a1.t, ','))::int AS t,
unnest(regexp_split_to_array(a1.b::text, ',')) AS b,
unnest(a1.a) AS a,
unnest(a1.aa) AS aa
from (
select
y,
string_agg(x::text, ',') AS t,
string_agg(x::text::bytea, ',') AS b,
array_agg(x) AS a,
array_agg(ARRAY[x]) AS aa
from pagg_test
group by y
) a1
) a2
group by y;
-- Ensure results are correct.
select * from v_pagg_test order by y;
y | tmin | tmax | tndistinct | bmin | bmax | bndistinct | amin | amax | andistinct | aamin | aamax | aandistinct
---+------+------+------------+------+------+------------+------+------+------------+-------+-------+-------------
0 | 10 | 5000 | 500 | 10 | 990 | 500 | 10 | 5000 | 500 | 10 | 5000 | 500
1 | 11 | 4991 | 250 | 1011 | 991 | 250 | 11 | 4991 | 250 | 11 | 4991 | 250
2 | 2 | 4992 | 500 | 1002 | 992 | 500 | 2 | 4992 | 500 | 2 | 4992 | 500
3 | 3 | 4983 | 250 | 1003 | 983 | 250 | 3 | 4983 | 250 | 3 | 4983 | 250
4 | 4 | 4994 | 500 | 1004 | 994 | 500 | 4 | 4994 | 500 | 4 | 4994 | 500
5 | 15 | 4995 | 250 | 1015 | 995 | 250 | 15 | 4995 | 250 | 15 | 4995 | 250
6 | 6 | 4996 | 500 | 1006 | 996 | 500 | 6 | 4996 | 500 | 6 | 4996 | 500
7 | 7 | 4987 | 250 | 1007 | 987 | 250 | 7 | 4987 | 250 | 7 | 4987 | 250
8 | 8 | 4998 | 500 | 1008 | 998 | 500 | 8 | 4998 | 500 | 8 | 4998 | 500
9 | 19 | 4999 | 250 | 1019 | 999 | 250 | 19 | 4999 | 250 | 19 | 4999 | 250
(10 rows)
-- Ensure parallel aggregation is actually being used.
explain (costs off) select * from v_pagg_test order by y;
QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------------
GroupAggregate
Group Key: pagg_test.y
-> Sort
Sort Key: pagg_test.y, (((unnest(regexp_split_to_array((string_agg((pagg_test.x)::text, ','::text)), ','::text))))::integer)
-> Result
-> ProjectSet
-> Finalize HashAggregate
Group Key: pagg_test.y
-> Gather
Workers Planned: 2
-> Partial HashAggregate
Group Key: pagg_test.y
-> Parallel Seq Scan on pagg_test
(13 rows)
set max_parallel_workers_per_gather = 0;
-- Ensure results are the same without parallel aggregation.
select * from v_pagg_test order by y;
y | tmin | tmax | tndistinct | bmin | bmax | bndistinct | amin | amax | andistinct | aamin | aamax | aandistinct
---+------+------+------------+------+------+------------+------+------+------------+-------+-------+-------------
0 | 10 | 5000 | 500 | 10 | 990 | 500 | 10 | 5000 | 500 | 10 | 5000 | 500
1 | 11 | 4991 | 250 | 1011 | 991 | 250 | 11 | 4991 | 250 | 11 | 4991 | 250
2 | 2 | 4992 | 500 | 1002 | 992 | 500 | 2 | 4992 | 500 | 2 | 4992 | 500
3 | 3 | 4983 | 250 | 1003 | 983 | 250 | 3 | 4983 | 250 | 3 | 4983 | 250
4 | 4 | 4994 | 500 | 1004 | 994 | 500 | 4 | 4994 | 500 | 4 | 4994 | 500
5 | 15 | 4995 | 250 | 1015 | 995 | 250 | 15 | 4995 | 250 | 15 | 4995 | 250
6 | 6 | 4996 | 500 | 1006 | 996 | 500 | 6 | 4996 | 500 | 6 | 4996 | 500
7 | 7 | 4987 | 250 | 1007 | 987 | 250 | 7 | 4987 | 250 | 7 | 4987 | 250
8 | 8 | 4998 | 500 | 1008 | 998 | 500 | 8 | 4998 | 500 | 8 | 4998 | 500
9 | 19 | 4999 | 250 | 1019 | 999 | 250 | 19 | 4999 | 250 | 19 | 4999 | 250
(10 rows)
-- Clean up
reset max_parallel_workers_per_gather;
reset bytea_output;
reset min_parallel_table_scan_size;
reset parallel_leader_participation;
reset parallel_tuple_cost;
reset parallel_setup_cost;
drop view v_pagg_test;
drop table pagg_test;
-- FILTER tests
select min(unique1) filter (where unique1 > 100) from tenk1;
min
-----
101
(1 row)
select sum(1/ten) filter (where ten > 0) from tenk1;
sum
------
1000
(1 row)
select ten, sum(distinct four) filter (where four::text ~ '123') from onek a
group by ten;
ten | sum
-----+-----
0 |
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
(10 rows)
select ten, sum(distinct four) filter (where four > 10) from onek a
group by ten
having exists (select 1 from onek b where sum(distinct a.four) = b.four);
ten | sum
-----+-----
0 |
2 |
4 |
6 |
8 |
(5 rows)
select max(foo COLLATE "C") filter (where (bar collate "POSIX") > '0')
from (values ('a', 'b')) AS v(foo,bar);
max
-----
a
(1 row)
select any_value(v) filter (where v > 2) from (values (1), (2), (3)) as v (v);
any_value
-----------
3
(1 row)
-- outer reference in FILTER (PostgreSQL extension)
select (select count(*)
from (values (1)) t0(inner_c))
from (values (2),(3)) t1(outer_c); -- inner query is aggregation query
count
-------
1
1
(2 rows)
select (select count(*) filter (where outer_c <> 0)
from (values (1)) t0(inner_c))
from (values (2),(3)) t1(outer_c); -- outer query is aggregation query
count
-------
2
(1 row)
select (select count(inner_c) filter (where outer_c <> 0)
from (values (1)) t0(inner_c))
from (values (2),(3)) t1(outer_c); -- inner query is aggregation query
count
-------
1
1
(2 rows)
select
(select max((select i.unique2 from tenk1 i where i.unique1 = o.unique1))
filter (where o.unique1 < 10))
from tenk1 o; -- outer query is aggregation query
max
------
9998
(1 row)
-- subquery in FILTER clause (PostgreSQL extension)
select sum(unique1) FILTER (WHERE
unique1 IN (SELECT unique1 FROM onek where unique1 < 100)) FROM tenk1;
sum
------
4950
(1 row)
-- exercise lots of aggregate parts with FILTER
select aggfns(distinct a,b,c order by a,c using ~<~,b) filter (where a > 1)
from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
generate_series(1,2) i;
aggfns
---------------------------
{"(2,2,bar)","(3,1,baz)"}
(1 row)
-- check handling of bare boolean Var in FILTER
select max(0) filter (where b1) from bool_test;
max
-----
0
(1 row)
select (select max(0) filter (where b1)) from bool_test;
max
-----
0
(1 row)
-- check for correct detection of nested-aggregate errors in FILTER
select max(unique1) filter (where sum(ten) > 0) from tenk1;
ERROR: aggregate functions are not allowed in FILTER
LINE 1: select max(unique1) filter (where sum(ten) > 0) from tenk1;
^
select (select max(unique1) filter (where sum(ten) > 0) from int8_tbl) from tenk1;
ERROR: aggregate function calls cannot be nested
LINE 1: select (select max(unique1) filter (where sum(ten) > 0) from...
^
select max(unique1) filter (where bool_or(ten > 0)) from tenk1;
ERROR: aggregate functions are not allowed in FILTER
LINE 1: select max(unique1) filter (where bool_or(ten > 0)) from ten...
^
select (select max(unique1) filter (where bool_or(ten > 0)) from int8_tbl) from tenk1;
ERROR: aggregate function calls cannot be nested
LINE 1: select (select max(unique1) filter (where bool_or(ten > 0)) ...
^
-- ordered-set aggregates
select p, percentile_cont(p) within group (order by x::float8)
from generate_series(1,5) x,
(values (0::float8),(0.1),(0.25),(0.4),(0.5),(0.6),(0.75),(0.9),(1)) v(p)
group by p order by p;
p | percentile_cont
------+-----------------
0 | 1
0.1 | 1.4
0.25 | 2
0.4 | 2.6
0.5 | 3
0.6 | 3.4
0.75 | 4
0.9 | 4.6
1 | 5
(9 rows)
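-- (percentile_cont interpolates linearly: over the n=5 sorted values the
-- target position is 1 + p*(n-1), so p=0.1 gives position 1.4 and result
-- 1 + 0.4*(2-1) = 1.4, matching the output above)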
select p, percentile_cont(p order by p) within group (order by x) -- error
from generate_series(1,5) x,
(values (0::float8),(0.1),(0.25),(0.4),(0.5),(0.6),(0.75),(0.9),(1)) v(p)
group by p order by p;
ERROR: cannot use multiple ORDER BY clauses with WITHIN GROUP
LINE 1: select p, percentile_cont(p order by p) within group (order ...
^
select p, sum() within group (order by x::float8) -- error
from generate_series(1,5) x,
(values (0::float8),(0.1),(0.25),(0.4),(0.5),(0.6),(0.75),(0.9),(1)) v(p)
group by p order by p;
ERROR: sum is not an ordered-set aggregate, so it cannot have WITHIN GROUP
LINE 1: select p, sum() within group (order by x::float8) -- error
^
select p, percentile_cont(p,p) -- error
from generate_series(1,5) x,
(values (0::float8),(0.1),(0.25),(0.4),(0.5),(0.6),(0.75),(0.9),(1)) v(p)
group by p order by p;
ERROR: WITHIN GROUP is required for ordered-set aggregate percentile_cont
LINE 1: select p, percentile_cont(p,p) -- error
^
select percentile_cont(0.5) within group (order by b) from aggtest;
percentile_cont
------------------
53.4485001564026
(1 row)
select percentile_cont(0.5) within group (order by b), sum(b) from aggtest;
percentile_cont | sum
------------------+---------
53.4485001564026 | 431.773
(1 row)
select percentile_cont(0.5) within group (order by thousand) from tenk1;
percentile_cont
-----------------
499.5
(1 row)
select percentile_disc(0.5) within group (order by thousand) from tenk1;
percentile_disc
-----------------
499
(1 row)
select rank(3) within group (order by x)
from (values (1),(1),(2),(2),(3),(3),(4)) v(x);
rank
------
5
(1 row)
select cume_dist(3) within group (order by x)
from (values (1),(1),(2),(2),(3),(3),(4)) v(x);
cume_dist
-----------
0.875
(1 row)
select percent_rank(3) within group (order by x)
from (values (1),(1),(2),(2),(3),(3),(4),(5)) v(x);
percent_rank
--------------
0.5
(1 row)
select dense_rank(3) within group (order by x)
from (values (1),(1),(2),(2),(3),(3),(4)) v(x);
dense_rank
------------
3
(1 row)
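-- (the hypothetical-set results above can be checked by hand: inserting 3
-- into {1,1,2,2,3,3,4} ranks it 5th; cume_dist is 7/8 = 0.875; with the
-- extra 5 percent_rank is (5-1)/(9-1) = 0.5; dense_rank counts the two
-- distinct smaller values, giving 3)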
select percentile_disc(array[0,0.1,0.25,0.5,0.75,0.9,1]) within group (order by thousand)
from tenk1;
percentile_disc
----------------------------
{0,99,249,499,749,899,999}
(1 row)
select percentile_cont(array[0,0.25,0.5,0.75,1]) within group (order by thousand)
from tenk1;
percentile_cont
-----------------------------
{0,249.75,499.5,749.25,999}
(1 row)
select percentile_disc(array[[null,1,0.5],[0.75,0.25,null]]) within group (order by thousand)
from tenk1;
percentile_disc
---------------------------------
{{NULL,999,499},{749,249,NULL}}
(1 row)
select percentile_cont(array[0,1,0.25,0.75,0.5,1,0.3,0.32,0.35,0.38,0.4]) within group (order by x)
from generate_series(1,6) x;
percentile_cont
------------------------------------------
{1,6,2.25,4.75,3.5,6,2.5,2.6,2.75,2.9,3}
(1 row)
select ten, mode() within group (order by string4) from tenk1 group by ten;
ten | mode
-----+--------
0 | HHHHxx
1 | OOOOxx
2 | VVVVxx
3 | OOOOxx
4 | HHHHxx
5 | HHHHxx
6 | OOOOxx
7 | AAAAxx
8 | VVVVxx
9 | VVVVxx
(10 rows)
select percentile_disc(array[0.25,0.5,0.75]) within group (order by x)
from unnest('{fred,jim,fred,jack,jill,fred,jill,jim,jim,sheila,jim,sheila}'::text[]) u(x);
percentile_disc
-----------------
{fred,jill,jim}
(1 row)
-- check collation propagates up in suitable cases:
select pg_collation_for(percentile_disc(1) within group (order by x collate "POSIX"))
from (values ('fred'),('jim')) v(x);
pg_collation_for
------------------
"POSIX"
(1 row)
-- ordered-set aggs created with CREATE AGGREGATE
select test_rank(3) within group (order by x)
from (values (1),(1),(2),(2),(3),(3),(4)) v(x);
test_rank
-----------
5
(1 row)
select test_percentile_disc(0.5) within group (order by thousand) from tenk1;
test_percentile_disc
----------------------
499
(1 row)
-- ordered-set aggs can't use ungrouped vars in direct args:
select rank(x) within group (order by x) from generate_series(1,5) x;
ERROR: column "x.x" must appear in the GROUP BY clause or be used in an aggregate function
LINE 1: select rank(x) within group (order by x) from generate_serie...
^
DETAIL: Direct arguments of an ordered-set aggregate must use only grouped columns.
-- outer-level agg can't use a grouped arg of a lower level, either:
select array(select percentile_disc(a) within group (order by x)
from (values (0.3),(0.7)) v(a) group by a)
from generate_series(1,5) g(x);
ERROR: outer-level aggregate cannot contain a lower-level variable in its direct arguments
LINE 1: select array(select percentile_disc(a) within group (order b...
^
-- agg in the direct args is a grouping violation, too:
select rank(sum(x)) within group (order by x) from generate_series(1,5) x;
ERROR: aggregate function calls cannot be nested
LINE 1: select rank(sum(x)) within group (order by x) from generate_...
^
-- hypothetical-set type unification and argument-count failures:
select rank(3) within group (order by x) from (values ('fred'),('jim')) v(x);
ERROR: WITHIN GROUP types text and integer cannot be matched
LINE 1: select rank(3) within group (order by x) from (values ('fred...
^
select rank(3) within group (order by stringu1,stringu2) from tenk1;
ERROR: function rank(integer, name, name) does not exist
LINE 1: select rank(3) within group (order by stringu1,stringu2) fro...
^
HINT: To use the hypothetical-set aggregate rank, the number of hypothetical direct arguments (here 1) must match the number of ordering columns (here 2).
select rank('fred') within group (order by x) from generate_series(1,5) x;
ERROR: invalid input syntax for type integer: "fred"
LINE 1: select rank('fred') within group (order by x) from generate_...
^
select rank('adam'::text collate "C") within group (order by x collate "POSIX")
from (values ('fred'),('jim')) v(x);
ERROR: collation mismatch between explicit collations "C" and "POSIX"
LINE 1: ...adam'::text collate "C") within group (order by x collate "P...
^
-- hypothetical-set type unification successes:
select rank('adam'::varchar) within group (order by x) from (values ('fred'),('jim')) v(x);
rank
------
1
(1 row)
select rank('3') within group (order by x) from generate_series(1,5) x;
rank
------
3
(1 row)
-- divide by zero check
select percent_rank(0) within group (order by x) from generate_series(1,0) x;
percent_rank
--------------
0
(1 row)
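-- (with no input rows percent_rank's denominator would be zero; it is
-- defined to return 0 in that case rather than divide by zero)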
-- deparse and multiple features:
create view aggordview1 as
select ten,
percentile_disc(0.5) within group (order by thousand) as p50,
percentile_disc(0.5) within group (order by thousand) filter (where hundred=1) as px,
rank(5,'AZZZZ',50) within group (order by hundred, string4 desc, hundred)
from tenk1
group by ten order by ten;
select pg_get_viewdef('aggordview1');
pg_get_viewdef
-------------------------------------------------------------------------------------------------------------------
SELECT ten, +
percentile_disc((0.5)::double precision) WITHIN GROUP (ORDER BY thousand) AS p50, +
percentile_disc((0.5)::double precision) WITHIN GROUP (ORDER BY thousand) FILTER (WHERE (hundred = 1)) AS px,+
rank(5, 'AZZZZ'::name, 50) WITHIN GROUP (ORDER BY hundred, string4 DESC, hundred) AS rank +
FROM tenk1 +
GROUP BY ten +
ORDER BY ten;
(1 row)
select * from aggordview1 order by ten;
ten | p50 | px | rank
-----+-----+-----+------
0 | 490 | | 101
1 | 491 | 401 | 101
2 | 492 | | 101
3 | 493 | | 101
4 | 494 | | 101
5 | 495 | | 67
6 | 496 | | 1
7 | 497 | | 1
8 | 498 | | 1
9 | 499 | | 1
(10 rows)
drop view aggordview1;
-- variadic aggregates
select least_agg(q1,q2) from int8_tbl;
least_agg
-------------------
-4567890123456789
(1 row)
select least_agg(variadic array[q1,q2]) from int8_tbl;
least_agg
-------------------
-4567890123456789
(1 row)
select cleast_agg(q1,q2) from int8_tbl;
cleast_agg
-------------------
-4567890123456789
(1 row)
select cleast_agg(4.5,f1) from int4_tbl;
cleast_agg
-------------
-2147483647
(1 row)
select cleast_agg(variadic array[4.5,f1]) from int4_tbl;
cleast_agg
-------------
-2147483647
(1 row)
select pg_typeof(cleast_agg(variadic array[4.5,f1])) from int4_tbl;
pg_typeof
-----------
numeric
(1 row)
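-- (least_agg/cleast_agg are defined in the test setup; a minimal sketch of
-- such a declaration, with assumed helper names, might look like:
--   create function least_accum(anyelement, variadic anyarray)
--   returns anyelement language sql as
--     'select least($1, min(x)) from unnest($2) u(x)';
--   create aggregate least_agg(variadic items anyarray) (
--     stype = anyelement, sfunc = least_accum
--   );
-- sketch only, not executed as part of this test)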
-- test that aggregates with common transition functions share the same state
begin work;
create type avg_state as (total bigint, count bigint);
create or replace function avg_transfn(state avg_state, n int) returns avg_state as
$$
declare new_state avg_state;
begin
raise notice 'avg_transfn called with %', n;
if state is null then
if n is not null then
new_state.total := n;
new_state.count := 1;
return new_state;
end if;
return null;
elsif n is not null then
state.total := state.total + n;
state.count := state.count + 1;
return state;
end if;
return null;
end
$$ language plpgsql;
create function avg_finalfn(state avg_state) returns int4 as
$$
begin
if state is null then
return NULL;
else
return state.total / state.count;
end if;
end
$$ language plpgsql;
create function sum_finalfn(state avg_state) returns int4 as
$$
begin
if state is null then
return NULL;
else
return state.total;
end if;
end
$$ language plpgsql;
create aggregate my_avg(int4)
(
stype = avg_state,
sfunc = avg_transfn,
finalfunc = avg_finalfn
);
create aggregate my_sum(int4)
(
stype = avg_state,
sfunc = avg_transfn,
finalfunc = sum_finalfn
);
-- aggregate state should be shared as aggs are the same.
select my_avg(one),my_avg(one) from (values(1),(3)) t(one);
NOTICE: avg_transfn called with 1
NOTICE: avg_transfn called with 3
my_avg | my_avg
--------+--------
2 | 2
(1 row)
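-- (a single NOTICE per input row above: the two identical aggregate calls
-- advanced one shared transition state)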
-- aggregate state should be shared as transfn is the same for both aggs.
select my_avg(one),my_sum(one) from (values(1),(3)) t(one);
NOTICE: avg_transfn called with 1
NOTICE: avg_transfn called with 3
my_avg | my_sum
--------+--------
2 | 4
(1 row)
-- same as previous one, but with DISTINCT, which requires sorting the input.
select my_avg(distinct one),my_sum(distinct one) from (values(1),(3),(1)) t(one);
NOTICE: avg_transfn called with 1
NOTICE: avg_transfn called with 3
my_avg | my_sum
--------+--------
2 | 4
(1 row)
-- shouldn't share states due to the distinctness not matching.
select my_avg(distinct one),my_sum(one) from (values(1),(3)) t(one);
NOTICE: avg_transfn called with 1
NOTICE: avg_transfn called with 1
NOTICE: avg_transfn called with 3
NOTICE: avg_transfn called with 3
my_avg | my_sum
--------+--------
2 | 4
(1 row)
-- shouldn't share states due to the filter clause not matching.
select my_avg(one) filter (where one > 1),my_sum(one) from (values(1),(3)) t(one);
NOTICE: avg_transfn called with 1
NOTICE: avg_transfn called with 3
NOTICE: avg_transfn called with 3
my_avg | my_sum
--------+--------
3 | 4
(1 row)
-- this should not share the state due to different input columns.
select my_avg(one),my_sum(two) from (values(1,2),(3,4)) t(one,two);
NOTICE: avg_transfn called with 1
NOTICE: avg_transfn called with 2
NOTICE: avg_transfn called with 3
NOTICE: avg_transfn called with 4
my_avg | my_sum
--------+--------
2 | 6
(1 row)
-- exercise cases where OSAs share state
select
percentile_cont(0.5) within group (order by a),
percentile_disc(0.5) within group (order by a)
from (values(1::float8),(3),(5),(7)) t(a);
percentile_cont | percentile_disc
-----------------+-----------------
4 | 3
(1 row)
select
percentile_cont(0.25) within group (order by a),
percentile_disc(0.5) within group (order by a)
from (values(1::float8),(3),(5),(7)) t(a);
percentile_cont | percentile_disc
-----------------+-----------------
2.5 | 3
(1 row)
-- these can't share state currently
select
rank(4) within group (order by a),
dense_rank(4) within group (order by a)
from (values(1),(3),(5),(7)) t(a);
rank | dense_rank
------+------------
3 | 3
(1 row)
-- test that aggs with the same sfunc and initcond share the same agg state
create aggregate my_sum_init(int4)
(
stype = avg_state,
sfunc = avg_transfn,
finalfunc = sum_finalfn,
initcond = '(10,0)'
);
create aggregate my_avg_init(int4)
(
stype = avg_state,
sfunc = avg_transfn,
finalfunc = avg_finalfn,
initcond = '(10,0)'
);
create aggregate my_avg_init2(int4)
(
stype = avg_state,
sfunc = avg_transfn,
finalfunc = avg_finalfn,
initcond = '(4,0)'
);
-- state should be shared if INITCONDs are matching
select my_sum_init(one),my_avg_init(one) from (values(1),(3)) t(one);
NOTICE: avg_transfn called with 1
NOTICE: avg_transfn called with 3
my_sum_init | my_avg_init
-------------+-------------
14 | 7
(1 row)
-- Varying INITCONDs should cause the states not to be shared.
select my_sum_init(one),my_avg_init2(one) from (values(1),(3)) t(one);
NOTICE: avg_transfn called with 1
NOTICE: avg_transfn called with 1
NOTICE: avg_transfn called with 3
NOTICE: avg_transfn called with 3
my_sum_init | my_avg_init2
-------------+--------------
14 | 4
(1 row)
rollback;
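-- To summarize the cases above: two aggregates can share one transition
-- state only when the transition function, state type, input arguments,
-- and INITCOND all match.  The mismatched INITCOND of my_avg_init2 forced
-- a second state, visible as the doubled avg_transfn NOTICE lines.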
-- test aggregate state sharing to ensure it works if one aggregate has a
-- finalfn and the other one has none.
begin work;
create or replace function sum_transfn(state int4, n int4) returns int4 as
$$
declare new_state int4;
begin
raise notice 'sum_transfn called with %', n;
if state is null then
if n is not null then
new_state := n;
return new_state;
end if;
return null;
elsif n is not null then
state := state + n;
return state;
end if;
return null;
end
$$ language plpgsql;
create function halfsum_finalfn(state int4) returns int4 as
$$
begin
if state is null then
return NULL;
else
return state / 2;
end if;
end
$$ language plpgsql;
create aggregate my_sum(int4)
(
stype = int4,
sfunc = sum_transfn
);
create aggregate my_half_sum(int4)
(
stype = int4,
sfunc = sum_transfn,
finalfunc = halfsum_finalfn
);
-- Agg state should be shared even though my_sum has no finalfn
select my_sum(one),my_half_sum(one) from (values(1),(2),(3),(4)) t(one);
NOTICE: sum_transfn called with 1
NOTICE: sum_transfn called with 2
NOTICE: sum_transfn called with 3
NOTICE: sum_transfn called with 4
my_sum | my_half_sum
--------+-------------
10 | 5
(1 row)
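-- Only four sum_transfn NOTICE lines appear for two aggregates: one
-- shared state was advanced once per row, then my_sum read it directly
-- while my_half_sum ran its finalfn over the same state.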
rollback;
-- test that the aggregate transition logic correctly handles
-- transition / combine functions returning NULL
-- First test the case of a normal transition function returning NULL
BEGIN;
CREATE FUNCTION balkifnull(int8, int4)
RETURNS int8
STRICT
LANGUAGE plpgsql AS $$
BEGIN
IF $1 IS NULL THEN
RAISE 'erroneously called with NULL argument';
END IF;
RETURN NULL;
END$$;
CREATE AGGREGATE balk(int4)
(
SFUNC = balkifnull(int8, int4),
STYPE = int8,
PARALLEL = SAFE,
INITCOND = '0'
);
SELECT balk(hundred) FROM tenk1;
 balk
------
 
(1 row)
ROLLBACK;
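-- The NULL result (rather than the RAISE firing) shows the expected
-- behavior: once the strict transition function returns NULL, the
-- executor stops calling it, since a strict function is never invoked
-- with a NULL argument.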
-- GROUP BY optimization by reordering GROUP BY clauses
CREATE TABLE btg AS SELECT
i % 10 AS x,
i % 10 AS y,
'abc' || i % 10 AS z,
i AS w
FROM generate_series(1, 100) AS i;
CREATE INDEX btg_x_y_idx ON btg(x, y);
ANALYZE btg;
SET enable_hashagg = off;
SET enable_seqscan = off;
-- Utilize the ordering of the index scan to avoid a Sort operation
EXPLAIN (COSTS OFF)
SELECT count(*) FROM btg GROUP BY y, x;
QUERY PLAN
------------------------------------------------
GroupAggregate
Group Key: x, y
-> Index Only Scan using btg_x_y_idx on btg
(3 rows)
-- Engage incremental sort
EXPLAIN (COSTS OFF)
SELECT count(*) FROM btg GROUP BY z, y, w, x;
QUERY PLAN
-------------------------------------------------
GroupAggregate
Group Key: x, y, z, w
-> Incremental Sort
Sort Key: x, y, z, w
Presorted Key: x, y
-> Index Scan using btg_x_y_idx on btg
(6 rows)
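-- Note the Group Key reads x, y, z, w although the query said
-- GROUP BY z, y, w, x: the grouping keys were reordered so the index's
-- (x, y) prefix is reused and only the z, w suffix needs sorting.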
-- Utilize the ordering of the subquery scan to avoid a Sort operation
EXPLAIN (COSTS OFF) SELECT count(*)
FROM (SELECT * FROM btg ORDER BY x, y, w, z) AS q1
GROUP BY w, x, z, y;
QUERY PLAN
-------------------------------------------------
GroupAggregate
Group Key: btg.x, btg.y, btg.w, btg.z
-> Incremental Sort
Sort Key: btg.x, btg.y, btg.w, btg.z
Presorted Key: btg.x, btg.y
-> Index Scan using btg_x_y_idx on btg
(6 rows)
-- Utilize the ordering of merge join to avoid a full Sort operation
SET enable_hashjoin = off;
SET enable_nestloop = off;
EXPLAIN (COSTS OFF)
SELECT count(*)
FROM btg t1 JOIN btg t2 ON t1.z = t2.z AND t1.w = t2.w AND t1.x = t2.x
GROUP BY t1.x, t1.y, t1.z, t1.w;
QUERY PLAN
-------------------------------------------------------------------------------
GroupAggregate
Group Key: t1.z, t1.w, t1.x, t1.y
-> Incremental Sort
Sort Key: t1.z, t1.w, t1.x, t1.y
Presorted Key: t1.z, t1.w, t1.x
-> Merge Join
Merge Cond: ((t1.z = t2.z) AND (t1.w = t2.w) AND (t1.x = t2.x))
-> Sort
Sort Key: t1.z, t1.w, t1.x
-> Index Scan using btg_x_y_idx on btg t1
-> Sort
Sort Key: t2.z, t2.w, t2.x
-> Index Scan using btg_x_y_idx on btg t2
(13 rows)
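-- The grouping keys were reordered to lead with the merge join's
-- ordering (z, w, x), so the incremental sort only has to order the
-- trailing y column.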
RESET enable_nestloop;
RESET enable_hashjoin;
-- Should work with and without GROUP-BY optimization
EXPLAIN (COSTS OFF)
SELECT count(*) FROM btg GROUP BY w, x, z, y ORDER BY y, x, z, w;
QUERY PLAN
-------------------------------------------------
GroupAggregate
Group Key: y, x, z, w
-> Sort
Sort Key: y, x, z, w
-> Index Scan using btg_x_y_idx on btg
(5 rows)
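-- Here the grouping keys were rearranged to y, x, z, w to match ORDER BY,
-- so one Sort satisfies both the grouping and the final ordering.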
-- Utilize incremental sort to make the ORDER BY clause a bit cheaper
EXPLAIN (COSTS OFF)
SELECT count(*) FROM btg GROUP BY w, x, y, z ORDER BY x*x, z;
QUERY PLAN
-------------------------------------------------------
Sort
Sort Key: ((x * x)), z
-> GroupAggregate
Group Key: x, y, w, z
-> Incremental Sort
Sort Key: x, y, w, z
Presorted Key: x, y
-> Index Scan using btg_x_y_idx on btg
(8 rows)
-- Test the case where the number of incoming subtree path keys is more than
-- the number of grouping keys.
CREATE INDEX btg_y_x_w_idx ON btg(y, x, w);
EXPLAIN (VERBOSE, COSTS OFF)
SELECT y, x, array_agg(distinct w)
FROM btg WHERE y < 0 GROUP BY x, y;
QUERY PLAN
---------------------------------------------------------
GroupAggregate
Output: y, x, array_agg(DISTINCT w)
Group Key: btg.y, btg.x
-> Index Only Scan using btg_y_x_w_idx on public.btg
Output: y, x, w
Index Cond: (btg.y < 0)
(6 rows)
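-- The index delivers rows ordered by (y, x, w), one column more than the
-- two grouping keys; grouping still uses just btg.y, btg.x, and the extra
-- w ordering can plausibly be reused for array_agg(DISTINCT w).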
-- Ensure that we do not select the aggregate pathkeys instead of the grouping
-- pathkeys
CREATE TABLE group_agg_pk AS SELECT
i % 10 AS x,
i % 2 AS y,
i % 2 AS z,
2 AS w,
i % 10 AS f
FROM generate_series(1,100) AS i;
ANALYZE group_agg_pk;
SET enable_nestloop = off;
SET enable_hashjoin = off;
EXPLAIN (COSTS OFF)
SELECT avg(c1.f ORDER BY c1.x, c1.y)
FROM group_agg_pk c1 JOIN group_agg_pk c2 ON c1.x = c2.x
GROUP BY c1.w, c1.z;
QUERY PLAN
-----------------------------------------------------
GroupAggregate
Group Key: c1.w, c1.z
-> Sort
Sort Key: c1.w, c1.z, c1.x, c1.y
-> Merge Join
Merge Cond: (c1.x = c2.x)
-> Sort
Sort Key: c1.x
-> Seq Scan on group_agg_pk c1
-> Sort
Sort Key: c2.x
-> Seq Scan on group_agg_pk c2
(12 rows)
SELECT avg(c1.f ORDER BY c1.x, c1.y)
FROM group_agg_pk c1 JOIN group_agg_pk c2 ON c1.x = c2.x
GROUP BY c1.w, c1.z;
avg
--------------------
4.0000000000000000
5.0000000000000000
(2 rows)
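-- The Sort above orders by the grouping keys (c1.w, c1.z) first and only
-- then by the aggregate's ORDER BY columns (c1.x, c1.y); leading with the
-- aggregate pathkeys instead would not deliver rows grouped by w, z.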
RESET enable_nestloop;
RESET enable_hashjoin;
DROP TABLE group_agg_pk;
-- Test the case where the ordering of the scan matches the ordering within the
-- aggregate but cannot be found in the group-by list
CREATE TABLE agg_sort_order (c1 int PRIMARY KEY, c2 int);
CREATE UNIQUE INDEX agg_sort_order_c2_idx ON agg_sort_order(c2);
INSERT INTO agg_sort_order SELECT i, i FROM generate_series(1,100)i;
ANALYZE agg_sort_order;
EXPLAIN (COSTS OFF)
SELECT array_agg(c1 ORDER BY c2),c2
FROM agg_sort_order WHERE c2 < 100 GROUP BY c1 ORDER BY 2;
QUERY PLAN
----------------------------------------------------------------------------
Sort
Sort Key: c2
-> GroupAggregate
Group Key: c1
-> Sort
Sort Key: c1, c2
-> Index Scan using agg_sort_order_c2_idx on agg_sort_order
Index Cond: (c2 < 100)
(8 rows)
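-- The aggregate's ORDER BY c2 is absent from the GROUP BY list, so the
-- Sort orders by the grouping key c1 with c2 appended, letting
-- array_agg(c1 ORDER BY c2) consume pre-ordered input within each group.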
DROP TABLE agg_sort_order CASCADE;
DROP TABLE btg;
RESET enable_hashagg;
RESET enable_seqscan;
-- Secondly, test the case of a parallel aggregate combine function
-- returning NULL. For that, use a normal transition function, but a
-- combine function that returns NULL.
BEGIN;
CREATE FUNCTION balkifnull(int8, int8)
RETURNS int8
PARALLEL SAFE
STRICT
LANGUAGE plpgsql AS $$
BEGIN
IF $1 IS NULL THEN
RAISE 'erroneously called with NULL argument';
END IF;
RETURN NULL;
END$$;
CREATE AGGREGATE balk(int4)
(
SFUNC = int4_sum(int8, int4),
STYPE = int8,
COMBINEFUNC = balkifnull(int8, int8),
PARALLEL = SAFE,
INITCOND = '0'
);
-- force use of parallelism
ALTER TABLE tenk1 set (parallel_workers = 4);
SET LOCAL parallel_setup_cost=0;
SET LOCAL max_parallel_workers_per_gather=4;
EXPLAIN (COSTS OFF) SELECT balk(hundred) FROM tenk1;
QUERY PLAN
-------------------------------------------------------------------------
Finalize Aggregate
-> Gather
Workers Planned: 4
-> Partial Aggregate
-> Parallel Index Only Scan using tenk1_hundred on tenk1
(5 rows)
SELECT balk(hundred) FROM tenk1;
 balk
------
 
(1 row)
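-- Again NULL rather than an error: the strict combine function returns
-- NULL on its first call and is never invoked again with the now-NULL
-- state, so the partial sums are effectively discarded.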
ROLLBACK;
-- test multiple usage of an aggregate whose finalfn returns an R/W datum
BEGIN;
CREATE FUNCTION rwagg_sfunc(x anyarray, y anyarray) RETURNS anyarray
LANGUAGE plpgsql IMMUTABLE AS $$
BEGIN
RETURN array_fill(y[1], ARRAY[4]);
END;
$$;
CREATE FUNCTION rwagg_finalfunc(x anyarray) RETURNS anyarray
LANGUAGE plpgsql STRICT IMMUTABLE AS $$
DECLARE
res x%TYPE;
BEGIN
-- assignment is essential for this test; it expands the array to R/W
res := array_fill(x[1], ARRAY[4]);
RETURN res;
END;
$$;
CREATE AGGREGATE rwagg(anyarray) (
STYPE = anyarray,
SFUNC = rwagg_sfunc,
FINALFUNC = rwagg_finalfunc
);
CREATE FUNCTION eatarray(x real[]) RETURNS real[]
LANGUAGE plpgsql STRICT IMMUTABLE AS $$
BEGIN
x[1] := x[1] + 1;
RETURN x;
END;
$$;
SELECT eatarray(rwagg(ARRAY[1.0::real])), eatarray(rwagg(ARRAY[1.0::real]));
eatarray | eatarray
-----------+-----------
{2,1,1,1} | {2,1,1,1}
(1 row)
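-- Both columns read {2,1,1,1}: each eatarray() call got its own copy of
-- the finalfn's read-write result.  Had the two calls shared one R/W
-- datum, the second eatarray() would have seen the first call's update
-- and returned {3,1,1,1}.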
ROLLBACK;
-- test coverage for aggregate combine/serial/deserial functions
BEGIN;
SET parallel_setup_cost = 0;
SET parallel_tuple_cost = 0;
SET min_parallel_table_scan_size = 0;
SET max_parallel_workers_per_gather = 4;
SET parallel_leader_participation = off;
SET enable_indexonlyscan = off;
-- variance(int4) covers numeric_poly_combine
-- sum(int8) covers int8_avg_combine
-- regr_count(float8, float8) covers int8inc_float8_float8 and aggregates with > 1 arg
EXPLAIN (COSTS OFF, VERBOSE)
SELECT variance(unique1::int4), sum(unique1::int8), regr_count(unique1::float8, unique1::float8)
FROM (SELECT * FROM tenk1
UNION ALL SELECT * FROM tenk1
UNION ALL SELECT * FROM tenk1
UNION ALL SELECT * FROM tenk1) u;
QUERY PLAN
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Finalize Aggregate
Output: variance(tenk1.unique1), sum((tenk1.unique1)::bigint), regr_count((tenk1.unique1)::double precision, (tenk1.unique1)::double precision)
-> Gather
Output: (PARTIAL variance(tenk1.unique1)), (PARTIAL sum((tenk1.unique1)::bigint)), (PARTIAL regr_count((tenk1.unique1)::double precision, (tenk1.unique1)::double precision))
Workers Planned: 4
-> Partial Aggregate
Output: PARTIAL variance(tenk1.unique1), PARTIAL sum((tenk1.unique1)::bigint), PARTIAL regr_count((tenk1.unique1)::double precision, (tenk1.unique1)::double precision)
-> Parallel Append
-> Parallel Seq Scan on public.tenk1
Output: tenk1.unique1
-> Parallel Seq Scan on public.tenk1 tenk1_1
Output: tenk1_1.unique1
-> Parallel Seq Scan on public.tenk1 tenk1_2
Output: tenk1_2.unique1
-> Parallel Seq Scan on public.tenk1 tenk1_3
Output: tenk1_3.unique1
(16 rows)
SELECT variance(unique1::int4), sum(unique1::int8), regr_count(unique1::float8, unique1::float8)
FROM (SELECT * FROM tenk1
UNION ALL SELECT * FROM tenk1
UNION ALL SELECT * FROM tenk1
UNION ALL SELECT * FROM tenk1) u;
variance | sum | regr_count
----------------------+-----------+------------
8333541.588539713493 | 199980000 | 40000
(1 row)
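-- Sanity check: the UNION ALL reads tenk1 four times, so
-- regr_count = 4 * 10000 = 40000 and sum = 4 * (0 + 1 + ... + 9999)
-- = 4 * 49995000 = 199980000.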
-- variance(int8) covers numeric_combine
-- avg(numeric) covers numeric_avg_combine
EXPLAIN (COSTS OFF, VERBOSE)
SELECT variance(unique1::int8), avg(unique1::numeric)
FROM (SELECT * FROM tenk1
UNION ALL SELECT * FROM tenk1
UNION ALL SELECT * FROM tenk1
UNION ALL SELECT * FROM tenk1) u;
QUERY PLAN
--------------------------------------------------------------------------------------------------------
Finalize Aggregate
Output: variance((tenk1.unique1)::bigint), avg((tenk1.unique1)::numeric)
-> Gather
Output: (PARTIAL variance((tenk1.unique1)::bigint)), (PARTIAL avg((tenk1.unique1)::numeric))
Workers Planned: 4
-> Partial Aggregate
Output: PARTIAL variance((tenk1.unique1)::bigint), PARTIAL avg((tenk1.unique1)::numeric)
-> Parallel Append
-> Parallel Seq Scan on public.tenk1
Output: tenk1.unique1
-> Parallel Seq Scan on public.tenk1 tenk1_1
Output: tenk1_1.unique1
-> Parallel Seq Scan on public.tenk1 tenk1_2
Output: tenk1_2.unique1
-> Parallel Seq Scan on public.tenk1 tenk1_3
Output: tenk1_3.unique1
(16 rows)
SELECT variance(unique1::int8), avg(unique1::numeric)
FROM (SELECT * FROM tenk1
UNION ALL SELECT * FROM tenk1
UNION ALL SELECT * FROM tenk1
UNION ALL SELECT * FROM tenk1) u;
variance | avg
----------------------+-----------------------
8333541.588539713493 | 4999.5000000000000000
(1 row)
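-- As expected, avg = (0 + 9999) / 2 = 4999.5, and the variance matches
-- the int4 run above because the same 40000 values were aggregated.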
ROLLBACK;
-- test coverage for dense_rank
SELECT dense_rank(x) WITHIN GROUP (ORDER BY x) FROM (VALUES (1),(1),(2),(2),(3),(3)) v(x) GROUP BY (x) ORDER BY 1;
dense_rank
------------
1
1
1
(3 rows)
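-- With GROUP BY x, each group contains only copies of one value, so the
-- hypothetical argument x ties with every row of its own group and
-- dense_rank is 1 in all three groups.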
-- Ensure that the STRICT checks for aggregates do not take NULLness
-- of ORDER BY columns into account. See bug report around
-- 2a505161-2727-2473-7c46-591ed108ac52@email.cz
SELECT min(x ORDER BY y) FROM (VALUES(1, NULL)) AS d(x,y);
min
-----
1
(1 row)
SELECT min(x ORDER BY y) FROM (VALUES(1, 2)) AS d(x,y);
min
-----
1
(1 row)
-- check collation-sensitive matching between grouping expressions
select v||'a', case v||'a' when 'aa' then 1 else 0 end, count(*)
from unnest(array['a','b']) u(v)
group by v||'a' order by 1;
?column? | case | count
----------+------+-------
aa | 1 | 1
ba | 0 | 1
(2 rows)
select v||'a', case when v||'a' = 'aa' then 1 else 0 end, count(*)
from unnest(array['a','b']) u(v)
group by v||'a' order by 1;
?column? | case | count
----------+------+-------
aa | 1 | 1
ba | 0 | 1
(2 rows)
-- Make sure that generation of HashAggregate for uniqification purposes
-- does not lead to array overflow due to unexpected duplicate hash keys
-- see CAFeeJoKKu0u+A_A9R9316djW-YW3-+Gtgvy3ju655qRHR3jtdA@mail.gmail.com
set enable_memoize to off;
explain (costs off)
select 1 from tenk1
where (hundred, thousand) in (select twothousand, twothousand from onek);
QUERY PLAN
-------------------------------------------------------------
Hash Join
Hash Cond: (tenk1.hundred = onek.twothousand)
-> Seq Scan on tenk1
Filter: (hundred = thousand)
-> Hash
-> HashAggregate
Group Key: onek.twothousand, onek.twothousand
-> Seq Scan on onek
(8 rows)
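-- Note the Group Key lists onek.twothousand twice: uniqifying the IN
-- subquery duplicates the column, and such repeated hash keys once
-- triggered the array overflow this test guards against.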
reset enable_memoize;
--
-- Hash Aggregation Spill tests
--
set enable_sort=false;
set work_mem='64kB';
select unique1, count(*), sum(twothousand) from tenk1
group by unique1
having sum(fivethous) > 4975
order by sum(twothousand);
unique1 | count | sum
---------+-------+------
4976 | 1 | 976
4977 | 1 | 977
4978 | 1 | 978
4979 | 1 | 979
4980 | 1 | 980
4981 | 1 | 981
4982 | 1 | 982
4983 | 1 | 983
4984 | 1 | 984
4985 | 1 | 985
4986 | 1 | 986
4987 | 1 | 987
4988 | 1 | 988
4989 | 1 | 989
4990 | 1 | 990
4991 | 1 | 991
4992 | 1 | 992
4993 | 1 | 993
4994 | 1 | 994
4995 | 1 | 995
4996 | 1 | 996
4997 | 1 | 997
4998 | 1 | 998
4999 | 1 | 999
9976 | 1 | 1976
9977 | 1 | 1977
9978 | 1 | 1978
9979 | 1 | 1979
9980 | 1 | 1980
9981 | 1 | 1981
9982 | 1 | 1982
9983 | 1 | 1983
9984 | 1 | 1984
9985 | 1 | 1985
9986 | 1 | 1986
9987 | 1 | 1987
9988 | 1 | 1988
9989 | 1 | 1989
9990 | 1 | 1990
9991 | 1 | 1991
9992 | 1 | 1992
9993 | 1 | 1993
9994 | 1 | 1994
9995 | 1 | 1995
9996 | 1 | 1996
9997 | 1 | 1997
9998 | 1 | 1998
9999 | 1 | 1999
(48 rows)
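-- With work_mem at 64kB the hash table for ~10000 groups cannot fit in
-- memory, so the aggregation must spill to disk and still return every
-- qualifying group exactly once.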
set work_mem to default;
set enable_sort to default;
--
-- Compare results between plans using sorting and plans using hash
-- aggregation. Force spilling in both cases by setting work_mem low.
--
set work_mem='64kB';
create table agg_data_2k as
select g from generate_series(0, 1999) g;
analyze agg_data_2k;
create table agg_data_20k as
select g from generate_series(0, 19999) g;
analyze agg_data_20k;
-- Produce results with sorting.
set enable_hashagg = false;
set jit_above_cost = 0;
explain (costs off)
select g%10000 as c1, sum(g::numeric) as c2, count(*) as c3
from agg_data_20k group by g%10000;
QUERY PLAN
--------------------------------------
GroupAggregate
Group Key: ((g % 10000))
-> Sort
Sort Key: ((g % 10000))
-> Seq Scan on agg_data_20k
(5 rows)
create table agg_group_1 as
select g%10000 as c1, sum(g::numeric) as c2, count(*) as c3
from agg_data_20k group by g%10000;
create table agg_group_2 as
select * from
(values (100), (300), (500)) as r(a),
lateral (
select (g/2)::numeric as c1,
array_agg(g::numeric) as c2,
count(*) as c3
from agg_data_2k
where g < r.a
group by g/2) as s;
set jit_above_cost to default;
create table agg_group_3 as
select (g/2)::numeric as c1, sum(7::int4) as c2, count(*) as c3
from agg_data_2k group by g/2;
create table agg_group_4 as
select (g/2)::numeric as c1, array_agg(g::numeric) as c2, count(*) as c3
from agg_data_2k group by g/2;
-- Produce results with hash aggregation.
set enable_hashagg = true;
set enable_sort = false;
set jit_above_cost = 0;
explain (costs off)
select g%10000 as c1, sum(g::numeric) as c2, count(*) as c3
from agg_data_20k group by g%10000;
QUERY PLAN
--------------------------------
HashAggregate
Group Key: (g % 10000)
-> Seq Scan on agg_data_20k
(3 rows)
create table agg_hash_1 as
select g%10000 as c1, sum(g::numeric) as c2, count(*) as c3
from agg_data_20k group by g%10000;
create table agg_hash_2 as
select * from
(values (100), (300), (500)) as r(a),
lateral (
select (g/2)::numeric as c1,
array_agg(g::numeric) as c2,
count(*) as c3
from agg_data_2k
where g < r.a
group by g/2) as s;
set jit_above_cost to default;
create table agg_hash_3 as
select (g/2)::numeric as c1, sum(7::int4) as c2, count(*) as c3
from agg_data_2k group by g/2;
create table agg_hash_4 as
select (g/2)::numeric as c1, array_agg(g::numeric) as c2, count(*) as c3
from agg_data_2k group by g/2;
set enable_sort = true;
set work_mem to default;
-- Compare group aggregation results to hash aggregation results
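-- (A EXCEPT B) UNION ALL (B EXCEPT A) is a symmetric difference: zero
-- resulting rows means both plans produced exactly the same set of rows.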
(select * from agg_hash_1 except select * from agg_group_1)
union all
(select * from agg_group_1 except select * from agg_hash_1);
c1 | c2 | c3
----+----+----
(0 rows)
(select * from agg_hash_2 except select * from agg_group_2)
union all
(select * from agg_group_2 except select * from agg_hash_2);
a | c1 | c2 | c3
---+----+----+----
(0 rows)
(select * from agg_hash_3 except select * from agg_group_3)
union all
(select * from agg_group_3 except select * from agg_hash_3);
c1 | c2 | c3
----+----+----
(0 rows)
(select * from agg_hash_4 except select * from agg_group_4)
union all
(select * from agg_group_4 except select * from agg_hash_4);
c1 | c2 | c3
----+----+----
(0 rows)
drop table agg_group_1;
drop table agg_group_2;
drop table agg_group_3;
drop table agg_group_4;
drop table agg_hash_1;
drop table agg_hash_2;
drop table agg_hash_3;
drop table agg_hash_4;