postgresql/src/test/regress/expected/aggregates.out

--
-- AGGREGATES
--
SELECT avg(four) AS avg_1 FROM onek;
avg_1
--------------------
1.5000000000000000
(1 row)
SELECT avg(a) AS avg_32 FROM aggtest WHERE a < 100;
avg_32
---------------------
32.6666666666666667
(1 row)
-- In 7.1, avg(float4) is computed using float8 arithmetic.
-- Round the result to 3 digits to avoid platform-specific results.
SELECT avg(b)::numeric(10,3) AS avg_107_943 FROM aggtest;
avg_107_943
-------------
107.943
(1 row)
SELECT avg(gpa) AS avg_3_4 FROM ONLY student;
avg_3_4
---------
3.4
(1 row)
SELECT sum(four) AS sum_1500 FROM onek;
sum_1500
----------
1500
(1 row)
SELECT sum(a) AS sum_198 FROM aggtest;
sum_198
---------
198
(1 row)
SELECT sum(b) AS avg_431_773 FROM aggtest;
avg_431_773
-------------
431.773
(1 row)
SELECT sum(gpa) AS avg_6_8 FROM ONLY student;
avg_6_8
---------
6.8
(1 row)
SELECT max(four) AS max_3 FROM onek;
max_3
-------
3
(1 row)
SELECT max(a) AS max_100 FROM aggtest;
max_100
---------
100
(1 row)
SELECT max(aggtest.b) AS max_324_78 FROM aggtest;
max_324_78
------------
324.78
(1 row)
SELECT max(student.gpa) AS max_3_7 FROM student;
max_3_7
---------
3.7
(1 row)
SELECT stddev_pop(b) FROM aggtest;
stddev_pop
-----------------
131.10703231895
(1 row)
SELECT stddev_samp(b) FROM aggtest;
stddev_samp
------------------
151.389360803998
(1 row)
SELECT var_pop(b) FROM aggtest;
var_pop
------------------
17189.0539234823
(1 row)
SELECT var_samp(b) FROM aggtest;
var_samp
------------------
22918.7385646431
(1 row)
SELECT stddev_pop(b::numeric) FROM aggtest;
stddev_pop
------------------
131.107032862199
(1 row)
SELECT stddev_samp(b::numeric) FROM aggtest;
stddev_samp
------------------
151.389361431288
(1 row)
SELECT var_pop(b::numeric) FROM aggtest;
var_pop
--------------------
17189.054065929769
(1 row)
SELECT var_samp(b::numeric) FROM aggtest;
var_samp
--------------------
22918.738754573025
(1 row)
-- population variance is defined for a single tuple, sample variance
-- is not
SELECT var_pop(1.0), var_samp(2.0);
var_pop | var_samp
---------+----------
0 |
(1 row)
SELECT stddev_pop(3.0::numeric), stddev_samp(4.0::numeric);
stddev_pop | stddev_samp
------------+-------------
0 |
(1 row)
-- verify correct results for null and NaN inputs
select sum(null::int4) from generate_series(1,3);
sum
-----
(1 row)
select sum(null::int8) from generate_series(1,3);
sum
-----
(1 row)
select sum(null::numeric) from generate_series(1,3);
sum
-----
(1 row)
select sum(null::float8) from generate_series(1,3);
sum
-----
(1 row)
select avg(null::int4) from generate_series(1,3);
avg
-----
(1 row)
select avg(null::int8) from generate_series(1,3);
avg
-----
(1 row)
select avg(null::numeric) from generate_series(1,3);
avg
-----
(1 row)
select avg(null::float8) from generate_series(1,3);
avg
-----
(1 row)
select sum('NaN'::numeric) from generate_series(1,3);
sum
-----
NaN
(1 row)
select avg('NaN'::numeric) from generate_series(1,3);
avg
-----
NaN
(1 row)
-- SQL2003 binary aggregates
SELECT regr_count(b, a) FROM aggtest;
regr_count
------------
4
(1 row)
SELECT regr_sxx(b, a) FROM aggtest;
regr_sxx
----------
5099
(1 row)
SELECT regr_syy(b, a) FROM aggtest;
regr_syy
------------------
68756.2156939293
(1 row)
SELECT regr_sxy(b, a) FROM aggtest;
regr_sxy
------------------
2614.51582155004
(1 row)
SELECT regr_avgx(b, a), regr_avgy(b, a) FROM aggtest;
regr_avgx | regr_avgy
-----------+------------------
49.5 | 107.943152273074
(1 row)
SELECT regr_r2(b, a) FROM aggtest;
regr_r2
--------------------
0.0194977982031803
(1 row)
SELECT regr_slope(b, a), regr_intercept(b, a) FROM aggtest;
regr_slope | regr_intercept
-------------------+------------------
0.512750700441271 | 82.5619926012309
(1 row)
SELECT covar_pop(b, a), covar_samp(b, a) FROM aggtest;
covar_pop | covar_samp
-----------------+------------------
653.62895538751 | 871.505273850014
(1 row)
SELECT corr(b, a) FROM aggtest;
corr
-------------------
0.139634516517873
(1 row)
SELECT count(four) AS cnt_1000 FROM onek;
cnt_1000
----------
1000
(1 row)
SELECT count(DISTINCT four) AS cnt_4 FROM onek;
cnt_4
-------
4
(1 row)
select ten, count(*), sum(four) from onek
group by ten order by ten;
ten | count | sum
-----+-------+-----
0 | 100 | 100
1 | 100 | 200
2 | 100 | 100
3 | 100 | 200
4 | 100 | 100
5 | 100 | 200
6 | 100 | 100
7 | 100 | 200
8 | 100 | 100
9 | 100 | 200
(10 rows)
select ten, count(four), sum(DISTINCT four) from onek
group by ten order by ten;
ten | count | sum
-----+-------+-----
0 | 100 | 2
1 | 100 | 4
2 | 100 | 2
3 | 100 | 4
4 | 100 | 2
5 | 100 | 4
6 | 100 | 2
7 | 100 | 4
8 | 100 | 2
9 | 100 | 4
(10 rows)
-- user-defined aggregates
SELECT newavg(four) AS avg_1 FROM onek;
avg_1
--------------------
1.5000000000000000
(1 row)
SELECT newsum(four) AS sum_1500 FROM onek;
sum_1500
----------
1500
(1 row)
SELECT newcnt(four) AS cnt_1000 FROM onek;
cnt_1000
----------
1000
(1 row)
SELECT newcnt(*) AS cnt_1000 FROM onek;
cnt_1000
----------
1000
(1 row)
SELECT oldcnt(*) AS cnt_1000 FROM onek;
cnt_1000
----------
1000
(1 row)
SELECT sum2(q1,q2) FROM int8_tbl;
sum2
-------------------
18271560493827981
(1 row)
-- test for outer-level aggregates
-- this should work
select ten, sum(distinct four) from onek a
group by ten
having exists (select 1 from onek b where sum(distinct a.four) = b.four);
ten | sum
-----+-----
0 | 2
2 | 2
4 | 2
6 | 2
8 | 2
(5 rows)
-- this should fail because subquery has an agg of its own in WHERE
select ten, sum(distinct four) from onek a
group by ten
having exists (select 1 from onek b
where sum(distinct a.four + b.four) = b.four);
ERROR: aggregate functions are not allowed in WHERE
LINE 4: where sum(distinct a.four + b.four) = b.four)...
^
-- Test handling of sublinks within outer-level aggregates.
-- Per bug report from Daniel Grace.
select
(select max((select i.unique2 from tenk1 i where i.unique1 = o.unique1)))
from tenk1 o;
max
------
9999
(1 row)
--
-- test for bitwise integer aggregates
--
CREATE TEMPORARY TABLE bitwise_test(
i2 INT2,
i4 INT4,
i8 INT8,
i INTEGER,
x INT2,
y BIT(4)
);
-- empty case
SELECT
BIT_AND(i2) AS "?",
BIT_OR(i4) AS "?"
FROM bitwise_test;
? | ?
---+---
|
(1 row)
COPY bitwise_test FROM STDIN NULL 'null';
SELECT
BIT_AND(i2) AS "1",
BIT_AND(i4) AS "1",
BIT_AND(i8) AS "1",
BIT_AND(i) AS "?",
BIT_AND(x) AS "0",
BIT_AND(y) AS "0100",
BIT_OR(i2) AS "7",
BIT_OR(i4) AS "7",
BIT_OR(i8) AS "7",
BIT_OR(i) AS "?",
BIT_OR(x) AS "7",
BIT_OR(y) AS "1101"
FROM bitwise_test;
1 | 1 | 1 | ? | 0 | 0100 | 7 | 7 | 7 | ? | 7 | 1101
---+---+---+---+---+------+---+---+---+---+---+------
1 | 1 | 1 | 1 | 0 | 0100 | 7 | 7 | 7 | 3 | 7 | 1101
(1 row)
--
-- test boolean aggregates
--
-- first test all possible transition and final states
SELECT
-- boolean and transitions
-- null because strict
booland_statefunc(NULL, NULL) IS NULL AS "t",
booland_statefunc(TRUE, NULL) IS NULL AS "t",
booland_statefunc(FALSE, NULL) IS NULL AS "t",
booland_statefunc(NULL, TRUE) IS NULL AS "t",
booland_statefunc(NULL, FALSE) IS NULL AS "t",
-- and actual computations
booland_statefunc(TRUE, TRUE) AS "t",
NOT booland_statefunc(TRUE, FALSE) AS "t",
NOT booland_statefunc(FALSE, TRUE) AS "t",
NOT booland_statefunc(FALSE, FALSE) AS "t";
t | t | t | t | t | t | t | t | t
---+---+---+---+---+---+---+---+---
t | t | t | t | t | t | t | t | t
(1 row)
SELECT
-- boolean or transitions
-- null because strict
boolor_statefunc(NULL, NULL) IS NULL AS "t",
boolor_statefunc(TRUE, NULL) IS NULL AS "t",
boolor_statefunc(FALSE, NULL) IS NULL AS "t",
boolor_statefunc(NULL, TRUE) IS NULL AS "t",
boolor_statefunc(NULL, FALSE) IS NULL AS "t",
-- actual computations
boolor_statefunc(TRUE, TRUE) AS "t",
boolor_statefunc(TRUE, FALSE) AS "t",
boolor_statefunc(FALSE, TRUE) AS "t",
NOT boolor_statefunc(FALSE, FALSE) AS "t";
t | t | t | t | t | t | t | t | t
---+---+---+---+---+---+---+---+---
t | t | t | t | t | t | t | t | t
(1 row)
CREATE TEMPORARY TABLE bool_test(
b1 BOOL,
b2 BOOL,
b3 BOOL,
b4 BOOL);
-- empty case
SELECT
BOOL_AND(b1) AS "n",
BOOL_OR(b3) AS "n"
FROM bool_test;
n | n
---+---
|
(1 row)
COPY bool_test FROM STDIN NULL 'null';
SELECT
BOOL_AND(b1) AS "f",
BOOL_AND(b2) AS "t",
BOOL_AND(b3) AS "f",
BOOL_AND(b4) AS "n",
BOOL_AND(NOT b2) AS "f",
BOOL_AND(NOT b3) AS "t"
FROM bool_test;
f | t | f | n | f | t
---+---+---+---+---+---
f | t | f | | f | t
(1 row)
SELECT
EVERY(b1) AS "f",
EVERY(b2) AS "t",
EVERY(b3) AS "f",
EVERY(b4) AS "n",
EVERY(NOT b2) AS "f",
EVERY(NOT b3) AS "t"
FROM bool_test;
f | t | f | n | f | t
---+---+---+---+---+---
f | t | f | | f | t
(1 row)
SELECT
BOOL_OR(b1) AS "t",
BOOL_OR(b2) AS "t",
BOOL_OR(b3) AS "f",
BOOL_OR(b4) AS "n",
BOOL_OR(NOT b2) AS "f",
BOOL_OR(NOT b3) AS "t"
FROM bool_test;
t | t | f | n | f | t
---+---+---+---+---+---
t | t | f | | f | t
(1 row)
--
-- Test cases that should be optimized into indexscans instead of
-- the generic aggregate implementation.
--
-- Basic cases
explain (costs off)
select min(unique1) from tenk1;
QUERY PLAN
------------------------------------------------------------
Result
InitPlan 1 (returns $0)
-> Limit
-> Index Only Scan using tenk1_unique1 on tenk1
Index Cond: (unique1 IS NOT NULL)
(5 rows)
select min(unique1) from tenk1;
min
-----
0
(1 row)
explain (costs off)
select max(unique1) from tenk1;
QUERY PLAN
---------------------------------------------------------------------
Result
InitPlan 1 (returns $0)
-> Limit
-> Index Only Scan Backward using tenk1_unique1 on tenk1
Index Cond: (unique1 IS NOT NULL)
(5 rows)
select max(unique1) from tenk1;
max
------
9999
(1 row)
explain (costs off)
select max(unique1) from tenk1 where unique1 < 42;
QUERY PLAN
------------------------------------------------------------------------
Result
InitPlan 1 (returns $0)
-> Limit
-> Index Only Scan Backward using tenk1_unique1 on tenk1
Index Cond: ((unique1 IS NOT NULL) AND (unique1 < 42))
(5 rows)
select max(unique1) from tenk1 where unique1 < 42;
max
-----
41
(1 row)
explain (costs off)
select max(unique1) from tenk1 where unique1 > 42;
QUERY PLAN
------------------------------------------------------------------------
Result
InitPlan 1 (returns $0)
-> Limit
-> Index Only Scan Backward using tenk1_unique1 on tenk1
Index Cond: ((unique1 IS NOT NULL) AND (unique1 > 42))
(5 rows)
select max(unique1) from tenk1 where unique1 > 42;
max
------
9999
(1 row)
explain (costs off)
select max(unique1) from tenk1 where unique1 > 42000;
QUERY PLAN
---------------------------------------------------------------------------
Result
InitPlan 1 (returns $0)
-> Limit
-> Index Only Scan Backward using tenk1_unique1 on tenk1
Index Cond: ((unique1 IS NOT NULL) AND (unique1 > 42000))
(5 rows)
select max(unique1) from tenk1 where unique1 > 42000;
max
-----
(1 row)
-- multi-column index (uses tenk1_thous_tenthous)
explain (costs off)
select max(tenthous) from tenk1 where thousand = 33;
QUERY PLAN
----------------------------------------------------------------------------
Result
InitPlan 1 (returns $0)
-> Limit
-> Index Only Scan Backward using tenk1_thous_tenthous on tenk1
Index Cond: ((thousand = 33) AND (tenthous IS NOT NULL))
(5 rows)
select max(tenthous) from tenk1 where thousand = 33;
max
------
9033
(1 row)
explain (costs off)
select min(tenthous) from tenk1 where thousand = 33;
QUERY PLAN
--------------------------------------------------------------------------
Result
InitPlan 1 (returns $0)
-> Limit
-> Index Only Scan using tenk1_thous_tenthous on tenk1
Index Cond: ((thousand = 33) AND (tenthous IS NOT NULL))
(5 rows)
select min(tenthous) from tenk1 where thousand = 33;
min
-----
33
(1 row)
-- check parameter propagation into an indexscan subquery
explain (costs off)
select f1, (select min(unique1) from tenk1 where unique1 > f1) AS gt
from int4_tbl;
QUERY PLAN
-----------------------------------------------------------------------------------------
Seq Scan on int4_tbl
SubPlan 2
-> Result
InitPlan 1 (returns $1)
-> Limit
-> Index Only Scan using tenk1_unique1 on tenk1
Index Cond: ((unique1 IS NOT NULL) AND (unique1 > int4_tbl.f1))
(7 rows)
select f1, (select min(unique1) from tenk1 where unique1 > f1) AS gt
from int4_tbl;
f1 | gt
-------------+----
0 | 1
123456 |
-123456 | 0
2147483647 |
-2147483647 | 0
(5 rows)
-- check some cases that were handled incorrectly in 8.3.0
explain (costs off)
select distinct max(unique2) from tenk1;
QUERY PLAN
---------------------------------------------------------------------
HashAggregate
Group Key: $0
InitPlan 1 (returns $0)
-> Limit
-> Index Only Scan Backward using tenk1_unique2 on tenk1
Index Cond: (unique2 IS NOT NULL)
-> Result
(7 rows)
select distinct max(unique2) from tenk1;
max
------
9999
(1 row)
explain (costs off)
select max(unique2) from tenk1 order by 1;
QUERY PLAN
---------------------------------------------------------------------
Sort
Sort Key: ($0)
InitPlan 1 (returns $0)
-> Limit
-> Index Only Scan Backward using tenk1_unique2 on tenk1
Index Cond: (unique2 IS NOT NULL)
-> Result
(7 rows)
select max(unique2) from tenk1 order by 1;
max
------
9999
(1 row)
explain (costs off)
select max(unique2) from tenk1 order by max(unique2);
QUERY PLAN
---------------------------------------------------------------------
Sort
Sort Key: ($0)
InitPlan 1 (returns $0)
-> Limit
-> Index Only Scan Backward using tenk1_unique2 on tenk1
Index Cond: (unique2 IS NOT NULL)
-> Result
(7 rows)
select max(unique2) from tenk1 order by max(unique2);
max
------
9999
(1 row)
explain (costs off)
select max(unique2) from tenk1 order by max(unique2)+1;
QUERY PLAN
---------------------------------------------------------------------
Sort
Sort Key: (($0 + 1))
InitPlan 1 (returns $0)
-> Limit
-> Index Only Scan Backward using tenk1_unique2 on tenk1
Index Cond: (unique2 IS NOT NULL)
-> Result
(7 rows)
select max(unique2) from tenk1 order by max(unique2)+1;
max
------
9999
(1 row)
explain (costs off)
select max(unique2), generate_series(1,3) as g from tenk1 order by g desc;
QUERY PLAN
---------------------------------------------------------------------
Sort
Sort Key: (generate_series(1, 3)) DESC
InitPlan 1 (returns $0)
-> Limit
-> Index Only Scan Backward using tenk1_unique2 on tenk1
Index Cond: (unique2 IS NOT NULL)
-> Result
(7 rows)
select max(unique2), generate_series(1,3) as g from tenk1 order by g desc;
max | g
------+---
9999 | 3
9999 | 2
9999 | 1
(3 rows)
-- try it on an inheritance tree
create table minmaxtest(f1 int);
create table minmaxtest1() inherits (minmaxtest);
create table minmaxtest2() inherits (minmaxtest);
create table minmaxtest3() inherits (minmaxtest);
create index minmaxtesti on minmaxtest(f1);
create index minmaxtest1i on minmaxtest1(f1);
create index minmaxtest2i on minmaxtest2(f1 desc);
create index minmaxtest3i on minmaxtest3(f1) where f1 is not null;
insert into minmaxtest values(11), (12);
insert into minmaxtest1 values(13), (14);
insert into minmaxtest2 values(15), (16);
insert into minmaxtest3 values(17), (18);
explain (costs off)
select min(f1), max(f1) from minmaxtest;
QUERY PLAN
----------------------------------------------------------------------------------------------
Result
InitPlan 1 (returns $0)
-> Limit
-> Merge Append
Sort Key: minmaxtest.f1
-> Index Only Scan using minmaxtesti on minmaxtest
Index Cond: (f1 IS NOT NULL)
-> Index Only Scan using minmaxtest1i on minmaxtest1
Index Cond: (f1 IS NOT NULL)
-> Index Only Scan Backward using minmaxtest2i on minmaxtest2
Index Cond: (f1 IS NOT NULL)
-> Index Only Scan using minmaxtest3i on minmaxtest3
Index Cond: (f1 IS NOT NULL)
InitPlan 2 (returns $1)
-> Limit
-> Merge Append
Sort Key: minmaxtest_1.f1 DESC
-> Index Only Scan Backward using minmaxtesti on minmaxtest minmaxtest_1
Index Cond: (f1 IS NOT NULL)
-> Index Only Scan Backward using minmaxtest1i on minmaxtest1 minmaxtest1_1
Index Cond: (f1 IS NOT NULL)
-> Index Only Scan using minmaxtest2i on minmaxtest2 minmaxtest2_1
Index Cond: (f1 IS NOT NULL)
-> Index Only Scan Backward using minmaxtest3i on minmaxtest3 minmaxtest3_1
Index Cond: (f1 IS NOT NULL)
(25 rows)
select min(f1), max(f1) from minmaxtest;
min | max
-----+-----
11 | 18
(1 row)
-- DISTINCT doesn't do anything useful here, but it shouldn't fail
explain (costs off)
select distinct min(f1), max(f1) from minmaxtest;
QUERY PLAN
----------------------------------------------------------------------------------------------
Unique
InitPlan 1 (returns $0)
-> Limit
-> Merge Append
Sort Key: minmaxtest.f1
-> Index Only Scan using minmaxtesti on minmaxtest
Index Cond: (f1 IS NOT NULL)
-> Index Only Scan using minmaxtest1i on minmaxtest1
Index Cond: (f1 IS NOT NULL)
-> Index Only Scan Backward using minmaxtest2i on minmaxtest2
Index Cond: (f1 IS NOT NULL)
-> Index Only Scan using minmaxtest3i on minmaxtest3
Index Cond: (f1 IS NOT NULL)
InitPlan 2 (returns $1)
-> Limit
-> Merge Append
Sort Key: minmaxtest_1.f1 DESC
-> Index Only Scan Backward using minmaxtesti on minmaxtest minmaxtest_1
Index Cond: (f1 IS NOT NULL)
-> Index Only Scan Backward using minmaxtest1i on minmaxtest1 minmaxtest1_1
Index Cond: (f1 IS NOT NULL)
-> Index Only Scan using minmaxtest2i on minmaxtest2 minmaxtest2_1
Index Cond: (f1 IS NOT NULL)
-> Index Only Scan Backward using minmaxtest3i on minmaxtest3 minmaxtest3_1
Index Cond: (f1 IS NOT NULL)
-> Sort
Sort Key: ($0), ($1)
-> Result
(28 rows)
select distinct min(f1), max(f1) from minmaxtest;
min | max
-----+-----
11 | 18
(1 row)
drop table minmaxtest cascade;
NOTICE: drop cascades to 3 other objects
DETAIL: drop cascades to table minmaxtest1
drop cascades to table minmaxtest2
drop cascades to table minmaxtest3
-- check for correct detection of nested-aggregate errors
select max(min(unique1)) from tenk1;
ERROR: aggregate function calls cannot be nested
LINE 1: select max(min(unique1)) from tenk1;
^
select (select max(min(unique1)) from int8_tbl) from tenk1;
ERROR: aggregate function calls cannot be nested
LINE 1: select (select max(min(unique1)) from int8_tbl) from tenk1;
^
--
-- Test removal of redundant GROUP BY columns
--
create temp table t1 (a int, b int, c int, d int, primary key (a, b));
create temp table t2 (x int, y int, z int, primary key (x, y));
create temp table t3 (a int, b int, c int, primary key(a, b) deferrable);
-- Non-primary-key columns can be removed from GROUP BY
explain (costs off) select * from t1 group by a,b,c,d;
QUERY PLAN
----------------------
HashAggregate
Group Key: a, b
-> Seq Scan on t1
(3 rows)
-- No removal can happen if the complete PK is not present in GROUP BY
explain (costs off) select a,c from t1 group by a,c,d;
QUERY PLAN
----------------------
HashAggregate
Group Key: a, c, d
-> Seq Scan on t1
(3 rows)
-- Test removal across multiple relations
explain (costs off) select *
from t1 inner join t2 on t1.a = t2.x and t1.b = t2.y
group by t1.a,t1.b,t1.c,t1.d,t2.x,t2.y,t2.z;
QUERY PLAN
-------------------------------------------------------
Group
Group Key: t1.a, t1.b, t2.x, t2.y
-> Merge Join
Merge Cond: ((t1.a = t2.x) AND (t1.b = t2.y))
-> Index Scan using t1_pkey on t1
-> Index Scan using t2_pkey on t2
(6 rows)
-- Test case where t1 can be optimized but not t2
explain (costs off) select t1.*,t2.x,t2.z
from t1 inner join t2 on t1.a = t2.x and t1.b = t2.y
group by t1.a,t1.b,t1.c,t1.d,t2.x,t2.z;
QUERY PLAN
-------------------------------------------------------
HashAggregate
Group Key: t1.a, t1.b, t2.x, t2.z
-> Merge Join
Merge Cond: ((t1.a = t2.x) AND (t1.b = t2.y))
-> Index Scan using t1_pkey on t1
-> Index Scan using t2_pkey on t2
(6 rows)
-- Cannot optimize when PK is deferrable
explain (costs off) select * from t3 group by a,b,c;
QUERY PLAN
----------------------
HashAggregate
Group Key: a, b, c
-> Seq Scan on t3
(3 rows)
drop table t1;
drop table t2;
drop table t3;
--
-- Test combinations of DISTINCT and/or ORDER BY
--
select array_agg(a order by b)
from (values (1,4),(2,3),(3,1),(4,2)) v(a,b);
array_agg
-----------
{3,4,2,1}
(1 row)
select array_agg(a order by a)
from (values (1,4),(2,3),(3,1),(4,2)) v(a,b);
array_agg
-----------
{1,2,3,4}
(1 row)
select array_agg(a order by a desc)
from (values (1,4),(2,3),(3,1),(4,2)) v(a,b);
array_agg
-----------
{4,3,2,1}
(1 row)
select array_agg(b order by a desc)
from (values (1,4),(2,3),(3,1),(4,2)) v(a,b);
array_agg
-----------
{2,1,3,4}
(1 row)
select array_agg(distinct a)
from (values (1),(2),(1),(3),(null),(2)) v(a);
array_agg
--------------
{1,2,3,NULL}
(1 row)
select array_agg(distinct a order by a)
from (values (1),(2),(1),(3),(null),(2)) v(a);
array_agg
--------------
{1,2,3,NULL}
(1 row)
select array_agg(distinct a order by a desc)
from (values (1),(2),(1),(3),(null),(2)) v(a);
array_agg
--------------
{NULL,3,2,1}
(1 row)
select array_agg(distinct a order by a desc nulls last)
from (values (1),(2),(1),(3),(null),(2)) v(a);
array_agg
--------------
{3,2,1,NULL}
(1 row)
-- multi-arg aggs, strict/nonstrict, distinct/order by
select aggfstr(a,b,c)
from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c);
aggfstr
---------------------------------------
{"(1,3,foo)","(2,2,bar)","(3,1,baz)"}
(1 row)
select aggfns(a,b,c)
from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c);
aggfns
-----------------------------------------------
{"(1,3,foo)","(0,,)","(2,2,bar)","(3,1,baz)"}
(1 row)
select aggfstr(distinct a,b,c)
from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
generate_series(1,3) i;
aggfstr
---------------------------------------
{"(1,3,foo)","(2,2,bar)","(3,1,baz)"}
(1 row)
select aggfns(distinct a,b,c)
from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
generate_series(1,3) i;
aggfns
-----------------------------------------------
{"(0,,)","(1,3,foo)","(2,2,bar)","(3,1,baz)"}
(1 row)
select aggfstr(distinct a,b,c order by b)
from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
generate_series(1,3) i;
aggfstr
---------------------------------------
{"(3,1,baz)","(2,2,bar)","(1,3,foo)"}
(1 row)
select aggfns(distinct a,b,c order by b)
from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
generate_series(1,3) i;
aggfns
-----------------------------------------------
{"(3,1,baz)","(2,2,bar)","(1,3,foo)","(0,,)"}
(1 row)
-- test specific code paths
select aggfns(distinct a,a,c order by c using ~<~,a)
from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
generate_series(1,2) i;
aggfns
------------------------------------------------
{"(2,2,bar)","(3,3,baz)","(1,1,foo)","(0,0,)"}
(1 row)
select aggfns(distinct a,a,c order by c using ~<~)
from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
generate_series(1,2) i;
aggfns
------------------------------------------------
{"(2,2,bar)","(3,3,baz)","(1,1,foo)","(0,0,)"}
(1 row)
select aggfns(distinct a,a,c order by a)
from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
generate_series(1,2) i;
aggfns
------------------------------------------------
{"(0,0,)","(1,1,foo)","(2,2,bar)","(3,3,baz)"}
(1 row)
select aggfns(distinct a,b,c order by a,c using ~<~,b)
from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
generate_series(1,2) i;
aggfns
-----------------------------------------------
{"(0,,)","(1,3,foo)","(2,2,bar)","(3,1,baz)"}
(1 row)
-- check node I/O via view creation and usage, also deparsing logic
create view agg_view1 as
select aggfns(a,b,c)
from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c);
select * from agg_view1;
aggfns
-----------------------------------------------
{"(1,3,foo)","(0,,)","(2,2,bar)","(3,1,baz)"}
(1 row)
select pg_get_viewdef('agg_view1'::regclass);
pg_get_viewdef
---------------------------------------------------------------------------------------------------------------------
SELECT aggfns(v.a, v.b, v.c) AS aggfns +
FROM ( VALUES (1,3,'foo'::text), (0,NULL::integer,NULL::text), (2,2,'bar'::text), (3,1,'baz'::text)) v(a, b, c);
(1 row)
create or replace view agg_view1 as
select aggfns(distinct a,b,c)
from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
generate_series(1,3) i;
select * from agg_view1;
aggfns
-----------------------------------------------
{"(0,,)","(1,3,foo)","(2,2,bar)","(3,1,baz)"}
(1 row)
select pg_get_viewdef('agg_view1'::regclass);
pg_get_viewdef
---------------------------------------------------------------------------------------------------------------------
SELECT aggfns(DISTINCT v.a, v.b, v.c) AS aggfns +
FROM ( VALUES (1,3,'foo'::text), (0,NULL::integer,NULL::text), (2,2,'bar'::text), (3,1,'baz'::text)) v(a, b, c),+
generate_series(1, 3) i(i);
(1 row)
create or replace view agg_view1 as
select aggfns(distinct a,b,c order by b)
from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
generate_series(1,3) i;
select * from agg_view1;
aggfns
-----------------------------------------------
{"(3,1,baz)","(2,2,bar)","(1,3,foo)","(0,,)"}
(1 row)
select pg_get_viewdef('agg_view1'::regclass);
pg_get_viewdef
---------------------------------------------------------------------------------------------------------------------
SELECT aggfns(DISTINCT v.a, v.b, v.c ORDER BY v.b) AS aggfns +
FROM ( VALUES (1,3,'foo'::text), (0,NULL::integer,NULL::text), (2,2,'bar'::text), (3,1,'baz'::text)) v(a, b, c),+
generate_series(1, 3) i(i);
(1 row)
create or replace view agg_view1 as
select aggfns(a,b,c order by b+1)
from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c);
select * from agg_view1;
aggfns
-----------------------------------------------
{"(3,1,baz)","(2,2,bar)","(1,3,foo)","(0,,)"}
(1 row)
select pg_get_viewdef('agg_view1'::regclass);
pg_get_viewdef
---------------------------------------------------------------------------------------------------------------------
SELECT aggfns(v.a, v.b, v.c ORDER BY (v.b + 1)) AS aggfns +
FROM ( VALUES (1,3,'foo'::text), (0,NULL::integer,NULL::text), (2,2,'bar'::text), (3,1,'baz'::text)) v(a, b, c);
(1 row)
create or replace view agg_view1 as
select aggfns(a,a,c order by b)
from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c);
select * from agg_view1;
aggfns
------------------------------------------------
{"(3,3,baz)","(2,2,bar)","(1,1,foo)","(0,0,)"}
(1 row)
select pg_get_viewdef('agg_view1'::regclass);
pg_get_viewdef
---------------------------------------------------------------------------------------------------------------------
SELECT aggfns(v.a, v.a, v.c ORDER BY v.b) AS aggfns +
FROM ( VALUES (1,3,'foo'::text), (0,NULL::integer,NULL::text), (2,2,'bar'::text), (3,1,'baz'::text)) v(a, b, c);
(1 row)
create or replace view agg_view1 as
select aggfns(a,b,c order by c using ~<~)
from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c);
select * from agg_view1;
aggfns
-----------------------------------------------
{"(2,2,bar)","(3,1,baz)","(1,3,foo)","(0,,)"}
(1 row)
select pg_get_viewdef('agg_view1'::regclass);
pg_get_viewdef
---------------------------------------------------------------------------------------------------------------------
SELECT aggfns(v.a, v.b, v.c ORDER BY v.c USING ~<~ NULLS LAST) AS aggfns +
FROM ( VALUES (1,3,'foo'::text), (0,NULL::integer,NULL::text), (2,2,'bar'::text), (3,1,'baz'::text)) v(a, b, c);
(1 row)
create or replace view agg_view1 as
select aggfns(distinct a,b,c order by a,c using ~<~,b)
from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
generate_series(1,2) i;
select * from agg_view1;
aggfns
-----------------------------------------------
{"(0,,)","(1,3,foo)","(2,2,bar)","(3,1,baz)"}
(1 row)
select pg_get_viewdef('agg_view1'::regclass);
pg_get_viewdef
---------------------------------------------------------------------------------------------------------------------
SELECT aggfns(DISTINCT v.a, v.b, v.c ORDER BY v.a, v.c USING ~<~ NULLS LAST, v.b) AS aggfns +
FROM ( VALUES (1,3,'foo'::text), (0,NULL::integer,NULL::text), (2,2,'bar'::text), (3,1,'baz'::text)) v(a, b, c),+
generate_series(1, 2) i(i);
(1 row)
drop view agg_view1;
-- incorrect DISTINCT usage errors
select aggfns(distinct a,b,c order by i)
from (values (1,1,'foo')) v(a,b,c), generate_series(1,2) i;
ERROR: in an aggregate with DISTINCT, ORDER BY expressions must appear in argument list
LINE 1: select aggfns(distinct a,b,c order by i)
^
select aggfns(distinct a,b,c order by a,b+1)
from (values (1,1,'foo')) v(a,b,c), generate_series(1,2) i;
ERROR: in an aggregate with DISTINCT, ORDER BY expressions must appear in argument list
LINE 1: select aggfns(distinct a,b,c order by a,b+1)
^
select aggfns(distinct a,b,c order by a,b,i,c)
from (values (1,1,'foo')) v(a,b,c), generate_series(1,2) i;
ERROR: in an aggregate with DISTINCT, ORDER BY expressions must appear in argument list
LINE 1: select aggfns(distinct a,b,c order by a,b,i,c)
^
select aggfns(distinct a,a,c order by a,b)
from (values (1,1,'foo')) v(a,b,c), generate_series(1,2) i;
ERROR: in an aggregate with DISTINCT, ORDER BY expressions must appear in argument list
LINE 1: select aggfns(distinct a,a,c order by a,b)
^
-- string_agg tests
select string_agg(a,',') from (values('aaaa'),('bbbb'),('cccc')) g(a);
string_agg
----------------
aaaa,bbbb,cccc
(1 row)
select string_agg(a,',') from (values('aaaa'),(null),('bbbb'),('cccc')) g(a);
string_agg
----------------
aaaa,bbbb,cccc
(1 row)
select string_agg(a,'AB') from (values(null),(null),('bbbb'),('cccc')) g(a);
string_agg
------------
bbbbABcccc
(1 row)
select string_agg(a,',') from (values(null),(null)) g(a);
string_agg
------------
(1 row)
-- check some implicit casting cases, as per bug #5564
select string_agg(distinct f1, ',' order by f1) from varchar_tbl; -- ok
string_agg
------------
a,ab,abcd
(1 row)
select string_agg(distinct f1::text, ',' order by f1) from varchar_tbl; -- not ok
ERROR: in an aggregate with DISTINCT, ORDER BY expressions must appear in argument list
LINE 1: select string_agg(distinct f1::text, ',' order by f1) from v...
^
select string_agg(distinct f1, ',' order by f1::text) from varchar_tbl; -- not ok
ERROR: in an aggregate with DISTINCT, ORDER BY expressions must appear in argument list
LINE 1: select string_agg(distinct f1, ',' order by f1::text) from v...
^
select string_agg(distinct f1::text, ',' order by f1::text) from varchar_tbl; -- ok
string_agg
------------
a,ab,abcd
(1 row)
-- string_agg bytea tests
create table bytea_test_table(v bytea);
select string_agg(v, '') from bytea_test_table;
string_agg
------------
(1 row)
insert into bytea_test_table values(decode('ff','hex'));
select string_agg(v, '') from bytea_test_table;
string_agg
------------
\xff
(1 row)
insert into bytea_test_table values(decode('aa','hex'));
select string_agg(v, '') from bytea_test_table;
string_agg
------------
\xffaa
(1 row)
select string_agg(v, NULL) from bytea_test_table;
string_agg
------------
\xffaa
(1 row)
select string_agg(v, decode('ee', 'hex')) from bytea_test_table;
string_agg
------------
\xffeeaa
(1 row)
drop table bytea_test_table;
-- FILTER tests
select min(unique1) filter (where unique1 > 100) from tenk1;
min
-----
101
(1 row)
select ten, sum(distinct four) filter (where four::text ~ '123') from onek a
group by ten;
ten | sum
-----+-----
0 |
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
(10 rows)
select ten, sum(distinct four) filter (where four > 10) from onek a
group by ten
having exists (select 1 from onek b where sum(distinct a.four) = b.four);
ten | sum
-----+-----
0 |
2 |
4 |
6 |
8 |
(5 rows)
select max(foo COLLATE "C") filter (where (bar collate "POSIX") > '0')
from (values ('a', 'b')) AS v(foo,bar);
max
-----
a
(1 row)
-- outer reference in FILTER (PostgreSQL extension)
select (select count(*)
from (values (1)) t0(inner_c))
from (values (2),(3)) t1(outer_c); -- inner query is aggregation query
count
-------
1
1
(2 rows)
select (select count(*) filter (where outer_c <> 0)
from (values (1)) t0(inner_c))
from (values (2),(3)) t1(outer_c); -- outer query is aggregation query
count
-------
2
(1 row)
select (select count(inner_c) filter (where outer_c <> 0)
from (values (1)) t0(inner_c))
from (values (2),(3)) t1(outer_c); -- inner query is aggregation query
count
-------
1
1
(2 rows)
select
(select max((select i.unique2 from tenk1 i where i.unique1 = o.unique1))
filter (where o.unique1 < 10))
from tenk1 o; -- outer query is aggregation query
max
------
9998
(1 row)
-- subquery in FILTER clause (PostgreSQL extension)
select sum(unique1) FILTER (WHERE
unique1 IN (SELECT unique1 FROM onek where unique1 < 100)) FROM tenk1;
sum
------
4950
(1 row)
-- exercise lots of aggregate parts with FILTER
select aggfns(distinct a,b,c order by a,c using ~<~,b) filter (where a > 1)
from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
generate_series(1,2) i;
aggfns
---------------------------
{"(2,2,bar)","(3,1,baz)"}
(1 row)
-- ordered-set aggregates
select p, percentile_cont(p) within group (order by x::float8)
from generate_series(1,5) x,
(values (0::float8),(0.1),(0.25),(0.4),(0.5),(0.6),(0.75),(0.9),(1)) v(p)
group by p order by p;
p | percentile_cont
------+-----------------
0 | 1
0.1 | 1.4
0.25 | 2
0.4 | 2.6
0.5 | 3
0.6 | 3.4
0.75 | 4
0.9 | 4.6
1 | 5
(9 rows)
select p, percentile_cont(p order by p) within group (order by x) -- error
from generate_series(1,5) x,
(values (0::float8),(0.1),(0.25),(0.4),(0.5),(0.6),(0.75),(0.9),(1)) v(p)
group by p order by p;
ERROR: cannot use multiple ORDER BY clauses with WITHIN GROUP
LINE 1: select p, percentile_cont(p order by p) within group (order ...
^
select p, sum() within group (order by x::float8) -- error
from generate_series(1,5) x,
(values (0::float8),(0.1),(0.25),(0.4),(0.5),(0.6),(0.75),(0.9),(1)) v(p)
group by p order by p;
ERROR: sum is not an ordered-set aggregate, so it cannot have WITHIN GROUP
LINE 1: select p, sum() within group (order by x::float8)
^
select p, percentile_cont(p,p) -- error
from generate_series(1,5) x,
(values (0::float8),(0.1),(0.25),(0.4),(0.5),(0.6),(0.75),(0.9),(1)) v(p)
group by p order by p;
ERROR: WITHIN GROUP is required for ordered-set aggregate percentile_cont
LINE 1: select p, percentile_cont(p,p)
^
select percentile_cont(0.5) within group (order by b) from aggtest;
percentile_cont
------------------
53.4485001564026
(1 row)
select percentile_cont(0.5) within group (order by b), sum(b) from aggtest;
percentile_cont | sum
------------------+---------
53.4485001564026 | 431.773
(1 row)
select percentile_cont(0.5) within group (order by thousand) from tenk1;
percentile_cont
-----------------
499.5
(1 row)
select percentile_disc(0.5) within group (order by thousand) from tenk1;
percentile_disc
-----------------
499
(1 row)
select rank(3) within group (order by x)
from (values (1),(1),(2),(2),(3),(3),(4)) v(x);
rank
------
5
(1 row)
select cume_dist(3) within group (order by x)
from (values (1),(1),(2),(2),(3),(3),(4)) v(x);
cume_dist
-----------
0.875
(1 row)
select percent_rank(3) within group (order by x)
from (values (1),(1),(2),(2),(3),(3),(4),(5)) v(x);
percent_rank
--------------
0.5
(1 row)
select dense_rank(3) within group (order by x)
from (values (1),(1),(2),(2),(3),(3),(4)) v(x);
dense_rank
------------
3
(1 row)
select percentile_disc(array[0,0.1,0.25,0.5,0.75,0.9,1]) within group (order by thousand)
from tenk1;
percentile_disc
----------------------------
{0,99,249,499,749,899,999}
(1 row)
select percentile_cont(array[0,0.25,0.5,0.75,1]) within group (order by thousand)
from tenk1;
percentile_cont
-----------------------------
{0,249.75,499.5,749.25,999}
(1 row)
select percentile_disc(array[[null,1,0.5],[0.75,0.25,null]]) within group (order by thousand)
from tenk1;
percentile_disc
---------------------------------
{{NULL,999,499},{749,249,NULL}}
(1 row)
select percentile_cont(array[0,1,0.25,0.75,0.5,1,0.3,0.32,0.35,0.38,0.4]) within group (order by x)
from generate_series(1,6) x;
percentile_cont
------------------------------------------
{1,6,2.25,4.75,3.5,6,2.5,2.6,2.75,2.9,3}
(1 row)
select ten, mode() within group (order by string4) from tenk1 group by ten;
ten | mode
-----+--------
0 | HHHHxx
1 | OOOOxx
2 | VVVVxx
3 | OOOOxx
4 | HHHHxx
5 | HHHHxx
6 | OOOOxx
7 | AAAAxx
8 | VVVVxx
9 | VVVVxx
(10 rows)
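-- (mode() returns the most frequent ORDER BY value.  As an illustrative
-- sketch that is not part of the recorded regression output, a query such as
--   select mode() within group (order by x) from (values (1),(2),(2)) v(x);
-- would be expected to return 2.)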
select percentile_disc(array[0.25,0.5,0.75]) within group (order by x)
from unnest('{fred,jim,fred,jack,jill,fred,jill,jim,jim,sheila,jim,sheila}'::text[]) u(x);
percentile_disc
-----------------
{fred,jill,jim}
(1 row)
-- check that collation propagates up in suitable cases:
select pg_collation_for(percentile_disc(1) within group (order by x collate "POSIX"))
from (values ('fred'),('jim')) v(x);
pg_collation_for
------------------
"POSIX"
(1 row)
-- ordered-set aggs created with CREATE AGGREGATE
select test_rank(3) within group (order by x)
from (values (1),(1),(2),(2),(3),(3),(4)) v(x);
test_rank
-----------
5
(1 row)
select test_percentile_disc(0.5) within group (order by thousand) from tenk1;
test_percentile_disc
----------------------
499
(1 row)
-- ordered-set aggs can't use ungrouped vars in direct args:
select rank(x) within group (order by x) from generate_series(1,5) x;
ERROR: column "x.x" must appear in the GROUP BY clause or be used in an aggregate function
LINE 1: select rank(x) within group (order by x) from generate_serie...
^
DETAIL: Direct arguments of an ordered-set aggregate must use only grouped columns.
-- outer-level agg can't use a grouped arg of a lower level, either:
select array(select percentile_disc(a) within group (order by x)
from (values (0.3),(0.7)) v(a) group by a)
from generate_series(1,5) g(x);
ERROR: outer-level aggregate cannot contain a lower-level variable in its direct arguments
LINE 1: select array(select percentile_disc(a) within group (order b...
^
-- agg in the direct args is a grouping violation, too:
select rank(sum(x)) within group (order by x) from generate_series(1,5) x;
ERROR: aggregate function calls cannot be nested
LINE 1: select rank(sum(x)) within group (order by x) from generate_...
^
-- hypothetical-set type unification and argument-count failures:
select rank(3) within group (order by x) from (values ('fred'),('jim')) v(x);
ERROR: WITHIN GROUP types text and integer cannot be matched
LINE 1: select rank(3) within group (order by x) from (values ('fred...
^
select rank(3) within group (order by stringu1,stringu2) from tenk1;
ERROR: function rank(integer, name, name) does not exist
LINE 1: select rank(3) within group (order by stringu1,stringu2) fro...
^
HINT: To use the hypothetical-set aggregate rank, the number of hypothetical direct arguments (here 1) must match the number of ordering columns (here 2).
select rank('fred') within group (order by x) from generate_series(1,5) x;
ERROR: invalid input syntax for integer: "fred"
LINE 1: select rank('fred') within group (order by x) from generate_...
^
select rank('adam'::text collate "C") within group (order by x collate "POSIX")
from (values ('fred'),('jim')) v(x);
ERROR: collation mismatch between explicit collations "C" and "POSIX"
LINE 1: ...adam'::text collate "C") within group (order by x collate "P...
^
-- hypothetical-set type unification successes:
select rank('adam'::varchar) within group (order by x) from (values ('fred'),('jim')) v(x);
rank
------
1
(1 row)
select rank('3') within group (order by x) from generate_series(1,5) x;
rank
------
3
(1 row)
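-- (In the second case the unknown-type literal '3' is unified with the
-- integer ORDER BY column, so the hypothetical row sorts as the value 3 and
-- receives rank 3.)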
-- check that percent_rank does not divide by zero on an empty input set
select percent_rank(0) within group (order by x) from generate_series(1,0) x;
percent_rank
--------------
0
(1 row)
-- deparse and multiple features:
create view aggordview1 as
select ten,
percentile_disc(0.5) within group (order by thousand) as p50,
percentile_disc(0.5) within group (order by thousand) filter (where hundred=1) as px,
rank(5,'AZZZZ',50) within group (order by hundred, string4 desc, hundred)
from tenk1
group by ten order by ten;
select pg_get_viewdef('aggordview1');
pg_get_viewdef
-------------------------------------------------------------------------------------------------------------------------------
SELECT tenk1.ten, +
percentile_disc((0.5)::double precision) WITHIN GROUP (ORDER BY tenk1.thousand) AS p50, +
percentile_disc((0.5)::double precision) WITHIN GROUP (ORDER BY tenk1.thousand) FILTER (WHERE (tenk1.hundred = 1)) AS px,+
rank(5, 'AZZZZ'::name, 50) WITHIN GROUP (ORDER BY tenk1.hundred, tenk1.string4 DESC, tenk1.hundred) AS rank +
FROM tenk1 +
GROUP BY tenk1.ten +
ORDER BY tenk1.ten;
(1 row)
select * from aggordview1 order by ten;
ten | p50 | px | rank
-----+-----+-----+------
0 | 490 | | 101
1 | 491 | 401 | 101
2 | 492 | | 101
3 | 493 | | 101
4 | 494 | | 101
5 | 495 | | 67
6 | 496 | | 1
7 | 497 | | 1
8 | 498 | | 1
9 | 499 | | 1
(10 rows)
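-- (Only rows with hundred = 1 pass the FILTER clause, and all such rows fall
-- in the ten = 1 group, so px is non-NULL for that group alone.)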
drop view aggordview1;
-- variadic aggregates
select least_agg(q1,q2) from int8_tbl;
least_agg
-------------------
-4567890123456789
(1 row)
select least_agg(variadic array[q1,q2]) from int8_tbl;
least_agg
-------------------
-4567890123456789
(1 row)
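-- (least_agg itself is created elsewhere in the regression suite.  As a
-- purely illustrative sketch, a variadic aggregate of this general shape
-- could be declared along these lines, with a hypothetical support function:
--   create function least_accum(int8, variadic int8[]) returns int8
--     language sql as 'select least($1, min(u)) from unnest($2) u';
--   create aggregate least_agg(variadic int8[])
--     (stype = int8, sfunc = least_accum);
-- Either calling convention shown above then works: a plain argument list or
-- VARIADIC with an array.)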
-- test that aggregates with a common transition function share the same state
begin work;
create type avg_state as (total bigint, count bigint);
create or replace function avg_transfn(state avg_state, n int) returns avg_state as
$$
declare new_state avg_state;
begin
raise notice 'avg_transfn called with %', n;
if state is null then
if n is not null then
new_state.total := n;
new_state.count := 1;
return new_state;
end if;
return null;
elsif n is not null then
state.total := state.total + n;
state.count := state.count + 1;
return state;
end if;
return null;
end
$$ language plpgsql;
create function avg_finalfn(state avg_state) returns int4 as
$$
begin
if state is null then
return NULL;
else
return state.total / state.count;
end if;
end
$$ language plpgsql;
create function sum_finalfn(state avg_state) returns int4 as
$$
begin
if state is null then
return NULL;
else
return state.total;
end if;
end
$$ language plpgsql;
create aggregate my_avg(int4)
(
stype = avg_state,
sfunc = avg_transfn,
finalfunc = avg_finalfn
);
create aggregate my_sum(int4)
(
stype = avg_state,
sfunc = avg_transfn,
finalfunc = sum_finalfn
);
-- aggregate state should be shared as the aggregate calls are identical.
select my_avg(one),my_avg(one) from (values(1),(3)) t(one);
NOTICE: avg_transfn called with 1
NOTICE: avg_transfn called with 3
my_avg | my_avg
--------+--------
2 | 2
(1 row)
-- aggregate state should be shared as transfn is the same for both aggs.
select my_avg(one),my_sum(one) from (values(1),(3)) t(one);
NOTICE: avg_transfn called with 1
NOTICE: avg_transfn called with 3
my_avg | my_sum
--------+--------
2 | 4
(1 row)
-- shouldn't share states due to the DISTINCT qualifier not matching.
select my_avg(distinct one),my_sum(one) from (values(1),(3)) t(one);
NOTICE: avg_transfn called with 1
NOTICE: avg_transfn called with 3
NOTICE: avg_transfn called with 1
NOTICE: avg_transfn called with 3
my_avg | my_sum
--------+--------
2 | 4
(1 row)
-- shouldn't share states due to the filter clause not matching.
select my_avg(one) filter (where one > 1),my_sum(one) from (values(1),(3)) t(one);
NOTICE: avg_transfn called with 1
NOTICE: avg_transfn called with 3
NOTICE: avg_transfn called with 3
my_avg | my_sum
--------+--------
3 | 4
(1 row)
-- this should not share the state due to different input columns.
select my_avg(one),my_sum(two) from (values(1,2),(3,4)) t(one,two);
NOTICE: avg_transfn called with 2
NOTICE: avg_transfn called with 1
NOTICE: avg_transfn called with 4
NOTICE: avg_transfn called with 3
my_avg | my_sum
--------+--------
2 | 6
(1 row)
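-- (Taken together, these cases show that two aggregates share a transition
-- state only when the transition function, input arguments, any DISTINCT
-- qualifier, and FILTER clause all match; the tests that follow add the
-- requirement that any INITCOND match as well.)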
-- test that aggs with the same sfunc and initcond share the same agg state
create aggregate my_sum_init(int4)
(
stype = avg_state,
sfunc = avg_transfn,
finalfunc = sum_finalfn,
initcond = '(10,0)'
);
create aggregate my_avg_init(int4)
(
stype = avg_state,
sfunc = avg_transfn,
finalfunc = avg_finalfn,
initcond = '(10,0)'
);
create aggregate my_avg_init2(int4)
(
stype = avg_state,
sfunc = avg_transfn,
finalfunc = avg_finalfn,
initcond = '(4,0)'
);
-- state should be shared if INITCONDs are matching
select my_sum_init(one),my_avg_init(one) from (values(1),(3)) t(one);
NOTICE: avg_transfn called with 1
NOTICE: avg_transfn called with 3
my_sum_init | my_avg_init
-------------+-------------
14 | 7
(1 row)
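-- (Both aggregates advance the one shared state starting from (10,0), so the
-- total is 10 + 1 + 3 = 14 and the average is 14 / 2 = 7.)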
-- Varying INITCONDs should cause the states not to be shared.
select my_sum_init(one),my_avg_init2(one) from (values(1),(3)) t(one);
NOTICE: avg_transfn called with 1
NOTICE: avg_transfn called with 1
NOTICE: avg_transfn called with 3
NOTICE: avg_transfn called with 3
my_sum_init | my_avg_init2
-------------+--------------
14 | 4
(1 row)
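-- (Here my_avg_init2 starts from (4,0), giving (4 + 1 + 3) / 2 = 4, while
-- my_sum_init keeps its own (10,0) state; each input value triggers two
-- avg_transfn NOTICEs because the two states are maintained separately.)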
rollback;
-- test aggregate state sharing to ensure it works if one aggregate has a
-- finalfn and the other one has none.
begin work;
create or replace function sum_transfn(state int4, n int4) returns int4 as
$$
declare new_state int4;
begin
raise notice 'sum_transfn called with %', n;
if state is null then
if n is not null then
new_state := n;
return new_state;
end if;
return null;
elsif n is not null then
state := state + n;
return state;
end if;
return null;
end
$$ language plpgsql;
create function halfsum_finalfn(state int4) returns int4 as
$$
begin
if state is null then
return NULL;
else
return state / 2;
end if;
end
$$ language plpgsql;
create aggregate my_sum(int4)
(
stype = int4,
sfunc = sum_transfn
);
create aggregate my_half_sum(int4)
(
stype = int4,
sfunc = sum_transfn,
finalfunc = halfsum_finalfn
);
-- Agg state should be shared even though my_sum has no finalfn
select my_sum(one),my_half_sum(one) from (values(1),(2),(3),(4)) t(one);
NOTICE: sum_transfn called with 1
NOTICE: sum_transfn called with 2
NOTICE: sum_transfn called with 3
NOTICE: sum_transfn called with 4
my_sum | my_half_sum
--------+-------------
10 | 5
(1 row)
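-- (The shared transition value is 10: my_sum, having no finalfn, returns it
-- as-is, while my_half_sum applies halfsum_finalfn to the same state to
-- produce 5.)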
rollback;