--
-- AGGREGATES
--
-- directory paths are passed to us in environment variables
\getenv abs_srcdir PG_ABS_SRCDIR
-- avoid bit-exact output here because operations may not be bit-exact.
SET extra_float_digits = 0;
-- prepare some test data
CREATE TABLE aggtest (
  a   int2,
  b   float4
);
\set filename :abs_srcdir '/data/agg.data'
COPY aggtest FROM :'filename';
ANALYZE aggtest;
SELECT avg(four) AS avg_1 FROM onek;
       avg_1
--------------------
 1.5000000000000000
(1 row)

SELECT avg(a) AS avg_32 FROM aggtest WHERE a < 100;
       avg_32
---------------------
 32.6666666666666667
(1 row)

SELECT any_value(v) FROM (VALUES (1), (2), (3)) AS v (v);
 any_value
-----------
         1
(1 row)

SELECT any_value(v) FROM (VALUES (NULL)) AS v (v);
 any_value
-----------
 
(1 row)

SELECT any_value(v) FROM (VALUES (NULL), (1), (2)) AS v (v);
 any_value
-----------
         1
(1 row)

SELECT any_value(v) FROM (VALUES (array['hello', 'world'])) AS v (v);
   any_value
---------------
 {hello,world}
(1 row)

-- In 7.1, avg(float4) is computed using float8 arithmetic.
-- Round the result to 3 digits to avoid platform-specific results.
SELECT avg(b)::numeric(10,3) AS avg_107_943 FROM aggtest;
 avg_107_943
-------------
     107.943
(1 row)

SELECT avg(gpa) AS avg_3_4 FROM ONLY student;
 avg_3_4
---------
     3.4
(1 row)

SELECT sum(four) AS sum_1500 FROM onek;
 sum_1500
----------
     1500
(1 row)

SELECT sum(a) AS sum_198 FROM aggtest;
 sum_198
---------
     198
(1 row)

SELECT sum(b) AS avg_431_773 FROM aggtest;
 avg_431_773
-------------
     431.773
(1 row)

SELECT sum(gpa) AS avg_6_8 FROM ONLY student;
 avg_6_8
---------
     6.8
(1 row)

SELECT max(four) AS max_3 FROM onek;
 max_3
-------
     3
(1 row)

SELECT max(a) AS max_100 FROM aggtest;
 max_100
---------
     100
(1 row)

SELECT max(aggtest.b) AS max_324_78 FROM aggtest;
 max_324_78
------------
     324.78
(1 row)

SELECT max(student.gpa) AS max_3_7 FROM student;
 max_3_7
---------
     3.7
(1 row)

SELECT stddev_pop(b) FROM aggtest;
   stddev_pop
-----------------
 131.10703231895
(1 row)

SELECT stddev_samp(b) FROM aggtest;
   stddev_samp
------------------
 151.389360803998
(1 row)

SELECT var_pop(b) FROM aggtest;
     var_pop
------------------
 17189.0539234823
(1 row)

SELECT var_samp(b) FROM aggtest;
     var_samp
------------------
 22918.7385646431
(1 row)

SELECT stddev_pop(b::numeric) FROM aggtest;
    stddev_pop
------------------
 131.107032862199
(1 row)

SELECT stddev_samp(b::numeric) FROM aggtest;
   stddev_samp
------------------
 151.389361431288
(1 row)

SELECT var_pop(b::numeric) FROM aggtest;
      var_pop
--------------------
 17189.054065929769
(1 row)

SELECT var_samp(b::numeric) FROM aggtest;
      var_samp
--------------------
 22918.738754573025
(1 row)

-- population variance is defined for a single tuple, sample variance
-- is not
SELECT var_pop(1.0::float8), var_samp(2.0::float8);
 var_pop | var_samp
---------+----------
       0 |
(1 row)

SELECT stddev_pop(3.0::float8), stddev_samp(4.0::float8);
 stddev_pop | stddev_samp
------------+-------------
          0 |
(1 row)

SELECT var_pop('inf'::float8), var_samp('inf'::float8);
 var_pop | var_samp
---------+----------
     NaN |
(1 row)

SELECT stddev_pop('inf'::float8), stddev_samp('inf'::float8);
 stddev_pop | stddev_samp
------------+-------------
        NaN |
(1 row)

SELECT var_pop('nan'::float8), var_samp('nan'::float8);
 var_pop | var_samp
---------+----------
     NaN |
(1 row)

SELECT stddev_pop('nan'::float8), stddev_samp('nan'::float8);
 stddev_pop | stddev_samp
------------+-------------
        NaN |
(1 row)

SELECT var_pop(1.0::float4), var_samp(2.0::float4);
 var_pop | var_samp
---------+----------
       0 |
(1 row)

SELECT stddev_pop(3.0::float4), stddev_samp(4.0::float4);
stddev_pop | stddev_samp ------------+------------- 0 | (1 row) SELECT var_pop('inf'::float4), var_samp('inf'::float4); var_pop | var_samp ---------+---------- NaN | (1 row) SELECT stddev_pop('inf'::float4), stddev_samp('inf'::float4); stddev_pop | stddev_samp ------------+------------- NaN | (1 row) SELECT var_pop('nan'::float4), var_samp('nan'::float4); var_pop | var_samp ---------+---------- NaN | (1 row) SELECT stddev_pop('nan'::float4), stddev_samp('nan'::float4); stddev_pop | stddev_samp ------------+------------- NaN | (1 row) SELECT var_pop(1.0::numeric), var_samp(2.0::numeric); var_pop | var_samp ---------+---------- 0 | (1 row) SELECT stddev_pop(3.0::numeric), stddev_samp(4.0::numeric); stddev_pop | stddev_samp ------------+------------- 0 | (1 row) SELECT var_pop('inf'::numeric), var_samp('inf'::numeric); var_pop | var_samp ---------+---------- NaN | (1 row) SELECT stddev_pop('inf'::numeric), stddev_samp('inf'::numeric); stddev_pop | stddev_samp ------------+------------- NaN | (1 row) SELECT var_pop('nan'::numeric), var_samp('nan'::numeric); var_pop | var_samp ---------+---------- NaN | (1 row) SELECT stddev_pop('nan'::numeric), stddev_samp('nan'::numeric); stddev_pop | stddev_samp ------------+------------- NaN | (1 row) -- verify correct results for null and NaN inputs select sum(null::int4) from generate_series(1,3); sum ----- (1 row) select sum(null::int8) from generate_series(1,3); sum ----- (1 row) select sum(null::numeric) from generate_series(1,3); sum ----- (1 row) select sum(null::float8) from generate_series(1,3); sum ----- (1 row) select avg(null::int4) from generate_series(1,3); avg ----- (1 row) select avg(null::int8) from generate_series(1,3); avg ----- (1 row) select avg(null::numeric) from generate_series(1,3); avg ----- (1 row) select avg(null::float8) from generate_series(1,3); avg ----- (1 row) select sum('NaN'::numeric) from generate_series(1,3); sum ----- NaN (1 row) select avg('NaN'::numeric) from generate_series(1,3); avg ----- NaN (1 row) -- verify correct results for infinite inputs SELECT sum(x::float8), avg(x::float8), var_pop(x::float8) FROM (VALUES ('1'), ('infinity')) v(x); sum | avg | var_pop ----------+----------+--------- Infinity | Infinity | NaN (1 row) SELECT sum(x::float8), avg(x::float8), var_pop(x::float8) FROM (VALUES ('infinity'), ('1')) v(x); sum | avg | var_pop ----------+----------+--------- Infinity | Infinity | NaN (1 row) SELECT sum(x::float8), avg(x::float8), var_pop(x::float8) FROM (VALUES ('infinity'), ('infinity')) v(x); sum | avg | var_pop ----------+----------+--------- Infinity | Infinity | NaN (1 row) SELECT sum(x::float8), avg(x::float8), var_pop(x::float8) FROM (VALUES ('-infinity'), ('infinity')) v(x); sum | avg | var_pop -----+-----+--------- NaN | NaN | NaN (1 row) SELECT sum(x::float8), avg(x::float8), var_pop(x::float8) FROM (VALUES ('-infinity'), ('-infinity')) v(x); sum | avg | var_pop -----------+-----------+--------- -Infinity | -Infinity | NaN (1 row) SELECT sum(x::numeric), avg(x::numeric), var_pop(x::numeric) FROM (VALUES ('1'), ('infinity')) v(x); sum | avg | var_pop ----------+----------+--------- Infinity | Infinity | NaN (1 row) SELECT sum(x::numeric), avg(x::numeric), var_pop(x::numeric) FROM (VALUES ('infinity'), ('1')) v(x); sum | avg | var_pop ----------+----------+--------- Infinity | Infinity | NaN (1 row) SELECT sum(x::numeric), avg(x::numeric), var_pop(x::numeric) FROM (VALUES ('infinity'), ('infinity')) v(x); sum | avg | var_pop ----------+----------+--------- Infinity | Infinity | NaN (1 row) 
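-- (Illustration only, not part of the expected regression output: numeric,
-- like float8, defines the sum of opposite-signed infinities as NaN, e.g.
--   SELECT 'infinity'::numeric + '-infinity'::numeric;   -- yields NaN
-- hence the '-infinity'/'infinity' combinations here report NaN for sum and
-- avg, and any infinite input makes var_pop NaN.)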
SELECT sum(x::numeric), avg(x::numeric), var_pop(x::numeric) FROM (VALUES ('-infinity'), ('infinity')) v(x); sum | avg | var_pop -----+-----+--------- NaN | NaN | NaN (1 row) SELECT sum(x::numeric), avg(x::numeric), var_pop(x::numeric) FROM (VALUES ('-infinity'), ('-infinity')) v(x); sum | avg | var_pop -----------+-----------+--------- -Infinity | -Infinity | NaN (1 row) -- test accuracy with a large input offset SELECT avg(x::float8), var_pop(x::float8) FROM (VALUES (100000003), (100000004), (100000006), (100000007)) v(x); avg | var_pop -----------+--------- 100000005 | 2.5 (1 row) SELECT avg(x::float8), var_pop(x::float8) FROM (VALUES (7000000000005), (7000000000007)) v(x); avg | var_pop ---------------+--------- 7000000000006 | 1 (1 row) -- SQL2003 binary aggregates SELECT regr_count(b, a) FROM aggtest; regr_count ------------ 4 (1 row) SELECT regr_sxx(b, a) FROM aggtest; regr_sxx ---------- 5099 (1 row) SELECT regr_syy(b, a) FROM aggtest; regr_syy ------------------ 68756.2156939293 (1 row) SELECT regr_sxy(b, a) FROM aggtest; regr_sxy ------------------ 2614.51582155004 (1 row) SELECT regr_avgx(b, a), regr_avgy(b, a) FROM aggtest; regr_avgx | regr_avgy -----------+------------------ 49.5 | 107.943152273074 (1 row) SELECT regr_r2(b, a) FROM aggtest; regr_r2 -------------------- 0.0194977982031803 (1 row) SELECT regr_slope(b, a), regr_intercept(b, a) FROM aggtest; regr_slope | regr_intercept -------------------+------------------ 0.512750700441271 | 82.5619926012309 (1 row) SELECT covar_pop(b, a), covar_samp(b, a) FROM aggtest; covar_pop | covar_samp -----------------+------------------ 653.62895538751 | 871.505273850014 (1 row) SELECT corr(b, a) FROM aggtest; corr ------------------- 0.139634516517873 (1 row) -- check single-tuple behavior SELECT covar_pop(1::float8,2::float8), covar_samp(3::float8,4::float8); covar_pop | covar_samp -----------+------------ 0 | (1 row) SELECT covar_pop(1::float8,'inf'::float8), covar_samp(3::float8,'inf'::float8); covar_pop | covar_samp -----------+------------ NaN | (1 row) SELECT covar_pop(1::float8,'nan'::float8), covar_samp(3::float8,'nan'::float8); covar_pop | covar_samp -----------+------------ NaN | (1 row) -- test accum and combine functions directly CREATE TABLE regr_test (x float8, y float8); INSERT INTO regr_test VALUES (10,150),(20,250),(30,350),(80,540),(100,200); SELECT count(*), sum(x), regr_sxx(y,x), sum(y),regr_syy(y,x), regr_sxy(y,x) FROM regr_test WHERE x IN (10,20,30,80); count | sum | regr_sxx | sum | regr_syy | regr_sxy -------+-----+----------+------+----------+---------- 4 | 140 | 2900 | 1290 | 83075 | 15050 (1 row) SELECT count(*), sum(x), regr_sxx(y,x), sum(y),regr_syy(y,x), regr_sxy(y,x) FROM regr_test; count | sum | regr_sxx | sum | regr_syy | regr_sxy -------+-----+----------+------+----------+---------- 5 | 240 | 6280 | 1490 | 95080 | 8680 (1 row) SELECT float8_accum('{4,140,2900}'::float8[], 100); float8_accum -------------- {5,240,6280} (1 row) SELECT float8_regr_accum('{4,140,2900,1290,83075,15050}'::float8[], 200, 100); float8_regr_accum ------------------------------ {5,240,6280,1490,95080,8680} (1 row) SELECT count(*), sum(x), regr_sxx(y,x), sum(y),regr_syy(y,x), regr_sxy(y,x) FROM regr_test WHERE x IN (10,20,30); count | sum | regr_sxx | sum | regr_syy | regr_sxy -------+-----+----------+-----+----------+---------- 3 | 60 | 200 | 750 | 20000 | 2000 (1 row) SELECT count(*), sum(x), regr_sxx(y,x), sum(y),regr_syy(y,x), regr_sxy(y,x) FROM regr_test WHERE x IN (80,100); count | sum | regr_sxx | sum | regr_syy | regr_sxy 
-------+-----+----------+-----+----------+---------- 2 | 180 | 200 | 740 | 57800 | -3400 (1 row) SELECT float8_combine('{3,60,200}'::float8[], '{0,0,0}'::float8[]); float8_combine ---------------- {3,60,200} (1 row) SELECT float8_combine('{0,0,0}'::float8[], '{2,180,200}'::float8[]); float8_combine ---------------- {2,180,200} (1 row) SELECT float8_combine('{3,60,200}'::float8[], '{2,180,200}'::float8[]); float8_combine ---------------- {5,240,6280} (1 row) SELECT float8_regr_combine('{3,60,200,750,20000,2000}'::float8[], '{0,0,0,0,0,0}'::float8[]); float8_regr_combine --------------------------- {3,60,200,750,20000,2000} (1 row) SELECT float8_regr_combine('{0,0,0,0,0,0}'::float8[], '{2,180,200,740,57800,-3400}'::float8[]); float8_regr_combine ----------------------------- {2,180,200,740,57800,-3400} (1 row) SELECT float8_regr_combine('{3,60,200,750,20000,2000}'::float8[], '{2,180,200,740,57800,-3400}'::float8[]); float8_regr_combine ------------------------------ {5,240,6280,1490,95080,8680} (1 row) DROP TABLE regr_test; -- test count, distinct SELECT count(four) AS cnt_1000 FROM onek; cnt_1000 ---------- 1000 (1 row) SELECT count(DISTINCT four) AS cnt_4 FROM onek; cnt_4 ------- 4 (1 row) select ten, count(*), sum(four) from onek group by ten order by ten; ten | count | sum -----+-------+----- 0 | 100 | 100 1 | 100 | 200 2 | 100 | 100 3 | 100 | 200 4 | 100 | 100 5 | 100 | 200 6 | 100 | 100 7 | 100 | 200 8 | 100 | 100 9 | 100 | 200 (10 rows) select ten, count(four), sum(DISTINCT four) from onek group by ten order by ten; ten | count | sum -----+-------+----- 0 | 100 | 2 1 | 100 | 4 2 | 100 | 2 3 | 100 | 4 4 | 100 | 2 5 | 100 | 4 6 | 100 | 2 7 | 100 | 4 8 | 100 | 2 9 | 100 | 4 (10 rows) -- user-defined aggregates SELECT newavg(four) AS avg_1 FROM onek; avg_1 -------------------- 1.5000000000000000 (1 row) SELECT newsum(four) AS sum_1500 FROM onek; sum_1500 ---------- 1500 (1 row) SELECT newcnt(four) AS cnt_1000 FROM onek; cnt_1000 ---------- 1000 (1 row) SELECT newcnt(*) AS cnt_1000 FROM onek; cnt_1000 ---------- 1000 (1 row) SELECT oldcnt(*) AS cnt_1000 FROM onek; cnt_1000 ---------- 1000 (1 row) SELECT sum2(q1,q2) FROM int8_tbl; sum2 ------------------- 18271560493827981 (1 row) -- test for outer-level aggregates -- this should work select ten, sum(distinct four) from onek a group by ten having exists (select 1 from onek b where sum(distinct a.four) = b.four); ten | sum -----+----- 0 | 2 2 | 2 4 | 2 6 | 2 8 | 2 (5 rows) -- this should fail because subquery has an agg of its own in WHERE select ten, sum(distinct four) from onek a group by ten having exists (select 1 from onek b where sum(distinct a.four + b.four) = b.four); ERROR: aggregate functions are not allowed in WHERE LINE 4: where sum(distinct a.four + b.four) = b.four)... ^ -- Test handling of sublinks within outer-level aggregates. -- Per bug report from Daniel Grace. select (select max((select i.unique2 from tenk1 i where i.unique1 = o.unique1))) from tenk1 o; max ------ 9999 (1 row) -- Test handling of Params within aggregate arguments in hashed aggregation. -- Per bug report from Jeevan Chalke. 
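-- (In the lateral subquery below, the aggregate's argument (s1 + s2) refers to
-- the outer column s1, which reaches the subquery as an executor Param, so the
-- HashAggregate must be recomputed for every outer row.)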
explain (verbose, costs off) select s1, s2, sm from generate_series(1, 3) s1, lateral (select s2, sum(s1 + s2) sm from generate_series(1, 3) s2 group by s2) ss order by 1, 2; QUERY PLAN ------------------------------------------------------------------ Sort Output: s1.s1, s2.s2, (sum((s1.s1 + s2.s2))) Sort Key: s1.s1, s2.s2 -> Nested Loop Output: s1.s1, s2.s2, (sum((s1.s1 + s2.s2))) -> Function Scan on pg_catalog.generate_series s1 Output: s1.s1 Function Call: generate_series(1, 3) -> HashAggregate Output: s2.s2, sum((s1.s1 + s2.s2)) Group Key: s2.s2 -> Function Scan on pg_catalog.generate_series s2 Output: s2.s2 Function Call: generate_series(1, 3) (14 rows) select s1, s2, sm from generate_series(1, 3) s1, lateral (select s2, sum(s1 + s2) sm from generate_series(1, 3) s2 group by s2) ss order by 1, 2; s1 | s2 | sm ----+----+---- 1 | 1 | 2 1 | 2 | 3 1 | 3 | 4 2 | 1 | 3 2 | 2 | 4 2 | 3 | 5 3 | 1 | 4 3 | 2 | 5 3 | 3 | 6 (9 rows) explain (verbose, costs off) select array(select sum(x+y) s from generate_series(1,3) y group by y order by s) from generate_series(1,3) x; QUERY PLAN ------------------------------------------------------------------- Function Scan on pg_catalog.generate_series x Output: ARRAY(SubPlan 1) Function Call: generate_series(1, 3) SubPlan 1 -> Sort Output: (sum((x.x + y.y))), y.y Sort Key: (sum((x.x + y.y))) -> HashAggregate Output: sum((x.x + y.y)), y.y Group Key: y.y -> Function Scan on pg_catalog.generate_series y Output: y.y Function Call: generate_series(1, 3) (13 rows) select array(select sum(x+y) s from generate_series(1,3) y group by y order by s) from generate_series(1,3) x; array --------- {2,3,4} {3,4,5} {4,5,6} (3 rows) -- -- test for bitwise integer aggregates -- CREATE TEMPORARY TABLE bitwise_test( i2 INT2, i4 INT4, i8 INT8, i INTEGER, x INT2, y BIT(4) ); -- empty case SELECT BIT_AND(i2) AS "?", BIT_OR(i4) AS "?", BIT_XOR(i8) AS "?" FROM bitwise_test; ? | ? | ? ---+---+--- | | (1 row) COPY bitwise_test FROM STDIN NULL 'null'; SELECT BIT_AND(i2) AS "1", BIT_AND(i4) AS "1", BIT_AND(i8) AS "1", BIT_AND(i) AS "?", BIT_AND(x) AS "0", BIT_AND(y) AS "0100", BIT_OR(i2) AS "7", BIT_OR(i4) AS "7", BIT_OR(i8) AS "7", BIT_OR(i) AS "?", BIT_OR(x) AS "7", BIT_OR(y) AS "1101", BIT_XOR(i2) AS "5", BIT_XOR(i4) AS "5", BIT_XOR(i8) AS "5", BIT_XOR(i) AS "?", BIT_XOR(x) AS "7", BIT_XOR(y) AS "1101" FROM bitwise_test; 1 | 1 | 1 | ? | 0 | 0100 | 7 | 7 | 7 | ? | 7 | 1101 | 5 | 5 | 5 | ? 
| 7 | 1101 ---+---+---+---+---+------+---+---+---+---+---+------+---+---+---+---+---+------ 1 | 1 | 1 | 1 | 0 | 0100 | 7 | 7 | 7 | 3 | 7 | 1101 | 5 | 5 | 5 | 2 | 7 | 1101 (1 row) -- -- test boolean aggregates -- -- first test all possible transition and final states SELECT -- boolean and transitions -- null because strict booland_statefunc(NULL, NULL) IS NULL AS "t", booland_statefunc(TRUE, NULL) IS NULL AS "t", booland_statefunc(FALSE, NULL) IS NULL AS "t", booland_statefunc(NULL, TRUE) IS NULL AS "t", booland_statefunc(NULL, FALSE) IS NULL AS "t", -- and actual computations booland_statefunc(TRUE, TRUE) AS "t", NOT booland_statefunc(TRUE, FALSE) AS "t", NOT booland_statefunc(FALSE, TRUE) AS "t", NOT booland_statefunc(FALSE, FALSE) AS "t"; t | t | t | t | t | t | t | t | t ---+---+---+---+---+---+---+---+--- t | t | t | t | t | t | t | t | t (1 row) SELECT -- boolean or transitions -- null because strict boolor_statefunc(NULL, NULL) IS NULL AS "t", boolor_statefunc(TRUE, NULL) IS NULL AS "t", boolor_statefunc(FALSE, NULL) IS NULL AS "t", boolor_statefunc(NULL, TRUE) IS NULL AS "t", boolor_statefunc(NULL, FALSE) IS NULL AS "t", -- actual computations boolor_statefunc(TRUE, TRUE) AS "t", boolor_statefunc(TRUE, FALSE) AS "t", boolor_statefunc(FALSE, TRUE) AS "t", NOT boolor_statefunc(FALSE, FALSE) AS "t"; t | t | t | t | t | t | t | t | t ---+---+---+---+---+---+---+---+--- t | t | t | t | t | t | t | t | t (1 row) CREATE TEMPORARY TABLE bool_test( b1 BOOL, b2 BOOL, b3 BOOL, b4 BOOL); -- empty case SELECT BOOL_AND(b1) AS "n", BOOL_OR(b3) AS "n" FROM bool_test; n | n ---+--- | (1 row) COPY bool_test FROM STDIN NULL 'null'; SELECT BOOL_AND(b1) AS "f", BOOL_AND(b2) AS "t", BOOL_AND(b3) AS "f", BOOL_AND(b4) AS "n", BOOL_AND(NOT b2) AS "f", BOOL_AND(NOT b3) AS "t" FROM bool_test; f | t | f | n | f | t ---+---+---+---+---+--- f | t | f | | f | t (1 row) SELECT EVERY(b1) AS "f", EVERY(b2) AS "t", EVERY(b3) AS "f", EVERY(b4) AS "n", EVERY(NOT b2) AS "f", EVERY(NOT b3) AS "t" FROM bool_test; f | t | f | n | f | t ---+---+---+---+---+--- f | t | f | | f | t (1 row) SELECT BOOL_OR(b1) AS "t", BOOL_OR(b2) AS "t", BOOL_OR(b3) AS "f", BOOL_OR(b4) AS "n", BOOL_OR(NOT b2) AS "f", BOOL_OR(NOT b3) AS "t" FROM bool_test; t | t | f | n | f | t ---+---+---+---+---+--- t | t | f | | f | t (1 row) -- -- Test cases that should be optimized into indexscans instead of -- the generic aggregate implementation. 
-- -- Basic cases explain (costs off) select min(unique1) from tenk1; QUERY PLAN ------------------------------------------------------------ Result InitPlan 1 -> Limit -> Index Only Scan using tenk1_unique1 on tenk1 Index Cond: (unique1 IS NOT NULL) (5 rows) select min(unique1) from tenk1; min ----- 0 (1 row) explain (costs off) select max(unique1) from tenk1; QUERY PLAN --------------------------------------------------------------------- Result InitPlan 1 -> Limit -> Index Only Scan Backward using tenk1_unique1 on tenk1 Index Cond: (unique1 IS NOT NULL) (5 rows) select max(unique1) from tenk1; max ------ 9999 (1 row) explain (costs off) select max(unique1) from tenk1 where unique1 < 42; QUERY PLAN ------------------------------------------------------------------------ Result InitPlan 1 -> Limit -> Index Only Scan Backward using tenk1_unique1 on tenk1 Index Cond: ((unique1 IS NOT NULL) AND (unique1 < 42)) (5 rows) select max(unique1) from tenk1 where unique1 < 42; max ----- 41 (1 row) explain (costs off) select max(unique1) from tenk1 where unique1 > 42; QUERY PLAN ------------------------------------------------------------------------ Result InitPlan 1 -> Limit -> Index Only Scan Backward using tenk1_unique1 on tenk1 Index Cond: ((unique1 IS NOT NULL) AND (unique1 > 42)) (5 rows) select max(unique1) from tenk1 where unique1 > 42; max ------ 9999 (1 row) -- the planner may choose a generic aggregate here if parallel query is -- enabled, since that plan will be parallel safe and the "optimized" -- plan, which has almost identical cost, will not be. we want to test -- the optimized plan, so temporarily disable parallel query. begin; set local max_parallel_workers_per_gather = 0; explain (costs off) select max(unique1) from tenk1 where unique1 > 42000; QUERY PLAN --------------------------------------------------------------------------- Result InitPlan 1 -> Limit -> Index Only Scan Backward using tenk1_unique1 on tenk1 Index Cond: ((unique1 IS NOT NULL) AND (unique1 > 42000)) (5 rows) select max(unique1) from tenk1 where unique1 > 42000; max ----- (1 row) rollback; -- multi-column index (uses tenk1_thous_tenthous) explain (costs off) select max(tenthous) from tenk1 where thousand = 33; QUERY PLAN ---------------------------------------------------------------------------- Result InitPlan 1 -> Limit -> Index Only Scan Backward using tenk1_thous_tenthous on tenk1 Index Cond: ((thousand = 33) AND (tenthous IS NOT NULL)) (5 rows) select max(tenthous) from tenk1 where thousand = 33; max ------ 9033 (1 row) explain (costs off) select min(tenthous) from tenk1 where thousand = 33; QUERY PLAN -------------------------------------------------------------------------- Result InitPlan 1 -> Limit -> Index Only Scan using tenk1_thous_tenthous on tenk1 Index Cond: ((thousand = 33) AND (tenthous IS NOT NULL)) (5 rows) select min(tenthous) from tenk1 where thousand = 33; min ----- 33 (1 row) -- check parameter propagation into an indexscan subquery explain (costs off) select f1, (select min(unique1) from tenk1 where unique1 > f1) AS gt from int4_tbl; QUERY PLAN ----------------------------------------------------------------------------------------- Seq Scan on int4_tbl SubPlan 2 -> Result InitPlan 1 -> Limit -> Index Only Scan using tenk1_unique1 on tenk1 Index Cond: ((unique1 IS NOT NULL) AND (unique1 > int4_tbl.f1)) (7 rows) select f1, (select min(unique1) from tenk1 where unique1 > f1) AS gt from int4_tbl; f1 | gt -------------+---- 0 | 1 123456 | -123456 | 0 2147483647 | -2147483647 | 0 (5 rows) -- 
check some cases that were handled incorrectly in 8.3.0 explain (costs off) select distinct max(unique2) from tenk1; QUERY PLAN --------------------------------------------------------------------- HashAggregate Group Key: (InitPlan 1).col1 InitPlan 1 -> Limit -> Index Only Scan Backward using tenk1_unique2 on tenk1 Index Cond: (unique2 IS NOT NULL) -> Result (7 rows) select distinct max(unique2) from tenk1; max ------ 9999 (1 row) explain (costs off) select max(unique2) from tenk1 order by 1; QUERY PLAN --------------------------------------------------------------------- Sort Sort Key: ((InitPlan 1).col1) InitPlan 1 -> Limit -> Index Only Scan Backward using tenk1_unique2 on tenk1 Index Cond: (unique2 IS NOT NULL) -> Result (7 rows) select max(unique2) from tenk1 order by 1; max ------ 9999 (1 row) explain (costs off) select max(unique2) from tenk1 order by max(unique2); QUERY PLAN --------------------------------------------------------------------- Sort Sort Key: ((InitPlan 1).col1) InitPlan 1 -> Limit -> Index Only Scan Backward using tenk1_unique2 on tenk1 Index Cond: (unique2 IS NOT NULL) -> Result (7 rows) select max(unique2) from tenk1 order by max(unique2); max ------ 9999 (1 row) explain (costs off) select max(unique2) from tenk1 order by max(unique2)+1; QUERY PLAN --------------------------------------------------------------------- Sort Sort Key: (((InitPlan 1).col1 + 1)) InitPlan 1 -> Limit -> Index Only Scan Backward using tenk1_unique2 on tenk1 Index Cond: (unique2 IS NOT NULL) -> Result (7 rows) select max(unique2) from tenk1 order by max(unique2)+1; max ------ 9999 (1 row) explain (costs off) select max(unique2), generate_series(1,3) as g from tenk1 order by g desc; QUERY PLAN --------------------------------------------------------------------- Sort Sort Key: (generate_series(1, 3)) DESC InitPlan 1 -> Limit -> Index Only Scan Backward using tenk1_unique2 on tenk1 Index Cond: (unique2 IS NOT NULL) -> ProjectSet -> Result (8 rows) select max(unique2), generate_series(1,3) as g from tenk1 order by g desc; max | g ------+--- 9999 | 3 9999 | 2 9999 | 1 (3 rows) -- interesting corner case: constant gets optimized into a seqscan explain (costs off) select max(100) from tenk1; QUERY PLAN ---------------------------------------------------- Result InitPlan 1 -> Limit -> Result One-Time Filter: (100 IS NOT NULL) -> Seq Scan on tenk1 (6 rows) select max(100) from tenk1; max ----- 100 (1 row) -- try it on an inheritance tree create table minmaxtest(f1 int); create table minmaxtest1() inherits (minmaxtest); create table minmaxtest2() inherits (minmaxtest); create table minmaxtest3() inherits (minmaxtest); create index minmaxtesti on minmaxtest(f1); create index minmaxtest1i on minmaxtest1(f1); create index minmaxtest2i on minmaxtest2(f1 desc); create index minmaxtest3i on minmaxtest3(f1) where f1 is not null; insert into minmaxtest values(11), (12); insert into minmaxtest1 values(13), (14); insert into minmaxtest2 values(15), (16); insert into minmaxtest3 values(17), (18); explain (costs off) select min(f1), max(f1) from minmaxtest; QUERY PLAN --------------------------------------------------------------------------------------------- Result InitPlan 1 -> Limit -> Merge Append Sort Key: minmaxtest.f1 -> Index Only Scan using minmaxtesti on minmaxtest minmaxtest_1 Index Cond: (f1 IS NOT NULL) -> Index Only Scan using minmaxtest1i on minmaxtest1 minmaxtest_2 Index Cond: (f1 IS NOT NULL) -> Index Only Scan Backward using minmaxtest2i on minmaxtest2 minmaxtest_3 Index Cond: (f1 IS NOT 
NULL) -> Index Only Scan using minmaxtest3i on minmaxtest3 minmaxtest_4 InitPlan 2 -> Limit -> Merge Append Sort Key: minmaxtest_5.f1 DESC -> Index Only Scan Backward using minmaxtesti on minmaxtest minmaxtest_6 Index Cond: (f1 IS NOT NULL) -> Index Only Scan Backward using minmaxtest1i on minmaxtest1 minmaxtest_7 Index Cond: (f1 IS NOT NULL) -> Index Only Scan using minmaxtest2i on minmaxtest2 minmaxtest_8 Index Cond: (f1 IS NOT NULL) -> Index Only Scan Backward using minmaxtest3i on minmaxtest3 minmaxtest_9 (23 rows) select min(f1), max(f1) from minmaxtest; min | max -----+----- 11 | 18 (1 row) -- DISTINCT doesn't do anything useful here, but it shouldn't fail explain (costs off) select distinct min(f1), max(f1) from minmaxtest; QUERY PLAN --------------------------------------------------------------------------------------------- Unique InitPlan 1 -> Limit -> Merge Append Sort Key: minmaxtest.f1 -> Index Only Scan using minmaxtesti on minmaxtest minmaxtest_1 Index Cond: (f1 IS NOT NULL) -> Index Only Scan using minmaxtest1i on minmaxtest1 minmaxtest_2 Index Cond: (f1 IS NOT NULL) -> Index Only Scan Backward using minmaxtest2i on minmaxtest2 minmaxtest_3 Index Cond: (f1 IS NOT NULL) -> Index Only Scan using minmaxtest3i on minmaxtest3 minmaxtest_4 InitPlan 2 -> Limit -> Merge Append Sort Key: minmaxtest_5.f1 DESC -> Index Only Scan Backward using minmaxtesti on minmaxtest minmaxtest_6 Index Cond: (f1 IS NOT NULL) -> Index Only Scan Backward using minmaxtest1i on minmaxtest1 minmaxtest_7 Index Cond: (f1 IS NOT NULL) -> Index Only Scan using minmaxtest2i on minmaxtest2 minmaxtest_8 Index Cond: (f1 IS NOT NULL) -> Index Only Scan Backward using minmaxtest3i on minmaxtest3 minmaxtest_9 -> Sort Sort Key: ((InitPlan 1).col1), ((InitPlan 2).col1) -> Result (26 rows) select distinct min(f1), max(f1) from minmaxtest; min | max -----+----- 11 | 18 (1 row) drop table minmaxtest cascade; NOTICE: drop cascades to 3 other objects DETAIL: drop cascades to table minmaxtest1 drop cascades to table minmaxtest2 drop cascades to table minmaxtest3 -- check for correct detection of nested-aggregate errors select max(min(unique1)) from tenk1; ERROR: aggregate function calls cannot be nested LINE 1: select max(min(unique1)) from tenk1; ^ select (select max(min(unique1)) from int8_tbl) from tenk1; ERROR: aggregate function calls cannot be nested LINE 1: select (select max(min(unique1)) from int8_tbl) from tenk1; ^ select avg((select avg(a1.col1 order by (select avg(a2.col2) from tenk1 a3)) from tenk1 a1(col1))) from tenk1 a2(col2); ERROR: aggregate function calls cannot be nested LINE 1: select avg((select avg(a1.col1 order by (select avg(a2.col2)... 
^ -- -- Test removal of redundant GROUP BY columns -- create temp table t1 (a int, b int, c int, d int, primary key (a, b)); create temp table t2 (x int, y int, z int, primary key (x, y)); create temp table t3 (a int, b int, c int, primary key(a, b) deferrable); -- Non-primary-key columns can be removed from GROUP BY explain (costs off) select * from t1 group by a,b,c,d; QUERY PLAN ---------------------- HashAggregate Group Key: a, b -> Seq Scan on t1 (3 rows) -- No removal can happen if the complete PK is not present in GROUP BY explain (costs off) select a,c from t1 group by a,c,d; QUERY PLAN ---------------------- HashAggregate Group Key: a, c, d -> Seq Scan on t1 (3 rows) -- Test removal across multiple relations explain (costs off) select * from t1 inner join t2 on t1.a = t2.x and t1.b = t2.y group by t1.a,t1.b,t1.c,t1.d,t2.x,t2.y,t2.z; QUERY PLAN ------------------------------------------------------ HashAggregate Group Key: t1.a, t1.b -> Hash Join Hash Cond: ((t2.x = t1.a) AND (t2.y = t1.b)) -> Seq Scan on t2 -> Hash -> Seq Scan on t1 (7 rows) -- Test case where t1 can be optimized but not t2 explain (costs off) select t1.*,t2.x,t2.z from t1 inner join t2 on t1.a = t2.x and t1.b = t2.y group by t1.a,t1.b,t1.c,t1.d,t2.x,t2.z; QUERY PLAN ------------------------------------------------------ HashAggregate Group Key: t1.a, t1.b, t2.z -> Hash Join Hash Cond: ((t2.x = t1.a) AND (t2.y = t1.b)) -> Seq Scan on t2 -> Hash -> Seq Scan on t1 (7 rows) -- Cannot optimize when PK is deferrable explain (costs off) select * from t3 group by a,b,c; QUERY PLAN ---------------------- HashAggregate Group Key: a, b, c -> Seq Scan on t3 (3 rows) create temp table t1c () inherits (t1); -- Ensure we don't remove any columns when t1 has a child table explain (costs off) select * from t1 group by a,b,c,d; QUERY PLAN ------------------------------------- HashAggregate Group Key: t1.a, t1.b, t1.c, t1.d -> Append -> Seq Scan on t1 t1_1 -> Seq Scan on t1c t1_2 (5 rows) -- Okay to remove columns if we're only querying the parent. explain (costs off) select * from only t1 group by a,b,c,d; QUERY PLAN ---------------------- HashAggregate Group Key: a, b -> Seq Scan on t1 (3 rows) create temp table p_t1 ( a int, b int, c int, d int, primary key(a,b) ) partition by list(a); create temp table p_t1_1 partition of p_t1 for values in(1); create temp table p_t1_2 partition of p_t1 for values in(2); -- Ensure we can remove non-PK columns for partitioned tables. 
explain (costs off) select * from p_t1 group by a,b,c,d; QUERY PLAN -------------------------------- HashAggregate Group Key: p_t1.a, p_t1.b -> Append -> Seq Scan on p_t1_1 -> Seq Scan on p_t1_2 (5 rows) drop table t1 cascade; NOTICE: drop cascades to table t1c drop table t2; drop table t3; drop table p_t1; -- -- Test GROUP BY matching of join columns that are type-coerced due to USING -- create temp table t1(f1 int, f2 int); create temp table t2(f1 bigint, f2 oid); select f1 from t1 left join t2 using (f1) group by f1; f1 ---- (0 rows) select f1 from t1 left join t2 using (f1) group by t1.f1; f1 ---- (0 rows) select t1.f1 from t1 left join t2 using (f1) group by t1.f1; f1 ---- (0 rows) -- only this one should fail: select t1.f1 from t1 left join t2 using (f1) group by f1; ERROR: column "t1.f1" must appear in the GROUP BY clause or be used in an aggregate function LINE 1: select t1.f1 from t1 left join t2 using (f1) group by f1; ^ -- check case where we have to inject nullingrels into coerced join alias select f1, count(*) from t1 x(x0,x1) left join (t1 left join t2 using(f1)) on (x0 = 0) group by f1; f1 | count ----+------- (0 rows) -- same, for a RelabelType coercion select f2, count(*) from t1 x(x0,x1) left join (t1 left join t2 using(f2)) on (x0 = 0) group by f2; f2 | count ----+------- (0 rows) drop table t1, t2; -- -- Test planner's selection of pathkeys for ORDER BY aggregates -- -- Ensure we order by four. This suits the most aggregate functions. explain (costs off) select sum(two order by two),max(four order by four), min(four order by four) from tenk1; QUERY PLAN ------------------------------- Aggregate -> Sort Sort Key: four -> Seq Scan on tenk1 (4 rows) -- Ensure we order by two. It's a tie between ordering by two and four but -- we tiebreak on the aggregate's position. explain (costs off) select sum(two order by two), max(four order by four), min(four order by four), max(two order by two) from tenk1; QUERY PLAN ------------------------------- Aggregate -> Sort Sort Key: two -> Seq Scan on tenk1 (4 rows) -- Similar to above, but tiebreak on ordering by four explain (costs off) select max(four order by four), sum(two order by two), min(four order by four), max(two order by two) from tenk1; QUERY PLAN ------------------------------- Aggregate -> Sort Sort Key: four -> Seq Scan on tenk1 (4 rows) -- Ensure this one orders by ten since there are 3 aggregates that require ten -- vs two that suit two and four. explain (costs off) select max(four order by four), sum(two order by two), min(four order by four), max(two order by two), sum(ten order by ten), min(ten order by ten), max(ten order by ten) from tenk1; QUERY PLAN ------------------------------- Aggregate -> Sort Sort Key: ten -> Seq Scan on tenk1 (4 rows) -- Try a case involving a GROUP BY clause where the GROUP BY column is also -- part of an aggregate's ORDER BY clause. We want a sort order that works -- for the GROUP BY along with the first and the last aggregate. explain (costs off) select sum(unique1 order by ten, two), sum(unique1 order by four), sum(unique1 order by two, four) from tenk1 group by ten; QUERY PLAN ---------------------------------- GroupAggregate Group Key: ten -> Sort Sort Key: ten, two, four -> Seq Scan on tenk1 (5 rows) -- Ensure that we never choose to provide presorted input to an Aggref with -- a volatile function in the ORDER BY / DISTINCT clause. We want to ensure -- these sorts are performed individually rather than at the query level. 
explain (costs off) select sum(unique1 order by two), sum(unique1 order by four), sum(unique1 order by four, two), sum(unique1 order by two, random()), sum(unique1 order by two, random(), random() + 1) from tenk1 group by ten; QUERY PLAN ---------------------------------- GroupAggregate Group Key: ten -> Sort Sort Key: ten, four, two -> Seq Scan on tenk1 (5 rows) -- Ensure consecutive NULLs are properly treated as distinct from each other select array_agg(distinct val) from (select null as val from generate_series(1, 2)); array_agg ----------- {NULL} (1 row) -- Ensure no ordering is requested when enable_presorted_aggregate is off set enable_presorted_aggregate to off; explain (costs off) select sum(two order by two) from tenk1; QUERY PLAN ------------------------- Aggregate -> Seq Scan on tenk1 (2 rows) reset enable_presorted_aggregate; -- -- Test combinations of DISTINCT and/or ORDER BY -- select array_agg(a order by b) from (values (1,4),(2,3),(3,1),(4,2)) v(a,b); array_agg ----------- {3,4,2,1} (1 row) select array_agg(a order by a) from (values (1,4),(2,3),(3,1),(4,2)) v(a,b); array_agg ----------- {1,2,3,4} (1 row) select array_agg(a order by a desc) from (values (1,4),(2,3),(3,1),(4,2)) v(a,b); array_agg ----------- {4,3,2,1} (1 row) select array_agg(b order by a desc) from (values (1,4),(2,3),(3,1),(4,2)) v(a,b); array_agg ----------- {2,1,3,4} (1 row) select array_agg(distinct a) from (values (1),(2),(1),(3),(null),(2)) v(a); array_agg -------------- {1,2,3,NULL} (1 row) select array_agg(distinct a order by a) from (values (1),(2),(1),(3),(null),(2)) v(a); array_agg -------------- {1,2,3,NULL} (1 row) select array_agg(distinct a order by a desc) from (values (1),(2),(1),(3),(null),(2)) v(a); array_agg -------------- {NULL,3,2,1} (1 row) select array_agg(distinct a order by a desc nulls last) from (values (1),(2),(1),(3),(null),(2)) v(a); array_agg -------------- {3,2,1,NULL} (1 row) -- multi-arg aggs, strict/nonstrict, distinct/order by select aggfstr(a,b,c) from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c); aggfstr --------------------------------------- {"(1,3,foo)","(2,2,bar)","(3,1,baz)"} (1 row) select aggfns(a,b,c) from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c); aggfns ----------------------------------------------- {"(1,3,foo)","(0,,)","(2,2,bar)","(3,1,baz)"} (1 row) select aggfstr(distinct a,b,c) from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c), generate_series(1,3) i; aggfstr --------------------------------------- {"(1,3,foo)","(2,2,bar)","(3,1,baz)"} (1 row) select aggfns(distinct a,b,c) from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c), generate_series(1,3) i; aggfns ----------------------------------------------- {"(0,,)","(1,3,foo)","(2,2,bar)","(3,1,baz)"} (1 row) select aggfstr(distinct a,b,c order by b) from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c), generate_series(1,3) i; aggfstr --------------------------------------- {"(3,1,baz)","(2,2,bar)","(1,3,foo)"} (1 row) select aggfns(distinct a,b,c order by b) from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c), generate_series(1,3) i; aggfns ----------------------------------------------- {"(3,1,baz)","(2,2,bar)","(1,3,foo)","(0,,)"} (1 row) -- test specific code paths select aggfns(distinct a,a,c order by c using ~<~,a) from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c), generate_series(1,2) i; aggfns ------------------------------------------------ 
{"(2,2,bar)","(3,3,baz)","(1,1,foo)","(0,0,)"} (1 row) select aggfns(distinct a,a,c order by c using ~<~) from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c), generate_series(1,2) i; aggfns ------------------------------------------------ {"(2,2,bar)","(3,3,baz)","(1,1,foo)","(0,0,)"} (1 row) select aggfns(distinct a,a,c order by a) from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c), generate_series(1,2) i; aggfns ------------------------------------------------ {"(0,0,)","(1,1,foo)","(2,2,bar)","(3,3,baz)"} (1 row) select aggfns(distinct a,b,c order by a,c using ~<~,b) from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c), generate_series(1,2) i; aggfns ----------------------------------------------- {"(0,,)","(1,3,foo)","(2,2,bar)","(3,1,baz)"} (1 row) -- test a more complex permutation that has previous caused issues select string_agg(distinct 'a', ','), sum(( select sum(1) from (values(1)) b(id) where a.id = b.id )) from unnest(array[1]) a(id); string_agg | sum ------------+----- a | 1 (1 row) -- check node I/O via view creation and usage, also deparsing logic create view agg_view1 as select aggfns(a,b,c) from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c); select * from agg_view1; aggfns ----------------------------------------------- {"(1,3,foo)","(0,,)","(2,2,bar)","(3,1,baz)"} (1 row) select pg_get_viewdef('agg_view1'::regclass); pg_get_viewdef --------------------------------------------------------------------------------------------------------------------- SELECT aggfns(a, b, c) AS aggfns + FROM ( VALUES (1,3,'foo'::text), (0,NULL::integer,NULL::text), (2,2,'bar'::text), (3,1,'baz'::text)) v(a, b, c); (1 row) create or replace view agg_view1 as select aggfns(distinct a,b,c) from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c), generate_series(1,3) i; select * from agg_view1; aggfns ----------------------------------------------- {"(0,,)","(1,3,foo)","(2,2,bar)","(3,1,baz)"} (1 row) select pg_get_viewdef('agg_view1'::regclass); pg_get_viewdef --------------------------------------------------------------------------------------------------------------------- SELECT aggfns(DISTINCT v.a, v.b, v.c) AS aggfns + FROM ( VALUES (1,3,'foo'::text), (0,NULL::integer,NULL::text), (2,2,'bar'::text), (3,1,'baz'::text)) v(a, b, c),+ generate_series(1, 3) i(i); (1 row) create or replace view agg_view1 as select aggfns(distinct a,b,c order by b) from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c), generate_series(1,3) i; select * from agg_view1; aggfns ----------------------------------------------- {"(3,1,baz)","(2,2,bar)","(1,3,foo)","(0,,)"} (1 row) select pg_get_viewdef('agg_view1'::regclass); pg_get_viewdef --------------------------------------------------------------------------------------------------------------------- SELECT aggfns(DISTINCT v.a, v.b, v.c ORDER BY v.b) AS aggfns + FROM ( VALUES (1,3,'foo'::text), (0,NULL::integer,NULL::text), (2,2,'bar'::text), (3,1,'baz'::text)) v(a, b, c),+ generate_series(1, 3) i(i); (1 row) create or replace view agg_view1 as select aggfns(a,b,c order by b+1) from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c); select * from agg_view1; aggfns ----------------------------------------------- {"(3,1,baz)","(2,2,bar)","(1,3,foo)","(0,,)"} (1 row) select pg_get_viewdef('agg_view1'::regclass); pg_get_viewdef 
--------------------------------------------------------------------------------------------------------------------- SELECT aggfns(a, b, c ORDER BY (b + 1)) AS aggfns + FROM ( VALUES (1,3,'foo'::text), (0,NULL::integer,NULL::text), (2,2,'bar'::text), (3,1,'baz'::text)) v(a, b, c); (1 row) create or replace view agg_view1 as select aggfns(a,a,c order by b) from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c); select * from agg_view1; aggfns ------------------------------------------------ {"(3,3,baz)","(2,2,bar)","(1,1,foo)","(0,0,)"} (1 row) select pg_get_viewdef('agg_view1'::regclass); pg_get_viewdef --------------------------------------------------------------------------------------------------------------------- SELECT aggfns(a, a, c ORDER BY b) AS aggfns + FROM ( VALUES (1,3,'foo'::text), (0,NULL::integer,NULL::text), (2,2,'bar'::text), (3,1,'baz'::text)) v(a, b, c); (1 row) create or replace view agg_view1 as select aggfns(a,b,c order by c using ~<~) from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c); select * from agg_view1; aggfns ----------------------------------------------- {"(2,2,bar)","(3,1,baz)","(1,3,foo)","(0,,)"} (1 row) select pg_get_viewdef('agg_view1'::regclass); pg_get_viewdef --------------------------------------------------------------------------------------------------------------------- SELECT aggfns(a, b, c ORDER BY c USING ~<~ NULLS LAST) AS aggfns + FROM ( VALUES (1,3,'foo'::text), (0,NULL::integer,NULL::text), (2,2,'bar'::text), (3,1,'baz'::text)) v(a, b, c); (1 row) create or replace view agg_view1 as select aggfns(distinct a,b,c order by a,c using ~<~,b) from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c), generate_series(1,2) i; select * from agg_view1; aggfns ----------------------------------------------- {"(0,,)","(1,3,foo)","(2,2,bar)","(3,1,baz)"} (1 row) select pg_get_viewdef('agg_view1'::regclass); pg_get_viewdef --------------------------------------------------------------------------------------------------------------------- SELECT aggfns(DISTINCT v.a, v.b, v.c ORDER BY v.a, v.c USING ~<~ NULLS LAST, v.b) AS aggfns + FROM ( VALUES (1,3,'foo'::text), (0,NULL::integer,NULL::text), (2,2,'bar'::text), (3,1,'baz'::text)) v(a, b, c),+ generate_series(1, 2) i(i); (1 row) drop view agg_view1; -- incorrect DISTINCT usage errors select aggfns(distinct a,b,c order by i) from (values (1,1,'foo')) v(a,b,c), generate_series(1,2) i; ERROR: in an aggregate with DISTINCT, ORDER BY expressions must appear in argument list LINE 1: select aggfns(distinct a,b,c order by i) ^ select aggfns(distinct a,b,c order by a,b+1) from (values (1,1,'foo')) v(a,b,c), generate_series(1,2) i; ERROR: in an aggregate with DISTINCT, ORDER BY expressions must appear in argument list LINE 1: select aggfns(distinct a,b,c order by a,b+1) ^ select aggfns(distinct a,b,c order by a,b,i,c) from (values (1,1,'foo')) v(a,b,c), generate_series(1,2) i; ERROR: in an aggregate with DISTINCT, ORDER BY expressions must appear in argument list LINE 1: select aggfns(distinct a,b,c order by a,b,i,c) ^ select aggfns(distinct a,a,c order by a,b) from (values (1,1,'foo')) v(a,b,c), generate_series(1,2) i; ERROR: in an aggregate with DISTINCT, ORDER BY expressions must appear in argument list LINE 1: select aggfns(distinct a,a,c order by a,b) ^ -- string_agg tests select string_agg(a,',') from (values('aaaa'),('bbbb'),('cccc')) g(a); string_agg ---------------- aaaa,bbbb,cccc (1 row) select string_agg(a,',') from 
(values('aaaa'),(null),('bbbb'),('cccc')) g(a); string_agg ---------------- aaaa,bbbb,cccc (1 row) select string_agg(a,'AB') from (values(null),(null),('bbbb'),('cccc')) g(a); string_agg ------------ bbbbABcccc (1 row) select string_agg(a,',') from (values(null),(null)) g(a); string_agg ------------ (1 row) -- check some implicit casting cases, as per bug #5564 select string_agg(distinct f1, ',' order by f1) from varchar_tbl; -- ok string_agg ------------ a,ab,abcd (1 row) select string_agg(distinct f1::text, ',' order by f1) from varchar_tbl; -- not ok ERROR: in an aggregate with DISTINCT, ORDER BY expressions must appear in argument list LINE 1: select string_agg(distinct f1::text, ',' order by f1) from v... ^ select string_agg(distinct f1, ',' order by f1::text) from varchar_tbl; -- not ok ERROR: in an aggregate with DISTINCT, ORDER BY expressions must appear in argument list LINE 1: select string_agg(distinct f1, ',' order by f1::text) from v... ^ select string_agg(distinct f1::text, ',' order by f1::text) from varchar_tbl; -- ok string_agg ------------ a,ab,abcd (1 row) -- string_agg bytea tests create table bytea_test_table(v bytea); select string_agg(v, '') from bytea_test_table; string_agg ------------ (1 row) insert into bytea_test_table values(decode('ff','hex')); select string_agg(v, '') from bytea_test_table; string_agg ------------ \xff (1 row) insert into bytea_test_table values(decode('aa','hex')); select string_agg(v, '') from bytea_test_table; string_agg ------------ \xffaa (1 row) select string_agg(v, NULL) from bytea_test_table; string_agg ------------ \xffaa (1 row) select string_agg(v, decode('ee', 'hex')) from bytea_test_table; string_agg ------------ \xffeeaa (1 row) drop table bytea_test_table; -- Test parallel string_agg and array_agg create table pagg_test (x int, y int) with (autovacuum_enabled = off); insert into pagg_test select (case x % 4 when 1 then null else x end), x % 10 from generate_series(1,5000) x; set parallel_setup_cost TO 0; set parallel_tuple_cost TO 0; set parallel_leader_participation TO 0; set min_parallel_table_scan_size = 0; set bytea_output = 'escape'; set max_parallel_workers_per_gather = 2; -- create a view as we otherwise have to repeat this query a few times. create view v_pagg_test AS select y, min(t) AS tmin,max(t) AS tmax,count(distinct t) AS tndistinct, min(b) AS bmin,max(b) AS bmax,count(distinct b) AS bndistinct, min(a) AS amin,max(a) AS amax,count(distinct a) AS andistinct, min(aa) AS aamin,max(aa) AS aamax,count(distinct aa) AS aandistinct from ( select y, unnest(regexp_split_to_array(a1.t, ','))::int AS t, unnest(regexp_split_to_array(a1.b::text, ',')) AS b, unnest(a1.a) AS a, unnest(a1.aa) AS aa from ( select y, string_agg(x::text, ',') AS t, string_agg(x::text::bytea, ',') AS b, array_agg(x) AS a, array_agg(ARRAY[x]) AS aa from pagg_test group by y ) a1 ) a2 group by y; -- Ensure results are correct. 
select * from v_pagg_test order by y; y | tmin | tmax | tndistinct | bmin | bmax | bndistinct | amin | amax | andistinct | aamin | aamax | aandistinct ---+------+------+------------+------+------+------------+------+------+------------+-------+-------+------------- 0 | 10 | 5000 | 500 | 10 | 990 | 500 | 10 | 5000 | 500 | 10 | 5000 | 500 1 | 11 | 4991 | 250 | 1011 | 991 | 250 | 11 | 4991 | 250 | 11 | 4991 | 250 2 | 2 | 4992 | 500 | 1002 | 992 | 500 | 2 | 4992 | 500 | 2 | 4992 | 500 3 | 3 | 4983 | 250 | 1003 | 983 | 250 | 3 | 4983 | 250 | 3 | 4983 | 250 4 | 4 | 4994 | 500 | 1004 | 994 | 500 | 4 | 4994 | 500 | 4 | 4994 | 500 5 | 15 | 4995 | 250 | 1015 | 995 | 250 | 15 | 4995 | 250 | 15 | 4995 | 250 6 | 6 | 4996 | 500 | 1006 | 996 | 500 | 6 | 4996 | 500 | 6 | 4996 | 500 7 | 7 | 4987 | 250 | 1007 | 987 | 250 | 7 | 4987 | 250 | 7 | 4987 | 250 8 | 8 | 4998 | 500 | 1008 | 998 | 500 | 8 | 4998 | 500 | 8 | 4998 | 500 9 | 19 | 4999 | 250 | 1019 | 999 | 250 | 19 | 4999 | 250 | 19 | 4999 | 250 (10 rows) -- Ensure parallel aggregation is actually being used. explain (costs off) select * from v_pagg_test order by y; QUERY PLAN -------------------------------------------------------------------------------------------------------------------------------------- GroupAggregate Group Key: pagg_test.y -> Sort Sort Key: pagg_test.y, (((unnest(regexp_split_to_array((string_agg((pagg_test.x)::text, ','::text)), ','::text))))::integer) -> Result -> ProjectSet -> Finalize HashAggregate Group Key: pagg_test.y -> Gather Workers Planned: 2 -> Partial HashAggregate Group Key: pagg_test.y -> Parallel Seq Scan on pagg_test (13 rows) set max_parallel_workers_per_gather = 0; -- Ensure results are the same without parallel aggregation. select * from v_pagg_test order by y; y | tmin | tmax | tndistinct | bmin | bmax | bndistinct | amin | amax | andistinct | aamin | aamax | aandistinct ---+------+------+------------+------+------+------------+------+------+------------+-------+-------+------------- 0 | 10 | 5000 | 500 | 10 | 990 | 500 | 10 | 5000 | 500 | 10 | 5000 | 500 1 | 11 | 4991 | 250 | 1011 | 991 | 250 | 11 | 4991 | 250 | 11 | 4991 | 250 2 | 2 | 4992 | 500 | 1002 | 992 | 500 | 2 | 4992 | 500 | 2 | 4992 | 500 3 | 3 | 4983 | 250 | 1003 | 983 | 250 | 3 | 4983 | 250 | 3 | 4983 | 250 4 | 4 | 4994 | 500 | 1004 | 994 | 500 | 4 | 4994 | 500 | 4 | 4994 | 500 5 | 15 | 4995 | 250 | 1015 | 995 | 250 | 15 | 4995 | 250 | 15 | 4995 | 250 6 | 6 | 4996 | 500 | 1006 | 996 | 500 | 6 | 4996 | 500 | 6 | 4996 | 500 7 | 7 | 4987 | 250 | 1007 | 987 | 250 | 7 | 4987 | 250 | 7 | 4987 | 250 8 | 8 | 4998 | 500 | 1008 | 998 | 500 | 8 | 4998 | 500 | 8 | 4998 | 500 9 | 19 | 4999 | 250 | 1019 | 999 | 250 | 19 | 4999 | 250 | 19 | 4999 | 250 (10 rows) -- Clean up reset max_parallel_workers_per_gather; reset bytea_output; reset min_parallel_table_scan_size; reset parallel_leader_participation; reset parallel_tuple_cost; reset parallel_setup_cost; drop view v_pagg_test; drop table pagg_test; -- FILTER tests select min(unique1) filter (where unique1 > 100) from tenk1; min ----- 101 (1 row) select sum(1/ten) filter (where ten > 0) from tenk1; sum ------ 1000 (1 row) select ten, sum(distinct four) filter (where four::text ~ '123') from onek a group by ten; ten | sum -----+----- 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | (10 rows) select ten, sum(distinct four) filter (where four > 10) from onek a group by ten having exists (select 1 from onek b where sum(distinct a.four) = b.four); ten | sum -----+----- 0 | 2 | 4 | 6 | 8 | (5 rows) select max(foo COLLATE "C") 
filter (where (bar collate "POSIX") > '0') from (values ('a', 'b')) AS v(foo,bar); max ----- a (1 row) select any_value(v) filter (where v > 2) from (values (1), (2), (3)) as v (v); any_value ----------- 3 (1 row) -- outer reference in FILTER (PostgreSQL extension) select (select count(*) from (values (1)) t0(inner_c)) from (values (2),(3)) t1(outer_c); -- inner query is aggregation query count ------- 1 1 (2 rows) select (select count(*) filter (where outer_c <> 0) from (values (1)) t0(inner_c)) from (values (2),(3)) t1(outer_c); -- outer query is aggregation query count ------- 2 (1 row) select (select count(inner_c) filter (where outer_c <> 0) from (values (1)) t0(inner_c)) from (values (2),(3)) t1(outer_c); -- inner query is aggregation query count ------- 1 1 (2 rows) select (select max((select i.unique2 from tenk1 i where i.unique1 = o.unique1)) filter (where o.unique1 < 10)) from tenk1 o; -- outer query is aggregation query max ------ 9998 (1 row) -- subquery in FILTER clause (PostgreSQL extension) select sum(unique1) FILTER (WHERE unique1 IN (SELECT unique1 FROM onek where unique1 < 100)) FROM tenk1; sum ------ 4950 (1 row) -- exercise lots of aggregate parts with FILTER select aggfns(distinct a,b,c order by a,c using ~<~,b) filter (where a > 1) from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c), generate_series(1,2) i; aggfns --------------------------- {"(2,2,bar)","(3,1,baz)"} (1 row) -- check handling of bare boolean Var in FILTER select max(0) filter (where b1) from bool_test; max ----- 0 (1 row) select (select max(0) filter (where b1)) from bool_test; max ----- 0 (1 row) -- check for correct detection of nested-aggregate errors in FILTER select max(unique1) filter (where sum(ten) > 0) from tenk1; ERROR: aggregate functions are not allowed in FILTER LINE 1: select max(unique1) filter (where sum(ten) > 0) from tenk1; ^ select (select max(unique1) filter (where sum(ten) > 0) from int8_tbl) from tenk1; ERROR: aggregate function calls cannot be nested LINE 1: select (select max(unique1) filter (where sum(ten) > 0) from... ^ select max(unique1) filter (where bool_or(ten > 0)) from tenk1; ERROR: aggregate functions are not allowed in FILTER LINE 1: select max(unique1) filter (where bool_or(ten > 0)) from ten... ^ select (select max(unique1) filter (where bool_or(ten > 0)) from int8_tbl) from tenk1; ERROR: aggregate function calls cannot be nested LINE 1: select (select max(unique1) filter (where bool_or(ten > 0)) ... ^ -- ordered-set aggregates select p, percentile_cont(p) within group (order by x::float8) from generate_series(1,5) x, (values (0::float8),(0.1),(0.25),(0.4),(0.5),(0.6),(0.75),(0.9),(1)) v(p) group by p order by p; p | percentile_cont ------+----------------- 0 | 1 0.1 | 1.4 0.25 | 2 0.4 | 2.6 0.5 | 3 0.6 | 3.4 0.75 | 4 0.9 | 4.6 1 | 5 (9 rows) select p, percentile_cont(p order by p) within group (order by x) -- error from generate_series(1,5) x, (values (0::float8),(0.1),(0.25),(0.4),(0.5),(0.6),(0.75),(0.9),(1)) v(p) group by p order by p; ERROR: cannot use multiple ORDER BY clauses with WITHIN GROUP LINE 1: select p, percentile_cont(p order by p) within group (order ... 
^ select p, sum() within group (order by x::float8) -- error from generate_series(1,5) x, (values (0::float8),(0.1),(0.25),(0.4),(0.5),(0.6),(0.75),(0.9),(1)) v(p) group by p order by p; ERROR: sum is not an ordered-set aggregate, so it cannot have WITHIN GROUP LINE 1: select p, sum() within group (order by x::float8) -- error ^ select p, percentile_cont(p,p) -- error from generate_series(1,5) x, (values (0::float8),(0.1),(0.25),(0.4),(0.5),(0.6),(0.75),(0.9),(1)) v(p) group by p order by p; ERROR: WITHIN GROUP is required for ordered-set aggregate percentile_cont LINE 1: select p, percentile_cont(p,p) -- error ^ select percentile_cont(0.5) within group (order by b) from aggtest; percentile_cont ------------------ 53.4485001564026 (1 row) select percentile_cont(0.5) within group (order by b), sum(b) from aggtest; percentile_cont | sum ------------------+--------- 53.4485001564026 | 431.773 (1 row) select percentile_cont(0.5) within group (order by thousand) from tenk1; percentile_cont ----------------- 499.5 (1 row) select percentile_disc(0.5) within group (order by thousand) from tenk1; percentile_disc ----------------- 499 (1 row) select rank(3) within group (order by x) from (values (1),(1),(2),(2),(3),(3),(4)) v(x); rank ------ 5 (1 row) select cume_dist(3) within group (order by x) from (values (1),(1),(2),(2),(3),(3),(4)) v(x); cume_dist ----------- 0.875 (1 row) select percent_rank(3) within group (order by x) from (values (1),(1),(2),(2),(3),(3),(4),(5)) v(x); percent_rank -------------- 0.5 (1 row) select dense_rank(3) within group (order by x) from (values (1),(1),(2),(2),(3),(3),(4)) v(x); dense_rank ------------ 3 (1 row) select percentile_disc(array[0,0.1,0.25,0.5,0.75,0.9,1]) within group (order by thousand) from tenk1; percentile_disc ---------------------------- {0,99,249,499,749,899,999} (1 row) select percentile_cont(array[0,0.25,0.5,0.75,1]) within group (order by thousand) from tenk1; percentile_cont ----------------------------- {0,249.75,499.5,749.25,999} (1 row) select percentile_disc(array[[null,1,0.5],[0.75,0.25,null]]) within group (order by thousand) from tenk1; percentile_disc --------------------------------- {{NULL,999,499},{749,249,NULL}} (1 row) select percentile_cont(array[0,1,0.25,0.75,0.5,1,0.3,0.32,0.35,0.38,0.4]) within group (order by x) from generate_series(1,6) x; percentile_cont ------------------------------------------ {1,6,2.25,4.75,3.5,6,2.5,2.6,2.75,2.9,3} (1 row) select ten, mode() within group (order by string4) from tenk1 group by ten; ten | mode -----+-------- 0 | HHHHxx 1 | OOOOxx 2 | VVVVxx 3 | OOOOxx 4 | HHHHxx 5 | HHHHxx 6 | OOOOxx 7 | AAAAxx 8 | VVVVxx 9 | VVVVxx (10 rows) select percentile_disc(array[0.25,0.5,0.75]) within group (order by x) from unnest('{fred,jim,fred,jack,jill,fred,jill,jim,jim,sheila,jim,sheila}'::text[]) u(x); percentile_disc ----------------- {fred,jill,jim} (1 row) -- check collation propagates up in suitable cases: select pg_collation_for(percentile_disc(1) within group (order by x collate "POSIX")) from (values ('fred'),('jim')) v(x); pg_collation_for ------------------ "POSIX" (1 row) -- ordered-set aggs created with CREATE AGGREGATE select test_rank(3) within group (order by x) from (values (1),(1),(2),(2),(3),(3),(4)) v(x); test_rank ----------- 5 (1 row) select test_percentile_disc(0.5) within group (order by thousand) from tenk1; test_percentile_disc ---------------------- 499 (1 row) -- ordered-set aggs can't use ungrouped vars in direct args: select rank(x) within group (order by x) from 
generate_series(1,5) x; ERROR: column "x.x" must appear in the GROUP BY clause or be used in an aggregate function LINE 1: select rank(x) within group (order by x) from generate_serie... ^ DETAIL: Direct arguments of an ordered-set aggregate must use only grouped columns. -- outer-level agg can't use a grouped arg of a lower level, either: select array(select percentile_disc(a) within group (order by x) from (values (0.3),(0.7)) v(a) group by a) from generate_series(1,5) g(x); ERROR: outer-level aggregate cannot contain a lower-level variable in its direct arguments LINE 1: select array(select percentile_disc(a) within group (order b... ^ -- agg in the direct args is a grouping violation, too: select rank(sum(x)) within group (order by x) from generate_series(1,5) x; ERROR: aggregate function calls cannot be nested LINE 1: select rank(sum(x)) within group (order by x) from generate_... ^ -- hypothetical-set type unification and argument-count failures: select rank(3) within group (order by x) from (values ('fred'),('jim')) v(x); ERROR: WITHIN GROUP types text and integer cannot be matched LINE 1: select rank(3) within group (order by x) from (values ('fred... ^ select rank(3) within group (order by stringu1,stringu2) from tenk1; ERROR: function rank(integer, name, name) does not exist LINE 1: select rank(3) within group (order by stringu1,stringu2) fro... ^ HINT: To use the hypothetical-set aggregate rank, the number of hypothetical direct arguments (here 1) must match the number of ordering columns (here 2). select rank('fred') within group (order by x) from generate_series(1,5) x; ERROR: invalid input syntax for type integer: "fred" LINE 1: select rank('fred') within group (order by x) from generate_... ^ select rank('adam'::text collate "C") within group (order by x collate "POSIX") from (values ('fred'),('jim')) v(x); ERROR: collation mismatch between explicit collations "C" and "POSIX" LINE 1: ...adam'::text collate "C") within group (order by x collate "P... 
^ -- hypothetical-set type unification successes: select rank('adam'::varchar) within group (order by x) from (values ('fred'),('jim')) v(x); rank ------ 1 (1 row) select rank('3') within group (order by x) from generate_series(1,5) x; rank ------ 3 (1 row) -- divide by zero check select percent_rank(0) within group (order by x) from generate_series(1,0) x; percent_rank -------------- 0 (1 row) -- deparse and multiple features: create view aggordview1 as select ten, percentile_disc(0.5) within group (order by thousand) as p50, percentile_disc(0.5) within group (order by thousand) filter (where hundred=1) as px, rank(5,'AZZZZ',50) within group (order by hundred, string4 desc, hundred) from tenk1 group by ten order by ten; select pg_get_viewdef('aggordview1'); pg_get_viewdef ------------------------------------------------------------------------------------------------------------------- SELECT ten, + percentile_disc((0.5)::double precision) WITHIN GROUP (ORDER BY thousand) AS p50, + percentile_disc((0.5)::double precision) WITHIN GROUP (ORDER BY thousand) FILTER (WHERE (hundred = 1)) AS px,+ rank(5, 'AZZZZ'::name, 50) WITHIN GROUP (ORDER BY hundred, string4 DESC, hundred) AS rank + FROM tenk1 + GROUP BY ten + ORDER BY ten; (1 row) select * from aggordview1 order by ten; ten | p50 | px | rank -----+-----+-----+------ 0 | 490 | | 101 1 | 491 | 401 | 101 2 | 492 | | 101 3 | 493 | | 101 4 | 494 | | 101 5 | 495 | | 67 6 | 496 | | 1 7 | 497 | | 1 8 | 498 | | 1 9 | 499 | | 1 (10 rows) drop view aggordview1; -- variadic aggregates select least_agg(q1,q2) from int8_tbl; least_agg ------------------- -4567890123456789 (1 row) select least_agg(variadic array[q1,q2]) from int8_tbl; least_agg ------------------- -4567890123456789 (1 row) select cleast_agg(q1,q2) from int8_tbl; cleast_agg ------------------- -4567890123456789 (1 row) select cleast_agg(4.5,f1) from int4_tbl; cleast_agg ------------- -2147483647 (1 row) select cleast_agg(variadic array[4.5,f1]) from int4_tbl; cleast_agg ------------- -2147483647 (1 row) select pg_typeof(cleast_agg(variadic array[4.5,f1])) from int4_tbl; pg_typeof ----------- numeric (1 row) -- test aggregates with common transition functions share the same states begin work; create type avg_state as (total bigint, count bigint); create or replace function avg_transfn(state avg_state, n int) returns avg_state as $$ declare new_state avg_state; begin raise notice 'avg_transfn called with %', n; if state is null then if n is not null then new_state.total := n; new_state.count := 1; return new_state; end if; return null; elsif n is not null then state.total := state.total + n; state.count := state.count + 1; return state; end if; return null; end $$ language plpgsql; create function avg_finalfn(state avg_state) returns int4 as $$ begin if state is null then return NULL; else return state.total / state.count; end if; end $$ language plpgsql; create function sum_finalfn(state avg_state) returns int4 as $$ begin if state is null then return NULL; else return state.total; end if; end $$ language plpgsql; create aggregate my_avg(int4) ( stype = avg_state, sfunc = avg_transfn, finalfunc = avg_finalfn ); create aggregate my_sum(int4) ( stype = avg_state, sfunc = avg_transfn, finalfunc = sum_finalfn ); -- aggregate state should be shared as aggs are the same. 
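-- (Note: PostgreSQL can feed several aggregates in the same query from a single
-- shared transition state when their transition function, state type, initial
-- condition and input arguments all match and their DISTINCT/ORDER BY and FILTER
-- clauses are identical.  The number of "avg_transfn called with ..." NOTICE lines
-- in the tests below shows whether the state was shared or built separately.)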
select my_avg(one),my_avg(one) from (values(1),(3)) t(one); NOTICE: avg_transfn called with 1 NOTICE: avg_transfn called with 3 my_avg | my_avg --------+-------- 2 | 2 (1 row) -- aggregate state should be shared as transfn is the same for both aggs. select my_avg(one),my_sum(one) from (values(1),(3)) t(one); NOTICE: avg_transfn called with 1 NOTICE: avg_transfn called with 3 my_avg | my_sum --------+-------- 2 | 4 (1 row) -- same as previous one, but with DISTINCT, which requires sorting the input. select my_avg(distinct one),my_sum(distinct one) from (values(1),(3),(1)) t(one); NOTICE: avg_transfn called with 1 NOTICE: avg_transfn called with 3 my_avg | my_sum --------+-------- 2 | 4 (1 row) -- shouldn't share states due to the distinctness not matching. select my_avg(distinct one),my_sum(one) from (values(1),(3)) t(one); NOTICE: avg_transfn called with 1 NOTICE: avg_transfn called with 1 NOTICE: avg_transfn called with 3 NOTICE: avg_transfn called with 3 my_avg | my_sum --------+-------- 2 | 4 (1 row) -- shouldn't share states due to the filter clause not matching. select my_avg(one) filter (where one > 1),my_sum(one) from (values(1),(3)) t(one); NOTICE: avg_transfn called with 1 NOTICE: avg_transfn called with 3 NOTICE: avg_transfn called with 3 my_avg | my_sum --------+-------- 3 | 4 (1 row) -- this should not share the state due to different input columns. select my_avg(one),my_sum(two) from (values(1,2),(3,4)) t(one,two); NOTICE: avg_transfn called with 1 NOTICE: avg_transfn called with 2 NOTICE: avg_transfn called with 3 NOTICE: avg_transfn called with 4 my_avg | my_sum --------+-------- 2 | 6 (1 row) -- exercise cases where OSAs share state select percentile_cont(0.5) within group (order by a), percentile_disc(0.5) within group (order by a) from (values(1::float8),(3),(5),(7)) t(a); percentile_cont | percentile_disc -----------------+----------------- 4 | 3 (1 row) select percentile_cont(0.25) within group (order by a), percentile_disc(0.5) within group (order by a) from (values(1::float8),(3),(5),(7)) t(a); percentile_cont | percentile_disc -----------------+----------------- 2.5 | 3 (1 row) -- these can't share state currently select rank(4) within group (order by a), dense_rank(4) within group (order by a) from (values(1),(3),(5),(7)) t(a); rank | dense_rank ------+------------ 3 | 3 (1 row) -- test that aggs with the same sfunc and initcond share the same agg state create aggregate my_sum_init(int4) ( stype = avg_state, sfunc = avg_transfn, finalfunc = sum_finalfn, initcond = '(10,0)' ); create aggregate my_avg_init(int4) ( stype = avg_state, sfunc = avg_transfn, finalfunc = avg_finalfn, initcond = '(10,0)' ); create aggregate my_avg_init2(int4) ( stype = avg_state, sfunc = avg_transfn, finalfunc = avg_finalfn, initcond = '(4,0)' ); -- state should be shared if INITCONDs are matching select my_sum_init(one),my_avg_init(one) from (values(1),(3)) t(one); NOTICE: avg_transfn called with 1 NOTICE: avg_transfn called with 3 my_sum_init | my_avg_init -------------+------------- 14 | 7 (1 row) -- Varying INITCONDs should cause the states not to be shared. select my_sum_init(one),my_avg_init2(one) from (values(1),(3)) t(one); NOTICE: avg_transfn called with 1 NOTICE: avg_transfn called with 1 NOTICE: avg_transfn called with 3 NOTICE: avg_transfn called with 3 my_sum_init | my_avg_init2 -------------+-------------- 14 | 4 (1 row) rollback; -- test aggregate state sharing to ensure it works if one aggregate has a -- finalfn and the other one has none. 
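-- (The final function plays no part in the sharing decision: it is applied per
-- aggregate to the shared transition state when the result is produced, so two
-- aggregates that differ only in their finalfn, or where one has no finalfn at
-- all, can still run the transition function just once per input row.)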
begin work; create or replace function sum_transfn(state int4, n int4) returns int4 as $$ declare new_state int4; begin raise notice 'sum_transfn called with %', n; if state is null then if n is not null then new_state := n; return new_state; end if; return null; elsif n is not null then state := state + n; return state; end if; return null; end $$ language plpgsql; create function halfsum_finalfn(state int4) returns int4 as $$ begin if state is null then return NULL; else return state / 2; end if; end $$ language plpgsql; create aggregate my_sum(int4) ( stype = int4, sfunc = sum_transfn ); create aggregate my_half_sum(int4) ( stype = int4, sfunc = sum_transfn, finalfunc = halfsum_finalfn ); -- Agg state should be shared even though my_sum has no finalfn select my_sum(one),my_half_sum(one) from (values(1),(2),(3),(4)) t(one); NOTICE: sum_transfn called with 1 NOTICE: sum_transfn called with 2 NOTICE: sum_transfn called with 3 NOTICE: sum_transfn called with 4 my_sum | my_half_sum --------+------------- 10 | 5 (1 row) rollback; -- test that the aggregate transition logic correctly handles -- transition / combine functions returning NULL -- First test the case of a normal transition function returning NULL BEGIN; CREATE FUNCTION balkifnull(int8, int4) RETURNS int8 STRICT LANGUAGE plpgsql AS $$ BEGIN IF $1 IS NULL THEN RAISE 'erroneously called with NULL argument'; END IF; RETURN NULL; END$$; CREATE AGGREGATE balk(int4) ( SFUNC = balkifnull(int8, int4), STYPE = int8, PARALLEL = SAFE, INITCOND = '0' ); SELECT balk(hundred) FROM tenk1; balk ------ (1 row) ROLLBACK; -- GROUP BY optimization by reordering GROUP BY clauses CREATE TABLE btg AS SELECT i % 10 AS x, i % 10 AS y, 'abc' || i % 10 AS z, i AS w FROM generate_series(1, 100) AS i; CREATE INDEX btg_x_y_idx ON btg(x, y); ANALYZE btg; SET enable_hashagg = off; SET enable_seqscan = off; -- Utilize the ordering of index scan to avoid a Sort operation EXPLAIN (COSTS OFF) SELECT count(*) FROM btg GROUP BY y, x; QUERY PLAN ------------------------------------------------ GroupAggregate Group Key: x, y -> Index Only Scan using btg_x_y_idx on btg (3 rows) -- Engage incremental sort EXPLAIN (COSTS OFF) SELECT count(*) FROM btg GROUP BY z, y, w, x; QUERY PLAN ------------------------------------------------- GroupAggregate Group Key: x, y, z, w -> Incremental Sort Sort Key: x, y, z, w Presorted Key: x, y -> Index Scan using btg_x_y_idx on btg (6 rows) -- Utilize the ordering of subquery scan to avoid a Sort operation EXPLAIN (COSTS OFF) SELECT count(*) FROM (SELECT * FROM btg ORDER BY x, y, w, z) AS q1 GROUP BY w, x, z, y; QUERY PLAN ------------------------------------------------- GroupAggregate Group Key: btg.x, btg.y, btg.w, btg.z -> Incremental Sort Sort Key: btg.x, btg.y, btg.w, btg.z Presorted Key: btg.x, btg.y -> Index Scan using btg_x_y_idx on btg (6 rows) -- Utilize the ordering of merge join to avoid a full Sort operation SET enable_hashjoin = off; SET enable_nestloop = off; EXPLAIN (COSTS OFF) SELECT count(*) FROM btg t1 JOIN btg t2 ON t1.z = t2.z AND t1.w = t2.w AND t1.x = t2.x GROUP BY t1.x, t1.y, t1.z, t1.w; QUERY PLAN ------------------------------------------------------------------------------- GroupAggregate Group Key: t1.z, t1.w, t1.x, t1.y -> Incremental Sort Sort Key: t1.z, t1.w, t1.x, t1.y Presorted Key: t1.z, t1.w, t1.x -> Merge Join Merge Cond: ((t1.z = t2.z) AND (t1.w = t2.w) AND (t1.x = t2.x)) -> Sort Sort Key: t1.z, t1.w, t1.x -> Index Scan using btg_x_y_idx on btg t1 -> Sort Sort Key: t2.z, t2.w, t2.x -> Index Scan using 
btg_x_y_idx on btg t2 (13 rows) RESET enable_nestloop; RESET enable_hashjoin; -- Should work with and without GROUP-BY optimization EXPLAIN (COSTS OFF) SELECT count(*) FROM btg GROUP BY w, x, z, y ORDER BY y, x, z, w; QUERY PLAN ------------------------------------------------- GroupAggregate Group Key: y, x, z, w -> Sort Sort Key: y, x, z, w -> Index Scan using btg_x_y_idx on btg (5 rows) -- Utilize incremental sort to make the ORDER BY rule a bit cheaper EXPLAIN (COSTS OFF) SELECT count(*) FROM btg GROUP BY w, x, y, z ORDER BY x*x, z; QUERY PLAN ------------------------------------------------------- Sort Sort Key: ((x * x)), z -> GroupAggregate Group Key: x, y, w, z -> Incremental Sort Sort Key: x, y, w, z Presorted Key: x, y -> Index Scan using btg_x_y_idx on btg (8 rows) -- Test the case where the number of incoming subtree path keys is more than -- the number of grouping keys. CREATE INDEX btg_y_x_w_idx ON btg(y, x, w); EXPLAIN (VERBOSE, COSTS OFF) SELECT y, x, array_agg(distinct w) FROM btg WHERE y < 0 GROUP BY x, y; QUERY PLAN --------------------------------------------------------- GroupAggregate Output: y, x, array_agg(DISTINCT w) Group Key: btg.y, btg.x -> Index Only Scan using btg_y_x_w_idx on public.btg Output: y, x, w Index Cond: (btg.y < 0) (6 rows) -- Ensure that we do not select the aggregate pathkeys instead of the grouping -- pathkeys CREATE TABLE group_agg_pk AS SELECT i % 10 AS x, i % 2 AS y, i % 2 AS z, 2 AS w, i % 10 AS f FROM generate_series(1,100) AS i; ANALYZE group_agg_pk; SET enable_nestloop = off; SET enable_hashjoin = off; EXPLAIN (COSTS OFF) SELECT avg(c1.f ORDER BY c1.x, c1.y) FROM group_agg_pk c1 JOIN group_agg_pk c2 ON c1.x = c2.x GROUP BY c1.w, c1.z; QUERY PLAN ----------------------------------------------------- GroupAggregate Group Key: c1.w, c1.z -> Sort Sort Key: c1.w, c1.z, c1.x, c1.y -> Merge Join Merge Cond: (c1.x = c2.x) -> Sort Sort Key: c1.x -> Seq Scan on group_agg_pk c1 -> Sort Sort Key: c2.x -> Seq Scan on group_agg_pk c2 (12 rows) SELECT avg(c1.f ORDER BY c1.x, c1.y) FROM group_agg_pk c1 JOIN group_agg_pk c2 ON c1.x = c2.x GROUP BY c1.w, c1.z; avg -------------------- 4.0000000000000000 5.0000000000000000 (2 rows) RESET enable_nestloop; RESET enable_hashjoin; DROP TABLE group_agg_pk; -- Test the case where the ordering of scan matches the ordering within the -- aggregate but cannot be found in the group-by list CREATE TABLE agg_sort_order (c1 int PRIMARY KEY, c2 int); CREATE UNIQUE INDEX agg_sort_order_c2_idx ON agg_sort_order(c2); INSERT INTO agg_sort_order SELECT i, i FROM generate_series(1,100)i; ANALYZE agg_sort_order; EXPLAIN (COSTS OFF) SELECT array_agg(c1 ORDER BY c2),c2 FROM agg_sort_order WHERE c2 < 100 GROUP BY c1 ORDER BY 2; QUERY PLAN ---------------------------------------------------------------------------- Sort Sort Key: c2 -> GroupAggregate Group Key: c1 -> Sort Sort Key: c1, c2 -> Index Scan using agg_sort_order_c2_idx on agg_sort_order Index Cond: (c2 < 100) (8 rows) DROP TABLE agg_sort_order CASCADE; DROP TABLE btg; RESET enable_hashagg; RESET enable_seqscan; -- Secondly test the case of a parallel aggregate combiner function -- returning NULL. For that, use a normal transition function, but a -- combiner function returning NULL.
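-- (In a parallel aggregate plan each worker applies the transition function to its
-- share of the rows and emits a partial transition state; the leader then merges
-- those partial states with the aggregate's COMBINEFUNC before applying any final
-- function.  A strict combine function that returns NULL therefore exercises the
-- NULL-state handling of that finalize phase, analogously to the transition-function
-- test above.)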
BEGIN; CREATE FUNCTION balkifnull(int8, int8) RETURNS int8 PARALLEL SAFE STRICT LANGUAGE plpgsql AS $$ BEGIN IF $1 IS NULL THEN RAISE 'erroneously called with NULL argument'; END IF; RETURN NULL; END$$; CREATE AGGREGATE balk(int4) ( SFUNC = int4_sum(int8, int4), STYPE = int8, COMBINEFUNC = balkifnull(int8, int8), PARALLEL = SAFE, INITCOND = '0' ); -- force use of parallelism ALTER TABLE tenk1 set (parallel_workers = 4); SET LOCAL parallel_setup_cost=0; SET LOCAL max_parallel_workers_per_gather=4; EXPLAIN (COSTS OFF) SELECT balk(hundred) FROM tenk1; QUERY PLAN ------------------------------------------------------------------------- Finalize Aggregate -> Gather Workers Planned: 4 -> Partial Aggregate -> Parallel Index Only Scan using tenk1_hundred on tenk1 (5 rows) SELECT balk(hundred) FROM tenk1; balk ------ (1 row) ROLLBACK; -- test multiple usage of an aggregate whose finalfn returns a R/W datum BEGIN; CREATE FUNCTION rwagg_sfunc(x anyarray, y anyarray) RETURNS anyarray LANGUAGE plpgsql IMMUTABLE AS $$ BEGIN RETURN array_fill(y[1], ARRAY[4]); END; $$; CREATE FUNCTION rwagg_finalfunc(x anyarray) RETURNS anyarray LANGUAGE plpgsql STRICT IMMUTABLE AS $$ DECLARE res x%TYPE; BEGIN -- assignment is essential for this test, it expands the array to R/W res := array_fill(x[1], ARRAY[4]); RETURN res; END; $$; CREATE AGGREGATE rwagg(anyarray) ( STYPE = anyarray, SFUNC = rwagg_sfunc, FINALFUNC = rwagg_finalfunc ); CREATE FUNCTION eatarray(x real[]) RETURNS real[] LANGUAGE plpgsql STRICT IMMUTABLE AS $$ BEGIN x[1] := x[1] + 1; RETURN x; END; $$; SELECT eatarray(rwagg(ARRAY[1.0::real])), eatarray(rwagg(ARRAY[1.0::real])); eatarray | eatarray -----------+----------- {2,1,1,1} | {2,1,1,1} (1 row) ROLLBACK; -- test coverage for aggregate combine/serial/deserial functions BEGIN; SET parallel_setup_cost = 0; SET parallel_tuple_cost = 0; SET min_parallel_table_scan_size = 0; SET max_parallel_workers_per_gather = 4; SET parallel_leader_participation = off; SET enable_indexonlyscan = off; -- variance(int4) covers numeric_poly_combine -- sum(int8) covers int8_avg_combine -- regr_count(float8, float8) covers int8inc_float8_float8 and aggregates with > 1 arg EXPLAIN (COSTS OFF, VERBOSE) SELECT variance(unique1::int4), sum(unique1::int8), regr_count(unique1::float8, unique1::float8) FROM (SELECT * FROM tenk1 UNION ALL SELECT * FROM tenk1 UNION ALL SELECT * FROM tenk1 UNION ALL SELECT * FROM tenk1) u; QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate Output: variance(tenk1.unique1), sum((tenk1.unique1)::bigint), regr_count((tenk1.unique1)::double precision, (tenk1.unique1)::double precision) -> Gather Output: (PARTIAL variance(tenk1.unique1)), (PARTIAL sum((tenk1.unique1)::bigint)), (PARTIAL regr_count((tenk1.unique1)::double precision, (tenk1.unique1)::double precision)) Workers Planned: 4 -> Partial Aggregate Output: PARTIAL variance(tenk1.unique1), PARTIAL sum((tenk1.unique1)::bigint), PARTIAL regr_count((tenk1.unique1)::double precision, (tenk1.unique1)::double precision) -> Parallel Append -> Parallel Seq Scan on public.tenk1 Output: tenk1.unique1 -> Parallel Seq Scan on public.tenk1 tenk1_1 Output: tenk1_1.unique1 -> Parallel Seq Scan on public.tenk1 tenk1_2 Output: tenk1_2.unique1 -> Parallel Seq Scan on public.tenk1 tenk1_3 Output: tenk1_3.unique1 (16 rows) SELECT variance(unique1::int4), sum(unique1::int8), regr_count(unique1::float8, 
unique1::float8) FROM (SELECT * FROM tenk1 UNION ALL SELECT * FROM tenk1 UNION ALL SELECT * FROM tenk1 UNION ALL SELECT * FROM tenk1) u; variance | sum | regr_count ----------------------+-----------+------------ 8333541.588539713493 | 199980000 | 40000 (1 row) -- variance(int8) covers numeric_combine -- avg(numeric) covers numeric_avg_combine EXPLAIN (COSTS OFF, VERBOSE) SELECT variance(unique1::int8), avg(unique1::numeric) FROM (SELECT * FROM tenk1 UNION ALL SELECT * FROM tenk1 UNION ALL SELECT * FROM tenk1 UNION ALL SELECT * FROM tenk1) u; QUERY PLAN -------------------------------------------------------------------------------------------------------- Finalize Aggregate Output: variance((tenk1.unique1)::bigint), avg((tenk1.unique1)::numeric) -> Gather Output: (PARTIAL variance((tenk1.unique1)::bigint)), (PARTIAL avg((tenk1.unique1)::numeric)) Workers Planned: 4 -> Partial Aggregate Output: PARTIAL variance((tenk1.unique1)::bigint), PARTIAL avg((tenk1.unique1)::numeric) -> Parallel Append -> Parallel Seq Scan on public.tenk1 Output: tenk1.unique1 -> Parallel Seq Scan on public.tenk1 tenk1_1 Output: tenk1_1.unique1 -> Parallel Seq Scan on public.tenk1 tenk1_2 Output: tenk1_2.unique1 -> Parallel Seq Scan on public.tenk1 tenk1_3 Output: tenk1_3.unique1 (16 rows) SELECT variance(unique1::int8), avg(unique1::numeric) FROM (SELECT * FROM tenk1 UNION ALL SELECT * FROM tenk1 UNION ALL SELECT * FROM tenk1 UNION ALL SELECT * FROM tenk1) u; variance | avg ----------------------+----------------------- 8333541.588539713493 | 4999.5000000000000000 (1 row) ROLLBACK; -- test coverage for dense_rank SELECT dense_rank(x) WITHIN GROUP (ORDER BY x) FROM (VALUES (1),(1),(2),(2),(3),(3)) v(x) GROUP BY (x) ORDER BY 1; dense_rank ------------ 1 1 1 (3 rows) -- Ensure that the STRICT checks for aggregates does not take NULLness -- of ORDER BY columns into account. See bug report around -- 2a505161-2727-2473-7c46-591ed108ac52@email.cz SELECT min(x ORDER BY y) FROM (VALUES(1, NULL)) AS d(x,y); min ----- 1 (1 row) SELECT min(x ORDER BY y) FROM (VALUES(1, 2)) AS d(x,y); min ----- 1 (1 row) -- check collation-sensitive matching between grouping expressions select v||'a', case v||'a' when 'aa' then 1 else 0 end, count(*) from unnest(array['a','b']) u(v) group by v||'a' order by 1; ?column? | case | count ----------+------+------- aa | 1 | 1 ba | 0 | 1 (2 rows) select v||'a', case when v||'a' = 'aa' then 1 else 0 end, count(*) from unnest(array['a','b']) u(v) group by v||'a' order by 1; ?column? 
| case | count ----------+------+------- aa | 1 | 1 ba | 0 | 1 (2 rows) -- Make sure that generation of HashAggregate for uniqification purposes -- does not lead to array overflow due to unexpected duplicate hash keys -- see CAFeeJoKKu0u+A_A9R9316djW-YW3-+Gtgvy3ju655qRHR3jtdA@mail.gmail.com set enable_memoize to off; explain (costs off) select 1 from tenk1 where (hundred, thousand) in (select twothousand, twothousand from onek); QUERY PLAN ------------------------------------------------------------- Hash Join Hash Cond: (tenk1.hundred = onek.twothousand) -> Seq Scan on tenk1 Filter: (hundred = thousand) -> Hash -> HashAggregate Group Key: onek.twothousand, onek.twothousand -> Seq Scan on onek (8 rows) reset enable_memoize; -- -- Hash Aggregation Spill tests -- set enable_sort=false; set work_mem='64kB'; select unique1, count(*), sum(twothousand) from tenk1 group by unique1 having sum(fivethous) > 4975 order by sum(twothousand); unique1 | count | sum ---------+-------+------ 4976 | 1 | 976 4977 | 1 | 977 4978 | 1 | 978 4979 | 1 | 979 4980 | 1 | 980 4981 | 1 | 981 4982 | 1 | 982 4983 | 1 | 983 4984 | 1 | 984 4985 | 1 | 985 4986 | 1 | 986 4987 | 1 | 987 4988 | 1 | 988 4989 | 1 | 989 4990 | 1 | 990 4991 | 1 | 991 4992 | 1 | 992 4993 | 1 | 993 4994 | 1 | 994 4995 | 1 | 995 4996 | 1 | 996 4997 | 1 | 997 4998 | 1 | 998 4999 | 1 | 999 9976 | 1 | 1976 9977 | 1 | 1977 9978 | 1 | 1978 9979 | 1 | 1979 9980 | 1 | 1980 9981 | 1 | 1981 9982 | 1 | 1982 9983 | 1 | 1983 9984 | 1 | 1984 9985 | 1 | 1985 9986 | 1 | 1986 9987 | 1 | 1987 9988 | 1 | 1988 9989 | 1 | 1989 9990 | 1 | 1990 9991 | 1 | 1991 9992 | 1 | 1992 9993 | 1 | 1993 9994 | 1 | 1994 9995 | 1 | 1995 9996 | 1 | 1996 9997 | 1 | 1997 9998 | 1 | 1998 9999 | 1 | 1999 (48 rows) set work_mem to default; set enable_sort to default; -- -- Compare results between plans using sorting and plans using hash -- aggregation. Force spilling in both cases by setting work_mem low. -- set work_mem='64kB'; create table agg_data_2k as select g from generate_series(0, 1999) g; analyze agg_data_2k; create table agg_data_20k as select g from generate_series(0, 19999) g; analyze agg_data_20k; -- Produce results with sorting. 
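-- (With enable_hashagg disabled the planner must group via Sort + GroupAggregate;
-- since work_mem is only 64kB here, both the sort-based and the later hash-based
-- runs have to spill to disk, so identical results from the two plan shapes verify
-- the spill code paths.  EXPLAIN (ANALYZE) on the hash-aggregate queries would
-- report Batches > 1 and a Disk Usage figure once the hash table exceeds work_mem.)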
set enable_hashagg = false; set jit_above_cost = 0; explain (costs off) select g%10000 as c1, sum(g::numeric) as c2, count(*) as c3 from agg_data_20k group by g%10000; QUERY PLAN -------------------------------------- GroupAggregate Group Key: ((g % 10000)) -> Sort Sort Key: ((g % 10000)) -> Seq Scan on agg_data_20k (5 rows) create table agg_group_1 as select g%10000 as c1, sum(g::numeric) as c2, count(*) as c3 from agg_data_20k group by g%10000; create table agg_group_2 as select * from (values (100), (300), (500)) as r(a), lateral ( select (g/2)::numeric as c1, array_agg(g::numeric) as c2, count(*) as c3 from agg_data_2k where g < r.a group by g/2) as s; set jit_above_cost to default; create table agg_group_3 as select (g/2)::numeric as c1, sum(7::int4) as c2, count(*) as c3 from agg_data_2k group by g/2; create table agg_group_4 as select (g/2)::numeric as c1, array_agg(g::numeric) as c2, count(*) as c3 from agg_data_2k group by g/2; -- Produce results with hash aggregation set enable_hashagg = true; set enable_sort = false; set jit_above_cost = 0; explain (costs off) select g%10000 as c1, sum(g::numeric) as c2, count(*) as c3 from agg_data_20k group by g%10000; QUERY PLAN -------------------------------- HashAggregate Group Key: (g % 10000) -> Seq Scan on agg_data_20k (3 rows) create table agg_hash_1 as select g%10000 as c1, sum(g::numeric) as c2, count(*) as c3 from agg_data_20k group by g%10000; create table agg_hash_2 as select * from (values (100), (300), (500)) as r(a), lateral ( select (g/2)::numeric as c1, array_agg(g::numeric) as c2, count(*) as c3 from agg_data_2k where g < r.a group by g/2) as s; set jit_above_cost to default; create table agg_hash_3 as select (g/2)::numeric as c1, sum(7::int4) as c2, count(*) as c3 from agg_data_2k group by g/2; create table agg_hash_4 as select (g/2)::numeric as c1, array_agg(g::numeric) as c2, count(*) as c3 from agg_data_2k group by g/2; set enable_sort = true; set work_mem to default; -- Compare group aggregation results to hash aggregation results (select * from agg_hash_1 except select * from agg_group_1) union all (select * from agg_group_1 except select * from agg_hash_1); c1 | c2 | c3 ----+----+---- (0 rows) (select * from agg_hash_2 except select * from agg_group_2) union all (select * from agg_group_2 except select * from agg_hash_2); a | c1 | c2 | c3 ---+----+----+---- (0 rows) (select * from agg_hash_3 except select * from agg_group_3) union all (select * from agg_group_3 except select * from agg_hash_3); c1 | c2 | c3 ----+----+---- (0 rows) (select * from agg_hash_4 except select * from agg_group_4) union all (select * from agg_group_4 except select * from agg_hash_4); c1 | c2 | c3 ----+----+---- (0 rows) drop table agg_group_1; drop table agg_group_2; drop table agg_group_3; drop table agg_group_4; drop table agg_hash_1; drop table agg_hash_2; drop table agg_hash_3; drop table agg_hash_4;