--
-- SELECT_DISTINCT
--
--
-- awk '{print $3;}' onek.data | sort -n | uniq
--
SELECT DISTINCT two FROM onek ORDER BY 1;
two
-----
0
1
(2 rows)
--
-- awk '{print $5;}' onek.data | sort -n | uniq
--
SELECT DISTINCT ten FROM onek ORDER BY 1;
ten
-----
0
1
2
3
4
5
6
7
8
9
(10 rows)
--
-- awk '{print $16;}' onek.data | sort -d | uniq
--
SELECT DISTINCT string4 FROM onek ORDER BY 1;
string4
---------
AAAAxx
HHHHxx
OOOOxx
VVVVxx
(4 rows)
--
-- awk '{print $3,$16,$5;}' onek.data | sort -d | uniq |
-- sort +0n -1 +1d -2 +2n -3
--
SELECT DISTINCT two, string4, ten
FROM onek
ORDER BY two using <, string4 using <, ten using <;
two | string4 | ten
-----+---------+-----
0 | AAAAxx | 0
0 | AAAAxx | 2
0 | AAAAxx | 4
0 | AAAAxx | 6
0 | AAAAxx | 8
0 | HHHHxx | 0
0 | HHHHxx | 2
0 | HHHHxx | 4
0 | HHHHxx | 6
0 | HHHHxx | 8
0 | OOOOxx | 0
0 | OOOOxx | 2
0 | OOOOxx | 4
0 | OOOOxx | 6
0 | OOOOxx | 8
0 | VVVVxx | 0
0 | VVVVxx | 2
0 | VVVVxx | 4
0 | VVVVxx | 6
0 | VVVVxx | 8
1 | AAAAxx | 1
1 | AAAAxx | 3
1 | AAAAxx | 5
1 | AAAAxx | 7
1 | AAAAxx | 9
1 | HHHHxx | 1
1 | HHHHxx | 3
1 | HHHHxx | 5
1 | HHHHxx | 7
1 | HHHHxx | 9
1 | OOOOxx | 1
1 | OOOOxx | 3
1 | OOOOxx | 5
1 | OOOOxx | 7
1 | OOOOxx | 9
1 | VVVVxx | 1
1 | VVVVxx | 3
1 | VVVVxx | 5
1 | VVVVxx | 7
1 | VVVVxx | 9
(40 rows)
--
-- awk '{print $2;}' person.data |
-- awk '{if(NF!=1){print $2;}else{print;}}' - emp.data |
-- awk '{if(NF!=1){print $2;}else{print;}}' - student.data |
-- awk 'BEGIN{FS=" ";}{if(NF!=1){print $5;}else{print;}}' - stud_emp.data |
-- sort -n -r | uniq
--
SELECT DISTINCT p.age FROM person* p ORDER BY age using >;
age
-----
98
88
78
68
60
58
50
48
40
38
34
30
28
25
24
23
20
19
18
8
(20 rows)
--
-- Check mentioning same column more than once
--
EXPLAIN (VERBOSE, COSTS OFF)
SELECT count(*) FROM
(SELECT DISTINCT two, four, two FROM tenk1) ss;
QUERY PLAN
--------------------------------------------------------
Aggregate
Output: count(*)
-> HashAggregate
Output: tenk1.two, tenk1.four, tenk1.two
Group Key: tenk1.two, tenk1.four
-> Seq Scan on public.tenk1
Output: tenk1.two, tenk1.four, tenk1.two
(7 rows)
SELECT count(*) FROM
(SELECT DISTINCT two, four, two FROM tenk1) ss;
count
-------
4
(1 row)
--
-- Compare results between plans using sorting and plans using hash
-- aggregation. Force spilling in both cases by setting work_mem low.
--
SET work_mem='64kB';
-- Produce results with sorting.
SET enable_hashagg=FALSE;
SET jit_above_cost=0;
EXPLAIN (costs off)
SELECT DISTINCT g%1000 FROM generate_series(0,9999) g;
QUERY PLAN
------------------------------------------------
Unique
-> Sort
Sort Key: ((g % 1000))
-> Function Scan on generate_series g
(4 rows)
CREATE TABLE distinct_group_1 AS
SELECT DISTINCT g%1000 FROM generate_series(0,9999) g;
SET jit_above_cost TO DEFAULT;
CREATE TABLE distinct_group_2 AS
SELECT DISTINCT (g%1000)::text FROM generate_series(0,9999) g;
SET enable_seqscan = 0;
-- Check to see we get an incremental sort plan
EXPLAIN (costs off)
SELECT DISTINCT hundred, two FROM tenk1;
QUERY PLAN
-----------------------------------------------------
Unique
-> Incremental Sort
Sort Key: hundred, two
Presorted Key: hundred
-> Index Scan using tenk1_hundred on tenk1
(5 rows)
RESET enable_seqscan;
SET enable_hashagg=TRUE;
-- Produce results with hash aggregation.
SET enable_sort=FALSE;
SET jit_above_cost=0;
EXPLAIN (costs off)
SELECT DISTINCT g%1000 FROM generate_series(0,9999) g;
QUERY PLAN
------------------------------------------
HashAggregate
Group Key: (g % 1000)
-> Function Scan on generate_series g
(3 rows)
CREATE TABLE distinct_hash_1 AS
SELECT DISTINCT g%1000 FROM generate_series(0,9999) g;
SET jit_above_cost TO DEFAULT;
CREATE TABLE distinct_hash_2 AS
SELECT DISTINCT (g%1000)::text FROM generate_series(0,9999) g;
SET enable_sort=TRUE;
SET work_mem TO DEFAULT;
-- Compare results
(SELECT * FROM distinct_hash_1 EXCEPT SELECT * FROM distinct_group_1)
UNION ALL
(SELECT * FROM distinct_group_1 EXCEPT SELECT * FROM distinct_hash_1);
?column?
----------
(0 rows)
(SELECT * FROM distinct_hash_1 EXCEPT SELECT * FROM distinct_group_1)
UNION ALL
(SELECT * FROM distinct_group_1 EXCEPT SELECT * FROM distinct_hash_1);
?column?
----------
(0 rows)
DROP TABLE distinct_hash_1;
DROP TABLE distinct_hash_2;
DROP TABLE distinct_group_1;
DROP TABLE distinct_group_2;
-- Test parallel DISTINCT
SET parallel_tuple_cost=0;
SET parallel_setup_cost=0;
SET min_parallel_table_scan_size=0;
SET max_parallel_workers_per_gather=2;
-- Ensure we get a parallel plan
EXPLAIN (costs off)
SELECT DISTINCT four FROM tenk1;
QUERY PLAN
----------------------------------------------------
Unique
-> Gather Merge
Workers Planned: 2
-> Sort
Sort Key: four
-> HashAggregate
Group Key: four
-> Parallel Seq Scan on tenk1
(8 rows)
-- Ensure the parallel plan produces the correct results
SELECT DISTINCT four FROM tenk1;
four
------
0
1
2
3
(4 rows)
CREATE OR REPLACE FUNCTION distinct_func(a INT) RETURNS INT AS $$
BEGIN
RETURN a;
END;
$$ LANGUAGE plpgsql PARALLEL UNSAFE;
-- Ensure we don't do parallel distinct with a parallel unsafe function
EXPLAIN (COSTS OFF)
SELECT DISTINCT distinct_func(1) FROM tenk1;
QUERY PLAN
----------------------------------------------------------
Unique
-> Sort
Sort Key: (distinct_func(1))
-> Index Only Scan using tenk1_hundred on tenk1
(4 rows)
-- make the function parallel safe
CREATE OR REPLACE FUNCTION distinct_func(a INT) RETURNS INT AS $$
BEGIN
RETURN a;
END;
$$ LANGUAGE plpgsql PARALLEL SAFE;
-- Ensure we do parallel distinct now that the function is parallel safe
EXPLAIN (COSTS OFF)
SELECT DISTINCT distinct_func(1) FROM tenk1;
QUERY PLAN
----------------------------------------------------
Unique
-> Gather Merge
Workers Planned: 2
-> Unique
-> Sort
Sort Key: (distinct_func(1))
-> Parallel Seq Scan on tenk1
(7 rows)
RESET max_parallel_workers_per_gather;
RESET min_parallel_table_scan_size;
RESET parallel_setup_cost;
RESET parallel_tuple_cost;
--
-- Test the planner's ability to use a LIMIT 1 instead of a Unique node when
-- all of the distinct_pathkeys have been marked as redundant
--
-- Ensure we get a plan with a Limit 1
EXPLAIN (COSTS OFF)
SELECT DISTINCT four FROM tenk1 WHERE four = 0;
QUERY PLAN
----------------------------
Limit
-> Seq Scan on tenk1
Filter: (four = 0)
(3 rows)
-- Ensure the above gives us the correct result
SELECT DISTINCT four FROM tenk1 WHERE four = 0;
four
------
0
(1 row)
-- Ensure we get a plan with a Limit 1
EXPLAIN (COSTS OFF)
SELECT DISTINCT four FROM tenk1 WHERE four = 0 AND two <> 0;
QUERY PLAN
---------------------------------------------
Limit
-> Seq Scan on tenk1
Filter: ((two <> 0) AND (four = 0))
(3 rows)
-- Ensure no rows are returned
SELECT DISTINCT four FROM tenk1 WHERE four = 0 AND two <> 0;
four
------
(0 rows)
-- Ensure we get a plan with a Limit 1 when the SELECT list contains constants
EXPLAIN (COSTS OFF)
SELECT DISTINCT four,1,2,3 FROM tenk1 WHERE four = 0;
QUERY PLAN
----------------------------
Limit
-> Seq Scan on tenk1
Filter: (four = 0)
(3 rows)
-- Ensure we only get 1 row
SELECT DISTINCT four,1,2,3 FROM tenk1 WHERE four = 0;
four | ?column? | ?column? | ?column?
------+----------+----------+----------
0 | 1 | 2 | 3
(1 row)
SET parallel_setup_cost=0;
SET min_parallel_table_scan_size=0;
SET max_parallel_workers_per_gather=2;
-- Ensure we get a plan with a Limit 1 in both partial distinct and final
-- distinct
EXPLAIN (COSTS OFF)
SELECT DISTINCT four FROM tenk1 WHERE four = 10;
QUERY PLAN
----------------------------------------------
Limit
-> Gather
Workers Planned: 2
-> Limit
-> Parallel Seq Scan on tenk1
Filter: (four = 10)
(6 rows)
RESET max_parallel_workers_per_gather;
RESET min_parallel_table_scan_size;
RESET parallel_setup_cost;
--
-- Also, some tests of IS DISTINCT FROM, which doesn't quite deserve its
-- very own regression file.
--
CREATE TEMP TABLE disttable (f1 integer);
INSERT INTO DISTTABLE VALUES(1);
INSERT INTO DISTTABLE VALUES(2);
INSERT INTO DISTTABLE VALUES(3);
INSERT INTO DISTTABLE VALUES(NULL);
-- basic cases
SELECT f1, f1 IS DISTINCT FROM 2 as "not 2" FROM disttable;
f1 | not 2
----+-------
1 | t
2 | f
3 | t
| t
(4 rows)
SELECT f1, f1 IS DISTINCT FROM NULL as "not null" FROM disttable;
f1 | not null
----+----------
1 | t
2 | t
3 | t
| f
(4 rows)
SELECT f1, f1 IS DISTINCT FROM f1 as "false" FROM disttable;
f1 | false
----+-------
1 | f
2 | f
3 | f
| f
(4 rows)
SELECT f1, f1 IS DISTINCT FROM f1+1 as "not null" FROM disttable;
f1 | not null
----+----------
1 | t
2 | t
3 | t
| f
(4 rows)
-- check that optimizer constant-folds it properly
SELECT 1 IS DISTINCT FROM 2 as "yes";
yes
-----
t
(1 row)
SELECT 2 IS DISTINCT FROM 2 as "no";
no
----
f
(1 row)
SELECT 2 IS DISTINCT FROM null as "yes";
yes
-----
t
(1 row)
SELECT null IS DISTINCT FROM null as "no";
no
----
f
(1 row)
-- negated form
SELECT 1 IS NOT DISTINCT FROM 2 as "no";
no
----
f
(1 row)
SELECT 2 IS NOT DISTINCT FROM 2 as "yes";
yes
-----
t
(1 row)
SELECT 2 IS NOT DISTINCT FROM null as "no";
no
----
f
(1 row)
SELECT null IS NOT DISTINCT FROM null as "yes";
yes
-----
t
(1 row)