--
-- SELECT_DISTINCT
--

-- Expected values mirror the source data:
-- awk '{print $3;}' onek.data | sort -n | uniq
SELECT DISTINCT two FROM onek ORDER BY 1;
-- Expected values mirror the source data:
-- awk '{print $5;}' onek.data | sort -n | uniq
SELECT DISTINCT ten FROM onek ORDER BY 1;
-- Expected values mirror the source data:
-- awk '{print $16;}' onek.data | sort -d | uniq
SELECT DISTINCT string4 FROM onek ORDER BY 1;
-- Multi-column DISTINCT; expected values mirror the source data:
-- awk '{print $3,$16,$5;}' onek.data | sort -d | uniq |
-- sort +0n -1 +1d -2 +2n -3
SELECT DISTINCT two, string4, ten
   FROM onek
   ORDER BY two using <, string4 using <, ten using <;
-- DISTINCT over an inheritance tree (person* includes child tables);
-- expected values mirror the combined source data:
-- awk '{print $2;}' person.data |
-- awk '{if(NF!=1){print $2;}else{print;}}' - emp.data |
-- awk '{if(NF!=1){print $2;}else{print;}}' - student.data |
-- awk 'BEGIN{FS=" ";}{if(NF!=1){print $5;}else{print;}}' - stud_emp.data |
-- sort -n -r | uniq
SELECT DISTINCT p.age FROM person* p ORDER BY age using >;
--
-- Check mentioning same column more than once
--
EXPLAIN (VERBOSE, COSTS OFF)
SELECT count(*) FROM
  (SELECT DISTINCT two, four, two FROM tenk1) ss;

SELECT count(*) FROM
  (SELECT DISTINCT two, four, two FROM tenk1) ss;
--
-- Compare results between plans using sorting and plans using hash
-- aggregation. Force spilling in both cases by setting work_mem low.
--
SET work_mem='64kB';

-- Produce results with sorting.
SET enable_hashagg=FALSE;

SET jit_above_cost=0;

EXPLAIN (costs off)
SELECT DISTINCT g%1000 FROM generate_series(0,9999) g;

CREATE TABLE distinct_group_1 AS
SELECT DISTINCT g%1000 FROM generate_series(0,9999) g;

SET jit_above_cost TO DEFAULT;

CREATE TABLE distinct_group_2 AS
SELECT DISTINCT (g%1000)::text FROM generate_series(0,9999) g;
SET enable_seqscan = 0;

-- Check to see we get an incremental sort plan
EXPLAIN (costs off)
SELECT DISTINCT hundred, two FROM tenk1;

RESET enable_seqscan;
SET enable_hashagg=TRUE;

-- Produce results with hash aggregation.
SET enable_sort=FALSE;

SET jit_above_cost=0;

EXPLAIN (costs off)
SELECT DISTINCT g%1000 FROM generate_series(0,9999) g;

CREATE TABLE distinct_hash_1 AS
SELECT DISTINCT g%1000 FROM generate_series(0,9999) g;

SET jit_above_cost TO DEFAULT;

CREATE TABLE distinct_hash_2 AS
SELECT DISTINCT (g%1000)::text FROM generate_series(0,9999) g;

SET enable_sort=TRUE;

SET work_mem TO DEFAULT;

-- Compare results: each pair of EXCEPT queries should return zero rows
-- if the sorted and hashed plans produced identical sets.
(SELECT * FROM distinct_hash_1 EXCEPT SELECT * FROM distinct_group_1)
  UNION ALL
(SELECT * FROM distinct_group_1 EXCEPT SELECT * FROM distinct_hash_1);

-- BUGFIX: this second comparison previously re-checked the *_1 tables,
-- leaving distinct_hash_2/distinct_group_2 created but never verified.
(SELECT * FROM distinct_hash_2 EXCEPT SELECT * FROM distinct_group_2)
  UNION ALL
(SELECT * FROM distinct_group_2 EXCEPT SELECT * FROM distinct_hash_2);

DROP TABLE distinct_hash_1;
DROP TABLE distinct_hash_2;
DROP TABLE distinct_group_1;
DROP TABLE distinct_group_2;
-- Test parallel DISTINCT
SET parallel_tuple_cost=0;
SET parallel_setup_cost=0;
SET min_parallel_table_scan_size=0;
SET max_parallel_workers_per_gather=2;

-- Ensure we get a parallel plan
EXPLAIN (costs off)
SELECT DISTINCT four FROM tenk1;

-- Ensure the parallel plan produces the correct results
SELECT DISTINCT four FROM tenk1;

-- Identity function used only to carry a parallel-safety label.
CREATE OR REPLACE FUNCTION distinct_func(a INT) RETURNS INT AS $$
BEGIN
  RETURN a;
END;
$$ LANGUAGE plpgsql PARALLEL UNSAFE;

-- Ensure we don't do parallel distinct with a parallel unsafe function
EXPLAIN (COSTS OFF)
SELECT DISTINCT distinct_func(1) FROM tenk1;

-- make the function parallel safe
CREATE OR REPLACE FUNCTION distinct_func(a INT) RETURNS INT AS $$
BEGIN
  RETURN a;
END;
$$ LANGUAGE plpgsql PARALLEL SAFE;

-- Ensure we do parallel distinct now that the function is parallel safe
EXPLAIN (COSTS OFF)
SELECT DISTINCT distinct_func(1) FROM tenk1;

RESET max_parallel_workers_per_gather;
RESET min_parallel_table_scan_size;
RESET parallel_setup_cost;
RESET parallel_tuple_cost;
--
-- Test the planner's ability to use a LIMIT 1 instead of a Unique node when
-- all of the distinct_pathkeys have been marked as redundant
--

-- Ensure we get a plan with a Limit 1
EXPLAIN (COSTS OFF)
SELECT DISTINCT four FROM tenk1 WHERE four = 0;

-- Ensure the above gives us the correct result
SELECT DISTINCT four FROM tenk1 WHERE four = 0;

-- Ensure we get a plan with a Limit 1
EXPLAIN (COSTS OFF)
SELECT DISTINCT four FROM tenk1 WHERE four = 0 AND two <> 0;

-- Ensure no rows are returned
SELECT DISTINCT four FROM tenk1 WHERE four = 0 AND two <> 0;

-- Ensure we get a plan with a Limit 1 when the SELECT list contains constants
EXPLAIN (COSTS OFF)
SELECT DISTINCT four,1,2,3 FROM tenk1 WHERE four = 0;

-- Ensure we only get 1 row
SELECT DISTINCT four,1,2,3 FROM tenk1 WHERE four = 0;
--
-- Also, some tests of IS DISTINCT FROM, which doesn't quite deserve its
-- very own regression file.
--
CREATE TEMP TABLE disttable (f1 integer);
INSERT INTO DISTTABLE VALUES(1);
INSERT INTO DISTTABLE VALUES(2);
INSERT INTO DISTTABLE VALUES(3);
INSERT INTO DISTTABLE VALUES(NULL);

-- basic cases
SELECT f1, f1 IS DISTINCT FROM 2 as "not 2" FROM disttable;
SELECT f1, f1 IS DISTINCT FROM NULL as "not null" FROM disttable;
SELECT f1, f1 IS DISTINCT FROM f1 as "false" FROM disttable;
SELECT f1, f1 IS DISTINCT FROM f1+1 as "not null" FROM disttable;

-- check that optimizer constant-folds it properly
SELECT 1 IS DISTINCT FROM 2 as "yes";
SELECT 2 IS DISTINCT FROM 2 as "no";
SELECT 2 IS DISTINCT FROM null as "yes";
SELECT null IS DISTINCT FROM null as "no";

-- negated form
SELECT 1 IS NOT DISTINCT FROM 2 as "no";
SELECT 2 IS NOT DISTINCT FROM 2 as "yes";
SELECT 2 IS NOT DISTINCT FROM null as "no";
SELECT null IS NOT DISTINCT FROM null as "yes";