--
-- SELECT_DISTINCT
--
--
-- awk '{print $3;}' onek.data | sort -n | uniq
--
SELECT DISTINCT two FROM onek ORDER BY 1;
 two 
-----
   0
   1
(2 rows)

--
-- awk '{print $5;}' onek.data | sort -n | uniq
--
SELECT DISTINCT ten FROM onek ORDER BY 1;
 ten 
-----
   0
   1
   2
   3
   4
   5
   6
   7
   8
   9
(10 rows)

--
-- awk '{print $16;}' onek.data | sort -d | uniq
--
SELECT DISTINCT string4 FROM onek ORDER BY 1;
 string4 
---------
 AAAAxx
 HHHHxx
 OOOOxx
 VVVVxx
(4 rows)

--
-- awk '{print $3,$16,$5;}' onek.data | sort -d | uniq |
-- sort +0n -1 +1d -2 +2n -3
--
SELECT DISTINCT two, string4, ten
   FROM onek
   ORDER BY two using <, string4 using <, ten using <;
 two | string4 | ten 
-----+---------+-----
   0 | AAAAxx  |   0
   0 | AAAAxx  |   2
   0 | AAAAxx  |   4
   0 | AAAAxx  |   6
   0 | AAAAxx  |   8
   0 | HHHHxx  |   0
   0 | HHHHxx  |   2
   0 | HHHHxx  |   4
   0 | HHHHxx  |   6
   0 | HHHHxx  |   8
   0 | OOOOxx  |   0
   0 | OOOOxx  |   2
   0 | OOOOxx  |   4
   0 | OOOOxx  |   6
   0 | OOOOxx  |   8
   0 | VVVVxx  |   0
   0 | VVVVxx  |   2
   0 | VVVVxx  |   4
   0 | VVVVxx  |   6
   0 | VVVVxx  |   8
   1 | AAAAxx  |   1
   1 | AAAAxx  |   3
   1 | AAAAxx  |   5
   1 | AAAAxx  |   7
   1 | AAAAxx  |   9
   1 | HHHHxx  |   1
   1 | HHHHxx  |   3
   1 | HHHHxx  |   5
   1 | HHHHxx  |   7
   1 | HHHHxx  |   9
   1 | OOOOxx  |   1
   1 | OOOOxx  |   3
   1 | OOOOxx  |   5
   1 | OOOOxx  |   7
   1 | OOOOxx  |   9
   1 | VVVVxx  |   1
   1 | VVVVxx  |   3
   1 | VVVVxx  |   5
   1 | VVVVxx  |   7
   1 | VVVVxx  |   9
(40 rows)

--
-- awk '{print $2;}' person.data |
-- awk '{if(NF!=1){print $2;}else{print;}}' - emp.data |
-- awk '{if(NF!=1){print $2;}else{print;}}' - student.data |
-- awk 'BEGIN{FS=" ";}{if(NF!=1){print $5;}else{print;}}' - stud_emp.data |
-- sort -n -r | uniq
--
SELECT DISTINCT p.age FROM person* p ORDER BY age using >;
 age 
-----
  98
  88
  78
  68
  60
  58
  50
  48
  40
  38
  34
  30
  28
  25
  24
  23
  20
  19
  18
   8
(20 rows)

--
-- Check mentioning same column more than once
--
EXPLAIN (VERBOSE, COSTS OFF)
SELECT count(*) FROM
  (SELECT DISTINCT two, four, two FROM tenk1) ss;
                       QUERY PLAN                       
--------------------------------------------------------
 Aggregate
   Output: count(*)
   ->  HashAggregate
         Output: tenk1.two, tenk1.four, tenk1.two
         Group Key: tenk1.two, tenk1.four
         ->  Seq Scan on public.tenk1
               Output: tenk1.two, tenk1.four, tenk1.two
(7 rows)

SELECT count(*) FROM
  (SELECT DISTINCT two, four, two FROM tenk1) ss;
 count 
-------
     4
(1 row)

--
-- Compare results between plans using sorting and plans using hash
-- aggregation. Force spilling in both cases by setting work_mem low.
--
SET work_mem='64kB';
-- Produce results with sorting.
SET enable_hashagg=FALSE;
SET jit_above_cost=0;
EXPLAIN (costs off)
SELECT DISTINCT g%1000 FROM generate_series(0,9999) g;
                   QUERY PLAN                   
------------------------------------------------
 Unique
   ->  Sort
         Sort Key: ((g % 1000))
         ->  Function Scan on generate_series g
(4 rows)

CREATE TABLE distinct_group_1 AS
SELECT DISTINCT g%1000 FROM generate_series(0,9999) g;
SET jit_above_cost TO DEFAULT;
CREATE TABLE distinct_group_2 AS
SELECT DISTINCT (g%1000)::text FROM generate_series(0,9999) g;
SET enable_seqscan = 0;
-- Check to see we get an incremental sort plan
EXPLAIN (costs off)
SELECT DISTINCT hundred, two FROM tenk1;
                     QUERY PLAN                      
-----------------------------------------------------
 Unique
   ->  Incremental Sort
         Sort Key: hundred, two
         Presorted Key: hundred
         ->  Index Scan using tenk1_hundred on tenk1
(5 rows)

RESET enable_seqscan;
SET enable_hashagg=TRUE;
-- Produce results with hash aggregation.
SET enable_sort=FALSE;
SET jit_above_cost=0;
EXPLAIN (costs off)
SELECT DISTINCT g%1000 FROM generate_series(0,9999) g;
                QUERY PLAN                
------------------------------------------
 HashAggregate
   Group Key: (g % 1000)
   ->  Function Scan on generate_series g
(3 rows)

CREATE TABLE distinct_hash_1 AS
SELECT DISTINCT g%1000 FROM generate_series(0,9999) g;
SET jit_above_cost TO DEFAULT;
CREATE TABLE distinct_hash_2 AS
SELECT DISTINCT (g%1000)::text FROM generate_series(0,9999) g;
SET enable_sort=TRUE;
SET work_mem TO DEFAULT;
-- Compare results
(SELECT * FROM distinct_hash_1 EXCEPT SELECT * FROM distinct_group_1)
  UNION ALL
(SELECT * FROM distinct_group_1 EXCEPT SELECT * FROM distinct_hash_1);
 ?column? 
----------
(0 rows)

(SELECT * FROM distinct_hash_2 EXCEPT SELECT * FROM distinct_group_2)
  UNION ALL
(SELECT * FROM distinct_group_2 EXCEPT SELECT * FROM distinct_hash_2);
 text 
------
(0 rows)

DROP TABLE distinct_hash_1;
DROP TABLE distinct_hash_2;
DROP TABLE distinct_group_1;
DROP TABLE distinct_group_2;
-- Test parallel DISTINCT
SET parallel_tuple_cost=0;
SET parallel_setup_cost=0;
SET min_parallel_table_scan_size=0;
SET max_parallel_workers_per_gather=2;
-- Ensure we get a parallel plan
EXPLAIN (costs off)
SELECT DISTINCT four FROM tenk1;
                     QUERY PLAN                     
----------------------------------------------------
 Unique
   ->  Gather Merge
         Workers Planned: 2
         ->  Sort
               Sort Key: four
               ->  HashAggregate
                     Group Key: four
                     ->  Parallel Seq Scan on tenk1
(8 rows)

-- Ensure the parallel plan produces the correct results
SELECT DISTINCT four FROM tenk1;
 four 
------
    0
    1
    2
    3
(4 rows)

CREATE OR REPLACE FUNCTION distinct_func(a INT) RETURNS INT AS $$
  BEGIN
    RETURN a;
  END;
$$ LANGUAGE plpgsql PARALLEL UNSAFE;
-- Ensure we don't do parallel distinct with a parallel unsafe function
EXPLAIN (COSTS OFF)
SELECT DISTINCT distinct_func(1) FROM tenk1;
                        QUERY PLAN                         
----------------------------------------------------------
 Unique
   ->  Sort
         Sort Key: (distinct_func(1))
         ->  Index Only Scan using tenk1_hundred on tenk1
(4 rows)

-- make the function parallel safe
CREATE OR REPLACE FUNCTION distinct_func(a INT) RETURNS INT AS $$
  BEGIN
    RETURN a;
  END;
$$ LANGUAGE plpgsql PARALLEL SAFE;
-- Ensure we do parallel distinct now that the function is parallel safe
EXPLAIN (COSTS OFF)
SELECT DISTINCT distinct_func(1) FROM tenk1;
                     QUERY PLAN                     
----------------------------------------------------
 Unique
   ->  Gather Merge
         Workers Planned: 2
         ->  Unique
               ->  Sort
                     Sort Key: (distinct_func(1))
                     ->  Parallel Seq Scan on tenk1
(7 rows)

RESET max_parallel_workers_per_gather;
RESET min_parallel_table_scan_size;
RESET parallel_setup_cost;
RESET parallel_tuple_cost;
--
-- Test the planner's ability to use a LIMIT 1 instead of a Unique node when
-- all of the distinct_pathkeys have been marked as redundant
--
-- Ensure we get a plan with a Limit 1
EXPLAIN (COSTS OFF)
SELECT DISTINCT four FROM tenk1 WHERE four = 0;
         QUERY PLAN         
----------------------------
 Limit
   ->  Seq Scan on tenk1
         Filter: (four = 0)
(3 rows)

-- Ensure the above gives us the correct result
SELECT DISTINCT four FROM tenk1 WHERE four = 0;
 four 
------
    0
(1 row)

-- Ensure we get a plan with a Limit 1
EXPLAIN (COSTS OFF)
SELECT DISTINCT four FROM tenk1 WHERE four = 0 AND two <> 0;
                 QUERY PLAN                  
---------------------------------------------
 Limit
   ->  Seq Scan on tenk1
         Filter: ((two <> 0) AND (four = 0))
(3 rows)

-- Ensure no rows are returned
SELECT DISTINCT four FROM tenk1 WHERE four = 0 AND two <> 0;
 four 
------
(0 rows)

-- Ensure we get a plan with a Limit 1 when the SELECT list contains constants
EXPLAIN (COSTS OFF)
SELECT DISTINCT four,1,2,3 FROM tenk1 WHERE four = 0;
         QUERY PLAN         
----------------------------
 Limit
   ->  Seq Scan on tenk1
         Filter: (four = 0)
(3 rows)

-- Ensure we only get 1 row
SELECT DISTINCT four,1,2,3 FROM tenk1 WHERE four = 0;
 four | ?column? | ?column? | ?column? 
------+----------+----------+----------
    0 |        1 |        2 |        3
(1 row)

SET parallel_setup_cost=0;
SET min_parallel_table_scan_size=0;
SET max_parallel_workers_per_gather=2;
-- Ensure we get a plan with a Limit 1 in both partial distinct and final
-- distinct
EXPLAIN (COSTS OFF)
SELECT DISTINCT four FROM tenk1 WHERE four = 10;
                  QUERY PLAN                  
----------------------------------------------
 Limit
   ->  Gather
         Workers Planned: 2
         ->  Limit
               ->  Parallel Seq Scan on tenk1
                     Filter: (four = 10)
(6 rows)

RESET max_parallel_workers_per_gather;
RESET min_parallel_table_scan_size;
RESET parallel_setup_cost;
--
-- Also, some tests of IS DISTINCT FROM, which doesn't quite deserve its
-- very own regression file.
--
CREATE TEMP TABLE disttable (f1 integer);
INSERT INTO DISTTABLE VALUES(1);
INSERT INTO DISTTABLE VALUES(2);
INSERT INTO DISTTABLE VALUES(3);
INSERT INTO DISTTABLE VALUES(NULL);
-- basic cases
SELECT f1, f1 IS DISTINCT FROM 2 as "not 2" FROM disttable;
 f1 | not 2 
----+-------
  1 | t
  2 | f
  3 | t
    | t
(4 rows)

SELECT f1, f1 IS DISTINCT FROM NULL as "not null" FROM disttable;
 f1 | not null 
----+----------
  1 | t
  2 | t
  3 | t
    | f
(4 rows)

SELECT f1, f1 IS DISTINCT FROM f1 as "false" FROM disttable;
 f1 | false 
----+-------
  1 | f
  2 | f
  3 | f
    | f
(4 rows)

SELECT f1, f1 IS DISTINCT FROM f1+1 as "not null" FROM disttable;
 f1 | not null 
----+----------
  1 | t
  2 | t
  3 | t
    | f
(4 rows)

-- check that optimizer constant-folds it properly
SELECT 1 IS DISTINCT FROM 2 as "yes";
 yes 
-----
 t
(1 row)

SELECT 2 IS DISTINCT FROM 2 as "no";
 no 
----
 f
(1 row)

SELECT 2 IS DISTINCT FROM null as "yes";
 yes 
-----
 t
(1 row)

SELECT null IS DISTINCT FROM null as "no";
 no 
----
 f
(1 row)

-- negated form
SELECT 1 IS NOT DISTINCT FROM 2 as "no";
 no 
----
 f
(1 row)

SELECT 2 IS NOT DISTINCT FROM 2 as "yes";
 yes 
-----
 t
(1 row)

SELECT 2 IS NOT DISTINCT FROM null as "no";
 no 
----
 f
(1 row)

SELECT null IS NOT DISTINCT FROM null as "yes";
 yes 
-----
 t
(1 row)
