--
-- PARALLEL
--
create function sp_parallel_restricted(int) returns int as
$$begin return $1; end$$ language plpgsql parallel restricted;
begin;
-- encourage use of parallel plans
set parallel_setup_cost=0;
set parallel_tuple_cost=0;
set min_parallel_table_scan_size=0;
set max_parallel_workers_per_gather=4;
-- Parallel Append with partial-subplans
explain (costs off)
select round(avg(aa)), sum(aa) from a_star;
|
|
                          QUERY PLAN
--------------------------------------------------------------
 Finalize Aggregate
   ->  Gather
         Workers Planned: 3
         ->  Partial Aggregate
               ->  Parallel Append
                     ->  Parallel Seq Scan on d_star a_star_4
                     ->  Parallel Seq Scan on f_star a_star_6
                     ->  Parallel Seq Scan on e_star a_star_5
                     ->  Parallel Seq Scan on b_star a_star_2
                     ->  Parallel Seq Scan on c_star a_star_3
                     ->  Parallel Seq Scan on a_star a_star_1
(11 rows)

select round(avg(aa)), sum(aa) from a_star a1;
 round | sum
-------+-----
    14 | 355
(1 row)
|
|
|
|
-- Parallel Append with both partial and non-partial subplans
|
|
alter table c_star set (parallel_workers = 0);
|
|
alter table d_star set (parallel_workers = 0);
|
|
explain (costs off)
|
|
select round(avg(aa)), sum(aa) from a_star;
|
|
QUERY PLAN
|
|
--------------------------------------------------------------
|
|
Finalize Aggregate
|
|
-> Gather
|
|
Workers Planned: 3
|
|
-> Partial Aggregate
|
|
-> Parallel Append
|
|
-> Seq Scan on d_star a_star_4
|
|
-> Seq Scan on c_star a_star_3
|
|
-> Parallel Seq Scan on f_star a_star_6
|
|
-> Parallel Seq Scan on e_star a_star_5
|
|
-> Parallel Seq Scan on b_star a_star_2
|
|
-> Parallel Seq Scan on a_star a_star_1
|
|
(11 rows)
|
|
|
|
select round(avg(aa)), sum(aa) from a_star a2;
|
|
round | sum
|
|
-------+-----
|
|
14 | 355
|
|
(1 row)
|
|
|
|
-- Parallel Append with only non-partial subplans
|
|
alter table a_star set (parallel_workers = 0);
|
|
alter table b_star set (parallel_workers = 0);
|
|
alter table e_star set (parallel_workers = 0);
|
|
alter table f_star set (parallel_workers = 0);
|
|
explain (costs off)
|
|
select round(avg(aa)), sum(aa) from a_star;
|
|
QUERY PLAN
|
|
-----------------------------------------------------
|
|
Finalize Aggregate
|
|
-> Gather
|
|
Workers Planned: 3
|
|
-> Partial Aggregate
|
|
-> Parallel Append
|
|
-> Seq Scan on d_star a_star_4
|
|
-> Seq Scan on f_star a_star_6
|
|
-> Seq Scan on e_star a_star_5
|
|
-> Seq Scan on b_star a_star_2
|
|
-> Seq Scan on c_star a_star_3
|
|
-> Seq Scan on a_star a_star_1
|
|
(11 rows)
|
|
|
|
select round(avg(aa)), sum(aa) from a_star a3;
|
|
round | sum
|
|
-------+-----
|
|
14 | 355
|
|
(1 row)
|
|
|
|
-- Disable Parallel Append
|
|
alter table a_star reset (parallel_workers);
|
|
alter table b_star reset (parallel_workers);
|
|
alter table c_star reset (parallel_workers);
|
|
alter table d_star reset (parallel_workers);
|
|
alter table e_star reset (parallel_workers);
|
|
alter table f_star reset (parallel_workers);
|
|
set enable_parallel_append to off;
|
|
explain (costs off)
|
|
select round(avg(aa)), sum(aa) from a_star;
|
|
QUERY PLAN
|
|
--------------------------------------------------------------
|
|
Finalize Aggregate
|
|
-> Gather
|
|
Workers Planned: 1
|
|
-> Partial Aggregate
|
|
-> Append
|
|
-> Parallel Seq Scan on a_star a_star_1
|
|
-> Parallel Seq Scan on b_star a_star_2
|
|
-> Parallel Seq Scan on c_star a_star_3
|
|
-> Parallel Seq Scan on d_star a_star_4
|
|
-> Parallel Seq Scan on e_star a_star_5
|
|
-> Parallel Seq Scan on f_star a_star_6
|
|
(11 rows)
|
|
|
|
select round(avg(aa)), sum(aa) from a_star a4;
|
|
round | sum
|
|
-------+-----
|
|
14 | 355
|
|
(1 row)
|
|
|
|
reset enable_parallel_append;
|
|
-- Parallel Append that runs serially
|
|
create function sp_test_func() returns setof text as
|
|
$$ select 'foo'::varchar union all select 'bar'::varchar $$
|
|
language sql stable;
|
|
select sp_test_func() order by 1;
|
|
sp_test_func
|
|
--------------
|
|
bar
|
|
foo
|
|
(2 rows)
|
|
|
|
-- Parallel Append is not to be used when the subpath depends on the outer param
|
|
create table part_pa_test(a int, b int) partition by range(a);
|
|
create table part_pa_test_p1 partition of part_pa_test for values from (minvalue) to (0);
|
|
create table part_pa_test_p2 partition of part_pa_test for values from (0) to (maxvalue);
|
|
explain (costs off)
|
|
select (select max((select pa1.b from part_pa_test pa1 where pa1.a = pa2.a)))
|
|
from part_pa_test pa2;
|
|
QUERY PLAN
|
|
--------------------------------------------------------------
|
|
Aggregate
|
|
-> Gather
|
|
Workers Planned: 3
|
|
-> Parallel Append
|
|
-> Parallel Seq Scan on part_pa_test_p1 pa2_1
|
|
-> Parallel Seq Scan on part_pa_test_p2 pa2_2
|
|
SubPlan 2
|
|
-> Result
|
|
SubPlan 1
|
|
-> Append
|
|
-> Seq Scan on part_pa_test_p1 pa1_1
|
|
Filter: (a = pa2.a)
|
|
-> Seq Scan on part_pa_test_p2 pa1_2
|
|
Filter: (a = pa2.a)
|
|
(14 rows)
|
|
|
|
drop table part_pa_test;
|
|
-- test with leader participation disabled
|
|
set parallel_leader_participation = off;
|
|
explain (costs off)
|
|
select count(*) from tenk1 where stringu1 = 'GRAAAA';
|
|
QUERY PLAN
|
|
---------------------------------------------------------
|
|
Finalize Aggregate
|
|
-> Gather
|
|
Workers Planned: 4
|
|
-> Partial Aggregate
|
|
-> Parallel Seq Scan on tenk1
|
|
Filter: (stringu1 = 'GRAAAA'::name)
|
|
(6 rows)
|
|
|
|
select count(*) from tenk1 where stringu1 = 'GRAAAA';
|
|
count
|
|
-------
|
|
15
|
|
(1 row)
|
|
|
|
-- test with leader participation disabled, but no workers available (so
|
|
-- the leader will have to run the plan despite the setting)
|
|
set max_parallel_workers = 0;
|
|
explain (costs off)
|
|
select count(*) from tenk1 where stringu1 = 'GRAAAA';
|
|
QUERY PLAN
|
|
---------------------------------------------------------
|
|
Finalize Aggregate
|
|
-> Gather
|
|
Workers Planned: 4
|
|
-> Partial Aggregate
|
|
-> Parallel Seq Scan on tenk1
|
|
Filter: (stringu1 = 'GRAAAA'::name)
|
|
(6 rows)
|
|
|
|
select count(*) from tenk1 where stringu1 = 'GRAAAA';
|
|
count
|
|
-------
|
|
15
|
|
(1 row)
|
|
|
|
reset max_parallel_workers;
|
|
reset parallel_leader_participation;
|
|
-- test that parallel_restricted function doesn't run in worker
|
|
alter table tenk1 set (parallel_workers = 4);
|
|
explain (verbose, costs off)
|
|
select sp_parallel_restricted(unique1) from tenk1
|
|
where stringu1 = 'GRAAAA' order by 1;
|
|
QUERY PLAN
|
|
---------------------------------------------------------
|
|
Sort
|
|
Output: (sp_parallel_restricted(unique1))
|
|
Sort Key: (sp_parallel_restricted(tenk1.unique1))
|
|
-> Gather
|
|
Output: sp_parallel_restricted(unique1)
|
|
Workers Planned: 4
|
|
-> Parallel Seq Scan on public.tenk1
|
|
Output: unique1
|
|
Filter: (tenk1.stringu1 = 'GRAAAA'::name)
|
|
(9 rows)
|
|
|
|
-- test parallel plan when group by expression is in target list.
|
|
explain (costs off)
|
|
select length(stringu1) from tenk1 group by length(stringu1);
|
|
QUERY PLAN
|
|
---------------------------------------------------
|
|
Finalize HashAggregate
|
|
Group Key: (length((stringu1)::text))
|
|
-> Gather
|
|
Workers Planned: 4
|
|
-> Partial HashAggregate
|
|
Group Key: length((stringu1)::text)
|
|
-> Parallel Seq Scan on tenk1
|
|
(7 rows)
|
|
|
|
select length(stringu1) from tenk1 group by length(stringu1);
|
|
length
|
|
--------
|
|
6
|
|
(1 row)
|
|
|
|
explain (costs off)
|
|
select stringu1, count(*) from tenk1 group by stringu1 order by stringu1;
|
|
QUERY PLAN
|
|
----------------------------------------------------
|
|
Sort
|
|
Sort Key: stringu1
|
|
-> Finalize HashAggregate
|
|
Group Key: stringu1
|
|
-> Gather
|
|
Workers Planned: 4
|
|
-> Partial HashAggregate
|
|
Group Key: stringu1
|
|
-> Parallel Seq Scan on tenk1
|
|
(9 rows)
|
|
|
|
-- test that parallel plan for aggregates is not selected when
|
|
-- target list contains parallel restricted clause.
|
|
explain (costs off)
|
|
select sum(sp_parallel_restricted(unique1)) from tenk1
|
|
group by(sp_parallel_restricted(unique1));
|
|
QUERY PLAN
|
|
-------------------------------------------------------------------
|
|
HashAggregate
|
|
Group Key: sp_parallel_restricted(unique1)
|
|
-> Gather
|
|
Workers Planned: 4
|
|
-> Parallel Index Only Scan using tenk1_unique1 on tenk1
|
|
(5 rows)
|
|
|
|
-- test prepared statement
|
|
prepare tenk1_count(integer) As select count((unique1)) from tenk1 where hundred > $1;
|
|
explain (costs off) execute tenk1_count(1);
|
|
QUERY PLAN
|
|
----------------------------------------------
|
|
Finalize Aggregate
|
|
-> Gather
|
|
Workers Planned: 4
|
|
-> Partial Aggregate
|
|
-> Parallel Seq Scan on tenk1
|
|
Filter: (hundred > 1)
|
|
(6 rows)
|
|
|
|
execute tenk1_count(1);
|
|
count
|
|
-------
|
|
9800
|
|
(1 row)
|
|
|
|
deallocate tenk1_count;
|
|
-- test parallel plans for queries containing un-correlated subplans.
|
|
alter table tenk2 set (parallel_workers = 0);
|
|
explain (costs off)
|
|
select count(*) from tenk1 where (two, four) not in
|
|
(select hundred, thousand from tenk2 where thousand > 100);
|
|
QUERY PLAN
|
|
----------------------------------------------------------------------------------------------------------------
|
|
Finalize Aggregate
|
|
-> Gather
|
|
Workers Planned: 4
|
|
-> Partial Aggregate
|
|
-> Parallel Seq Scan on tenk1
|
|
Filter: (NOT (ANY ((two = (hashed SubPlan 1).col1) AND (four = (hashed SubPlan 1).col2))))
|
|
SubPlan 1
|
|
-> Seq Scan on tenk2
|
|
Filter: (thousand > 100)
|
|
(9 rows)
|
|
|
|
select count(*) from tenk1 where (two, four) not in
|
|
(select hundred, thousand from tenk2 where thousand > 100);
|
|
count
|
|
-------
|
|
10000
|
|
(1 row)
|
|
|
|
-- this is not parallel-safe due to use of random() within SubLink's testexpr:
|
|
explain (costs off)
|
|
select * from tenk1 where (unique1 + random())::integer not in
|
|
(select ten from tenk2);
|
|
QUERY PLAN
|
|
-------------------------------------------------------------------------------------------------------
|
|
Seq Scan on tenk1
|
|
Filter: (NOT (ANY ((((unique1)::double precision + random()))::integer = (hashed SubPlan 1).col1)))
|
|
SubPlan 1
|
|
-> Seq Scan on tenk2
|
|
(4 rows)
|
|
|
|
alter table tenk2 reset (parallel_workers);
|
|
-- test parallel plan for a query containing initplan.
|
|
set enable_indexscan = off;
|
|
set enable_indexonlyscan = off;
|
|
set enable_bitmapscan = off;
|
|
alter table tenk2 set (parallel_workers = 2);
|
|
explain (costs off)
|
|
select count(*) from tenk1
|
|
where tenk1.unique1 = (Select max(tenk2.unique1) from tenk2);
|
|
QUERY PLAN
|
|
------------------------------------------------------
|
|
Aggregate
|
|
InitPlan 1
|
|
-> Finalize Aggregate
|
|
-> Gather
|
|
Workers Planned: 2
|
|
-> Partial Aggregate
|
|
-> Parallel Seq Scan on tenk2
|
|
-> Gather
|
|
Workers Planned: 4
|
|
-> Parallel Seq Scan on tenk1
|
|
Filter: (unique1 = (InitPlan 1).col1)
|
|
(11 rows)
|
|
|
|
select count(*) from tenk1
|
|
where tenk1.unique1 = (Select max(tenk2.unique1) from tenk2);
|
|
count
|
|
-------
|
|
1
|
|
(1 row)
|
|
|
|
reset enable_indexscan;
|
|
reset enable_indexonlyscan;
|
|
reset enable_bitmapscan;
|
|
alter table tenk2 reset (parallel_workers);
|
|
-- test parallel index scans.
|
|
set enable_seqscan to off;
|
|
set enable_bitmapscan to off;
|
|
set random_page_cost = 2;
|
|
explain (costs off)
|
|
select count((unique1)) from tenk1 where hundred > 1;
|
|
QUERY PLAN
|
|
--------------------------------------------------------------------
|
|
Finalize Aggregate
|
|
-> Gather
|
|
Workers Planned: 4
|
|
-> Partial Aggregate
|
|
-> Parallel Index Scan using tenk1_hundred on tenk1
|
|
Index Cond: (hundred > 1)
|
|
(6 rows)
|
|
|
|
select count((unique1)) from tenk1 where hundred > 1;
|
|
count
|
|
-------
|
|
9800
|
|
(1 row)
|
|
|
|
-- Parallel ScalarArrayOp index scan
|
|
explain (costs off)
|
|
select count((unique1)) from tenk1
|
|
where hundred = any ((select array_agg(i) from generate_series(1, 100, 15) i)::int[]);
|
|
QUERY PLAN
|
|
---------------------------------------------------------------------
|
|
Finalize Aggregate
|
|
InitPlan 1
|
|
-> Aggregate
|
|
-> Function Scan on generate_series i
|
|
-> Gather
|
|
Workers Planned: 4
|
|
-> Partial Aggregate
|
|
-> Parallel Index Scan using tenk1_hundred on tenk1
|
|
Index Cond: (hundred = ANY ((InitPlan 1).col1))
|
|
(9 rows)
|
|
|
|
select count((unique1)) from tenk1
|
|
where hundred = any ((select array_agg(i) from generate_series(1, 100, 15) i)::int[]);
|
|
count
|
|
-------
|
|
700
|
|
(1 row)
|
|
|
|
-- test parallel index-only scans.
|
|
explain (costs off)
|
|
select count(*) from tenk1 where thousand > 95;
|
|
QUERY PLAN
|
|
--------------------------------------------------------------------------------
|
|
Finalize Aggregate
|
|
-> Gather
|
|
Workers Planned: 4
|
|
-> Partial Aggregate
|
|
-> Parallel Index Only Scan using tenk1_thous_tenthous on tenk1
|
|
Index Cond: (thousand > 95)
|
|
(6 rows)
|
|
|
|
select count(*) from tenk1 where thousand > 95;
|
|
count
|
|
-------
|
|
9040
|
|
(1 row)
|
|
|
|
-- test rescan cases too
|
|
set enable_material = false;
|
|
explain (costs off)
|
|
select * from
|
|
(select count(unique1) from tenk1 where hundred > 10) ss
|
|
right join (values (1),(2),(3)) v(x) on true;
|
|
QUERY PLAN
|
|
--------------------------------------------------------------------------
|
|
Nested Loop Left Join
|
|
-> Values Scan on "*VALUES*"
|
|
-> Finalize Aggregate
|
|
-> Gather
|
|
Workers Planned: 4
|
|
-> Partial Aggregate
|
|
-> Parallel Index Scan using tenk1_hundred on tenk1
|
|
Index Cond: (hundred > 10)
|
|
(8 rows)
|
|
|
|
select * from
|
|
(select count(unique1) from tenk1 where hundred > 10) ss
|
|
right join (values (1),(2),(3)) v(x) on true;
|
|
count | x
|
|
-------+---
|
|
8900 | 1
|
|
8900 | 2
|
|
8900 | 3
|
|
(3 rows)
|
|
|
|
explain (costs off)
|
|
select * from
|
|
(select count(*) from tenk1 where thousand > 99) ss
|
|
right join (values (1),(2),(3)) v(x) on true;
|
|
QUERY PLAN
|
|
--------------------------------------------------------------------------------------
|
|
Nested Loop Left Join
|
|
-> Values Scan on "*VALUES*"
|
|
-> Finalize Aggregate
|
|
-> Gather
|
|
Workers Planned: 4
|
|
-> Partial Aggregate
|
|
-> Parallel Index Only Scan using tenk1_thous_tenthous on tenk1
|
|
Index Cond: (thousand > 99)
|
|
(8 rows)
|
|
|
|
select * from
|
|
(select count(*) from tenk1 where thousand > 99) ss
|
|
right join (values (1),(2),(3)) v(x) on true;
|
|
count | x
|
|
-------+---
|
|
9000 | 1
|
|
9000 | 2
|
|
9000 | 3
|
|
(3 rows)
|
|
|
|
-- test rescans for a Limit node with a parallel node beneath it.
|
|
reset enable_seqscan;
|
|
set enable_indexonlyscan to off;
|
|
set enable_indexscan to off;
|
|
alter table tenk1 set (parallel_workers = 0);
|
|
alter table tenk2 set (parallel_workers = 1);
|
|
explain (costs off)
|
|
select count(*) from tenk1
|
|
left join (select tenk2.unique1 from tenk2 order by 1 limit 1000) ss
|
|
on tenk1.unique1 < ss.unique1 + 1
|
|
where tenk1.unique1 < 2;
|
|
QUERY PLAN
|
|
------------------------------------------------------------
|
|
Aggregate
|
|
-> Nested Loop Left Join
|
|
Join Filter: (tenk1.unique1 < (tenk2.unique1 + 1))
|
|
-> Seq Scan on tenk1
|
|
Filter: (unique1 < 2)
|
|
-> Limit
|
|
-> Gather Merge
|
|
Workers Planned: 1
|
|
-> Sort
|
|
Sort Key: tenk2.unique1
|
|
-> Parallel Seq Scan on tenk2
|
|
(11 rows)
|
|
|
|
select count(*) from tenk1
|
|
left join (select tenk2.unique1 from tenk2 order by 1 limit 1000) ss
|
|
on tenk1.unique1 < ss.unique1 + 1
|
|
where tenk1.unique1 < 2;
|
|
count
|
|
-------
|
|
1999
|
|
(1 row)
|
|
|
|
--reset the value of workers for each table as it was before this test.
|
|
alter table tenk1 set (parallel_workers = 4);
|
|
alter table tenk2 reset (parallel_workers);
|
|
reset enable_material;
|
|
reset enable_bitmapscan;
|
|
reset enable_indexonlyscan;
|
|
reset enable_indexscan;
|
|
-- test parallel bitmap heap scan.
|
|
set enable_seqscan to off;
|
|
set enable_indexscan to off;
|
|
set enable_hashjoin to off;
|
|
set enable_mergejoin to off;
|
|
set enable_material to off;
|
|
-- test prefetching, if the platform allows it
|
|
DO $$
|
|
BEGIN
|
|
SET effective_io_concurrency = 50;
|
|
EXCEPTION WHEN invalid_parameter_value THEN
|
|
END $$;
|
|
set work_mem='64kB'; --set small work mem to force lossy pages
|
|
explain (costs off)
|
|
select count(*) from tenk1, tenk2 where tenk1.hundred > 1 and tenk2.thousand=0;
|
|
QUERY PLAN
|
|
------------------------------------------------------------
|
|
Aggregate
|
|
-> Nested Loop
|
|
-> Seq Scan on tenk2
|
|
Filter: (thousand = 0)
|
|
-> Gather
|
|
Workers Planned: 4
|
|
-> Parallel Bitmap Heap Scan on tenk1
|
|
Recheck Cond: (hundred > 1)
|
|
-> Bitmap Index Scan on tenk1_hundred
|
|
Index Cond: (hundred > 1)
|
|
(10 rows)
|
|
|
|
select count(*) from tenk1, tenk2 where tenk1.hundred > 1 and tenk2.thousand=0;
|
|
count
|
|
-------
|
|
98000
|
|
(1 row)
|
|
|
|
create table bmscantest (a int, t text);
|
|
insert into bmscantest select r, 'fooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo' FROM generate_series(1,100000) r;
|
|
create index i_bmtest ON bmscantest(a);
|
|
select count(*) from bmscantest where a>1;
|
|
count
|
|
-------
|
|
99999
|
|
(1 row)
|
|
|
|
-- test accumulation of stats for parallel nodes
|
|
reset enable_seqscan;
|
|
alter table tenk2 set (parallel_workers = 0);
|
|
explain (analyze, timing off, summary off, costs off)
|
|
select count(*) from tenk1, tenk2 where tenk1.hundred > 1
|
|
and tenk2.thousand=0;
|
|
QUERY PLAN
|
|
--------------------------------------------------------------------------
|
|
Aggregate (actual rows=1 loops=1)
|
|
-> Nested Loop (actual rows=98000 loops=1)
|
|
-> Seq Scan on tenk2 (actual rows=10 loops=1)
|
|
Filter: (thousand = 0)
|
|
Rows Removed by Filter: 9990
|
|
-> Gather (actual rows=9800 loops=10)
|
|
Workers Planned: 4
|
|
Workers Launched: 4
|
|
-> Parallel Seq Scan on tenk1 (actual rows=1960 loops=50)
|
|
Filter: (hundred > 1)
|
|
Rows Removed by Filter: 40
|
|
(11 rows)
|
|
|
|
alter table tenk2 reset (parallel_workers);
|
|
reset work_mem;
|
|
create function explain_parallel_sort_stats() returns setof text
|
|
language plpgsql as
|
|
$$
|
|
declare ln text;
|
|
begin
|
|
for ln in
|
|
explain (analyze, timing off, summary off, costs off)
|
|
select * from
|
|
(select ten from tenk1 where ten < 100 order by ten) ss
|
|
right join (values (1),(2),(3)) v(x) on true
|
|
loop
|
|
ln := regexp_replace(ln, 'Memory: \S*', 'Memory: xxx');
|
|
return next ln;
|
|
end loop;
|
|
end;
|
|
$$;
|
|
select * from explain_parallel_sort_stats();
|
|
explain_parallel_sort_stats
|
|
--------------------------------------------------------------------------
|
|
Nested Loop Left Join (actual rows=30000 loops=1)
|
|
-> Values Scan on "*VALUES*" (actual rows=3 loops=1)
|
|
-> Gather Merge (actual rows=10000 loops=3)
|
|
Workers Planned: 4
|
|
Workers Launched: 4
|
|
-> Sort (actual rows=2000 loops=15)
|
|
Sort Key: tenk1.ten
|
|
Sort Method: quicksort Memory: xxx
|
|
Worker 0: Sort Method: quicksort Memory: xxx
|
|
Worker 1: Sort Method: quicksort Memory: xxx
|
|
Worker 2: Sort Method: quicksort Memory: xxx
|
|
Worker 3: Sort Method: quicksort Memory: xxx
|
|
-> Parallel Seq Scan on tenk1 (actual rows=2000 loops=15)
|
|
Filter: (ten < 100)
|
|
(14 rows)
|
|
|
|
reset enable_indexscan;
|
|
reset enable_hashjoin;
|
|
reset enable_mergejoin;
|
|
reset enable_material;
|
|
reset effective_io_concurrency;
|
|
drop table bmscantest;
|
|
drop function explain_parallel_sort_stats();
|
|
-- test parallel merge join path.
|
|
set enable_hashjoin to off;
|
|
set enable_nestloop to off;
|
|
explain (costs off)
|
|
select count(*) from tenk1, tenk2 where tenk1.unique1 = tenk2.unique1;
|
|
QUERY PLAN
|
|
-------------------------------------------------------------------------------
|
|
Finalize Aggregate
|
|
-> Gather
|
|
Workers Planned: 4
|
|
-> Partial Aggregate
|
|
-> Merge Join
|
|
Merge Cond: (tenk1.unique1 = tenk2.unique1)
|
|
-> Parallel Index Only Scan using tenk1_unique1 on tenk1
|
|
-> Index Only Scan using tenk2_unique1 on tenk2
|
|
(8 rows)
|
|
|
|
select count(*) from tenk1, tenk2 where tenk1.unique1 = tenk2.unique1;
|
|
count
|
|
-------
|
|
10000
|
|
(1 row)
|
|
|
|
reset enable_hashjoin;
|
|
reset enable_nestloop;
|
|
-- test gather merge
|
|
set enable_hashagg = false;
|
|
explain (costs off)
|
|
select count(*) from tenk1 group by twenty;
|
|
QUERY PLAN
|
|
----------------------------------------------------
|
|
Finalize GroupAggregate
|
|
Group Key: twenty
|
|
-> Gather Merge
|
|
Workers Planned: 4
|
|
-> Partial GroupAggregate
|
|
Group Key: twenty
|
|
-> Sort
|
|
Sort Key: twenty
|
|
-> Parallel Seq Scan on tenk1
|
|
(9 rows)
|
|
|
|
select count(*) from tenk1 group by twenty;
|
|
count
|
|
-------
|
|
500
|
|
500
|
|
500
|
|
500
|
|
500
|
|
500
|
|
500
|
|
500
|
|
500
|
|
500
|
|
500
|
|
500
|
|
500
|
|
500
|
|
500
|
|
500
|
|
500
|
|
500
|
|
500
|
|
500
|
|
(20 rows)
|
|
|
|
--test expressions in targetlist are pushed down for gather merge
|
|
create function sp_simple_func(var1 integer) returns integer
|
|
as $$
|
|
begin
|
|
return var1 + 10;
|
|
end;
|
|
$$ language plpgsql PARALLEL SAFE;
|
|
explain (costs off, verbose)
|
|
select ten, sp_simple_func(ten) from tenk1 where ten < 100 order by ten;
|
|
QUERY PLAN
|
|
-----------------------------------------------------
|
|
Gather Merge
|
|
Output: ten, (sp_simple_func(ten))
|
|
Workers Planned: 4
|
|
-> Result
|
|
Output: ten, sp_simple_func(ten)
|
|
-> Sort
|
|
Output: ten
|
|
Sort Key: tenk1.ten
|
|
-> Parallel Seq Scan on public.tenk1
|
|
Output: ten
|
|
Filter: (tenk1.ten < 100)
|
|
(11 rows)
|
|
|
|
drop function sp_simple_func(integer);
|
|
-- test handling of SRFs in targetlist (bug in 10.0)
|
|
explain (costs off)
|
|
select count(*), generate_series(1,2) from tenk1 group by twenty;
|
|
QUERY PLAN
|
|
----------------------------------------------------------
|
|
ProjectSet
|
|
-> Finalize GroupAggregate
|
|
Group Key: twenty
|
|
-> Gather Merge
|
|
Workers Planned: 4
|
|
-> Partial GroupAggregate
|
|
Group Key: twenty
|
|
-> Sort
|
|
Sort Key: twenty
|
|
-> Parallel Seq Scan on tenk1
|
|
(10 rows)
|
|
|
|
select count(*), generate_series(1,2) from tenk1 group by twenty;
|
|
count | generate_series
|
|
-------+-----------------
|
|
500 | 1
|
|
500 | 2
|
|
500 | 1
|
|
500 | 2
|
|
500 | 1
|
|
500 | 2
|
|
500 | 1
|
|
500 | 2
|
|
500 | 1
|
|
500 | 2
|
|
500 | 1
|
|
500 | 2
|
|
500 | 1
|
|
500 | 2
|
|
500 | 1
|
|
500 | 2
|
|
500 | 1
|
|
500 | 2
|
|
500 | 1
|
|
500 | 2
|
|
500 | 1
|
|
500 | 2
|
|
500 | 1
|
|
500 | 2
|
|
500 | 1
|
|
500 | 2
|
|
500 | 1
|
|
500 | 2
|
|
500 | 1
|
|
500 | 2
|
|
500 | 1
|
|
500 | 2
|
|
500 | 1
|
|
500 | 2
|
|
500 | 1
|
|
500 | 2
|
|
500 | 1
|
|
500 | 2
|
|
500 | 1
|
|
500 | 2
|
|
(40 rows)
|
|
|
|
-- test gather merge with parallel leader participation disabled
|
|
set parallel_leader_participation = off;
|
|
explain (costs off)
|
|
select count(*) from tenk1 group by twenty;
|
|
QUERY PLAN
|
|
----------------------------------------------------
|
|
Finalize GroupAggregate
|
|
Group Key: twenty
|
|
-> Gather Merge
|
|
Workers Planned: 4
|
|
-> Partial GroupAggregate
|
|
Group Key: twenty
|
|
-> Sort
|
|
Sort Key: twenty
|
|
-> Parallel Seq Scan on tenk1
|
|
(9 rows)
|
|
|
|
select count(*) from tenk1 group by twenty;
|
|
count
|
|
-------
|
|
500
|
|
500
|
|
500
|
|
500
|
|
500
|
|
500
|
|
500
|
|
500
|
|
500
|
|
500
|
|
500
|
|
500
|
|
500
|
|
500
|
|
500
|
|
500
|
|
500
|
|
500
|
|
500
|
|
500
|
|
(20 rows)
|
|
|
|
reset parallel_leader_participation;
|
|
--test rescan behavior of gather merge
|
|
set enable_material = false;
|
|
explain (costs off)
|
|
select * from
|
|
(select string4, count(unique2)
|
|
from tenk1 group by string4 order by string4) ss
|
|
right join (values (1),(2),(3)) v(x) on true;
|
|
QUERY PLAN
|
|
----------------------------------------------------------
|
|
Nested Loop Left Join
|
|
-> Values Scan on "*VALUES*"
|
|
-> Finalize GroupAggregate
|
|
Group Key: tenk1.string4
|
|
-> Gather Merge
|
|
Workers Planned: 4
|
|
-> Partial GroupAggregate
|
|
Group Key: tenk1.string4
|
|
-> Sort
|
|
Sort Key: tenk1.string4
|
|
-> Parallel Seq Scan on tenk1
|
|
(11 rows)
|
|
|
|
select * from
|
|
(select string4, count(unique2)
|
|
from tenk1 group by string4 order by string4) ss
|
|
right join (values (1),(2),(3)) v(x) on true;
|
|
string4 | count | x
|
|
---------+-------+---
|
|
AAAAxx | 2500 | 1
|
|
HHHHxx | 2500 | 1
|
|
OOOOxx | 2500 | 1
|
|
VVVVxx | 2500 | 1
|
|
AAAAxx | 2500 | 2
|
|
HHHHxx | 2500 | 2
|
|
OOOOxx | 2500 | 2
|
|
VVVVxx | 2500 | 2
|
|
AAAAxx | 2500 | 3
|
|
HHHHxx | 2500 | 3
|
|
OOOOxx | 2500 | 3
|
|
VVVVxx | 2500 | 3
|
|
(12 rows)
|
|
|
|
reset enable_material;
|
|
reset enable_hashagg;
|
|
-- check parallelized int8 aggregate (bug #14897)
|
|
explain (costs off)
|
|
select avg(unique1::int8) from tenk1;
|
|
QUERY PLAN
|
|
-------------------------------------------------------------------------
|
|
Finalize Aggregate
|
|
-> Gather
|
|
Workers Planned: 4
|
|
-> Partial Aggregate
|
|
-> Parallel Index Only Scan using tenk1_unique1 on tenk1
|
|
(5 rows)
|
|
|
|
select avg(unique1::int8) from tenk1;
|
|
avg
|
|
-----------------------
|
|
4999.5000000000000000
|
|
(1 row)
|
|
|
|
-- gather merge test with a LIMIT
|
|
explain (costs off)
|
|
select fivethous from tenk1 order by fivethous limit 4;
|
|
QUERY PLAN
|
|
----------------------------------------------
|
|
Limit
|
|
-> Gather Merge
|
|
Workers Planned: 4
|
|
-> Sort
|
|
Sort Key: fivethous
|
|
-> Parallel Seq Scan on tenk1
|
|
(6 rows)
|
|
|
|
select fivethous from tenk1 order by fivethous limit 4;
|
|
fivethous
|
|
-----------
|
|
0
|
|
0
|
|
1
|
|
1
|
|
(4 rows)
|
|
|
|
-- gather merge test with 0 worker
|
|
set max_parallel_workers = 0;
|
|
explain (costs off)
|
|
select string4 from tenk1 order by string4 limit 5;
|
|
QUERY PLAN
|
|
----------------------------------------------
|
|
Limit
|
|
-> Gather Merge
|
|
Workers Planned: 4
|
|
-> Sort
|
|
Sort Key: string4
|
|
-> Parallel Seq Scan on tenk1
|
|
(6 rows)
|
|
|
|
select string4 from tenk1 order by string4 limit 5;
|
|
string4
|
|
---------
|
|
AAAAxx
|
|
AAAAxx
|
|
AAAAxx
|
|
AAAAxx
|
|
AAAAxx
|
|
(5 rows)
|
|
|
|
-- gather merge test with 0 workers, with parallel leader
|
|
-- participation disabled (the leader will have to run the plan
|
|
-- despite the setting)
|
|
set parallel_leader_participation = off;
|
|
explain (costs off)
|
|
select string4 from tenk1 order by string4 limit 5;
|
|
QUERY PLAN
|
|
----------------------------------------------
|
|
Limit
|
|
-> Gather Merge
|
|
Workers Planned: 4
|
|
-> Sort
|
|
Sort Key: string4
|
|
-> Parallel Seq Scan on tenk1
|
|
(6 rows)
|
|
|
|
select string4 from tenk1 order by string4 limit 5;
|
|
string4
|
|
---------
|
|
AAAAxx
|
|
AAAAxx
|
|
AAAAxx
|
|
AAAAxx
|
|
AAAAxx
|
|
(5 rows)
|
|
|
|
reset parallel_leader_participation;
|
|
reset max_parallel_workers;
|
|
create function parallel_safe_volatile(a int) returns int as
|
|
$$ begin return a; end; $$ parallel safe volatile language plpgsql;
|
|
-- Test gather merge atop of a sort of a partial path
|
|
explain (costs off)
|
|
select * from tenk1 where four = 2
|
|
order by four, hundred, parallel_safe_volatile(thousand);
|
|
QUERY PLAN
|
|
---------------------------------------------------------------
|
|
Gather Merge
|
|
Workers Planned: 4
|
|
-> Sort
|
|
Sort Key: hundred, (parallel_safe_volatile(thousand))
|
|
-> Parallel Seq Scan on tenk1
|
|
Filter: (four = 2)
|
|
(6 rows)
|
|
|
|
-- Test gather merge atop of an incremental sort a of partial path
|
|
set min_parallel_index_scan_size = 0;
|
|
set enable_seqscan = off;
|
|
explain (costs off)
|
|
select * from tenk1 where four = 2
|
|
order by four, hundred, parallel_safe_volatile(thousand);
|
|
QUERY PLAN
|
|
---------------------------------------------------------------
|
|
Gather Merge
|
|
Workers Planned: 4
|
|
-> Incremental Sort
|
|
Sort Key: hundred, (parallel_safe_volatile(thousand))
|
|
Presorted Key: hundred
|
|
-> Parallel Index Scan using tenk1_hundred on tenk1
|
|
Filter: (four = 2)
|
|
(7 rows)
|
|
|
|
reset min_parallel_index_scan_size;
|
|
reset enable_seqscan;
|
|
-- Test GROUP BY with a gather merge path atop of a sort of a partial path
|
|
explain (costs off)
|
|
select count(*) from tenk1
|
|
group by twenty, parallel_safe_volatile(two);
|
|
QUERY PLAN
|
|
--------------------------------------------------------------------
|
|
Finalize GroupAggregate
|
|
Group Key: twenty, (parallel_safe_volatile(two))
|
|
-> Gather Merge
|
|
Workers Planned: 4
|
|
-> Sort
|
|
Sort Key: twenty, (parallel_safe_volatile(two))
|
|
-> Partial HashAggregate
|
|
Group Key: twenty, parallel_safe_volatile(two)
|
|
-> Parallel Seq Scan on tenk1
|
|
(9 rows)
|
|
|
|
drop function parallel_safe_volatile(int);
|
|
SAVEPOINT settings;
|
|
SET LOCAL debug_parallel_query = 1;
|
|
explain (costs off)
|
|
select stringu1::int2 from tenk1 where unique1 = 1;
|
|
QUERY PLAN
|
|
-----------------------------------------------
|
|
Gather
|
|
Workers Planned: 1
|
|
Single Copy: true
|
|
-> Index Scan using tenk1_unique1 on tenk1
|
|
Index Cond: (unique1 = 1)
|
|
(5 rows)
|
|
|
|
ROLLBACK TO SAVEPOINT settings;
|
|
-- exercise record typmod remapping between backends
|
|
CREATE FUNCTION make_record(n int)
|
|
RETURNS RECORD LANGUAGE plpgsql PARALLEL SAFE AS
|
|
$$
|
|
BEGIN
|
|
RETURN CASE n
|
|
WHEN 1 THEN ROW(1)
|
|
WHEN 2 THEN ROW(1, 2)
|
|
WHEN 3 THEN ROW(1, 2, 3)
|
|
WHEN 4 THEN ROW(1, 2, 3, 4)
|
|
ELSE ROW(1, 2, 3, 4, 5)
|
|
END;
|
|
END;
|
|
$$;
|
|
SAVEPOINT settings;
|
|
SET LOCAL debug_parallel_query = 1;
|
|
SELECT make_record(x) FROM (SELECT generate_series(1, 5) x) ss ORDER BY x;
|
|
make_record
|
|
-------------
|
|
(1)
|
|
(1,2)
|
|
(1,2,3)
|
|
(1,2,3,4)
|
|
(1,2,3,4,5)
|
|
(5 rows)
|
|
|
|
ROLLBACK TO SAVEPOINT settings;
|
|
DROP function make_record(n int);
|
|
-- test the sanity of parallel query after the active role is dropped.
|
|
drop role if exists regress_parallel_worker;
|
|
NOTICE: role "regress_parallel_worker" does not exist, skipping
|
|
create role regress_parallel_worker;
|
|
set role regress_parallel_worker;
|
|
reset session authorization;
|
|
drop role regress_parallel_worker;
|
|
set debug_parallel_query = 1;
|
|
select count(*) from tenk1;
|
|
count
|
|
-------
|
|
10000
|
|
(1 row)
|
|
|
|
reset debug_parallel_query;
|
|
reset role;
|
|
-- Window function calculation can't be pushed to workers.
|
|
explain (costs off, verbose)
|
|
select count(*) from tenk1 a where (unique1, two) in
|
|
(select unique1, row_number() over() from tenk1 b);
|
|
QUERY PLAN
|
|
----------------------------------------------------------------------------------------------
|
|
Aggregate
|
|
Output: count(*)
|
|
-> Hash Semi Join
|
|
Hash Cond: ((a.unique1 = b.unique1) AND (a.two = (row_number() OVER (?))))
|
|
-> Gather
|
|
Output: a.unique1, a.two
|
|
Workers Planned: 4
|
|
-> Parallel Seq Scan on public.tenk1 a
|
|
Output: a.unique1, a.two
|
|
-> Hash
|
|
Output: b.unique1, (row_number() OVER (?))
|
|
-> WindowAgg
|
|
Output: b.unique1, row_number() OVER (?)
|
|
-> Gather
|
|
Output: b.unique1
|
|
Workers Planned: 4
|
|
-> Parallel Index Only Scan using tenk1_unique1 on public.tenk1 b
|
|
Output: b.unique1
|
|
(18 rows)
|
|
|
|
-- LIMIT/OFFSET within sub-selects can't be pushed to workers.
|
|
explain (costs off)
|
|
select * from tenk1 a where two in
|
|
(select two from tenk1 b where stringu1 like '%AAAA' limit 3);
|
|
QUERY PLAN
|
|
---------------------------------------------------------------
|
|
Hash Semi Join
|
|
Hash Cond: (a.two = b.two)
|
|
-> Gather
|
|
Workers Planned: 4
|
|
-> Parallel Seq Scan on tenk1 a
|
|
-> Hash
|
|
-> Limit
|
|
-> Gather
|
|
Workers Planned: 4
|
|
-> Parallel Seq Scan on tenk1 b
|
|
Filter: (stringu1 ~~ '%AAAA'::text)
|
|
(11 rows)
|
|
|
|
-- to increase the parallel query test coverage
|
|
SAVEPOINT settings;
|
|
SET LOCAL debug_parallel_query = 1;
|
|
EXPLAIN (analyze, timing off, summary off, costs off) SELECT * FROM tenk1;
|
|
QUERY PLAN
|
|
-------------------------------------------------------------
|
|
Gather (actual rows=10000 loops=1)
|
|
Workers Planned: 4
|
|
Workers Launched: 4
|
|
-> Parallel Seq Scan on tenk1 (actual rows=2000 loops=5)
|
|
(4 rows)
|
|
|
|
ROLLBACK TO SAVEPOINT settings;
|
|
-- provoke error in worker
|
|
-- (make the error message long enough to require multiple bufferloads)
|
|
SAVEPOINT settings;
|
|
SET LOCAL debug_parallel_query = 1;
|
|
select (stringu1 || repeat('abcd', 5000))::int2 from tenk1 where unique1 = 1;
|
|
ERROR: invalid input syntax for type smallint: "BAAAAAabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdab
cdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdab
cdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdab
cdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdab
cdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdab
cdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdab
cdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdab
cdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdab
cdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdab
cdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdab
cdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd"
|
|
CONTEXT: parallel worker
|
|
ROLLBACK TO SAVEPOINT settings;
|
|
-- test interaction with set-returning functions
|
|
SAVEPOINT settings;
|
|
-- multiple subqueries under a single Gather node
|
|
-- must set parallel_setup_cost > 0 to discourage multiple Gather nodes
|
|
SET LOCAL parallel_setup_cost = 10;
|
|
EXPLAIN (COSTS OFF)
|
|
SELECT unique1 FROM tenk1 WHERE fivethous = tenthous + 1
|
|
UNION ALL
|
|
SELECT unique1 FROM tenk1 WHERE fivethous = tenthous + 1;
|
|
QUERY PLAN
|
|
----------------------------------------------------
|
|
Gather
|
|
Workers Planned: 4
|
|
-> Parallel Append
|
|
-> Parallel Seq Scan on tenk1
|
|
Filter: (fivethous = (tenthous + 1))
|
|
-> Parallel Seq Scan on tenk1 tenk1_1
|
|
Filter: (fivethous = (tenthous + 1))
|
|
(7 rows)
|
|
|
|
ROLLBACK TO SAVEPOINT settings;
|
|
-- can't use multiple subqueries under a single Gather node due to initPlans
|
|
EXPLAIN (COSTS OFF)
|
|
SELECT unique1 FROM tenk1 WHERE fivethous =
|
|
(SELECT unique1 FROM tenk1 WHERE fivethous = 1 LIMIT 1)
|
|
UNION ALL
|
|
SELECT unique1 FROM tenk1 WHERE fivethous =
|
|
(SELECT unique2 FROM tenk1 WHERE fivethous = 1 LIMIT 1)
|
|
ORDER BY 1;
|
|
QUERY PLAN
|
|
--------------------------------------------------------------------
|
|
Sort
|
|
Sort Key: tenk1.unique1
|
|
-> Append
|
|
-> Gather
|
|
Workers Planned: 4
|
|
InitPlan 1
|
|
-> Limit
|
|
-> Gather
|
|
Workers Planned: 4
|
|
-> Parallel Seq Scan on tenk1 tenk1_2
|
|
Filter: (fivethous = 1)
|
|
-> Parallel Seq Scan on tenk1
|
|
Filter: (fivethous = (InitPlan 1).col1)
|
|
-> Gather
|
|
Workers Planned: 4
|
|
InitPlan 2
|
|
-> Limit
|
|
-> Gather
|
|
Workers Planned: 4
|
|
-> Parallel Seq Scan on tenk1 tenk1_3
|
|
Filter: (fivethous = 1)
|
|
-> Parallel Seq Scan on tenk1 tenk1_1
|
|
Filter: (fivethous = (InitPlan 2).col1)
|
|
(23 rows)
|
|
|
|
-- test interaction with SRFs
|
|
SELECT * FROM information_schema.foreign_data_wrapper_options
|
|
ORDER BY 1, 2, 3;
|
|
foreign_data_wrapper_catalog | foreign_data_wrapper_name | option_name | option_value
|
|
------------------------------+---------------------------+-------------+--------------
|
|
(0 rows)
|
|
|
|
EXPLAIN (VERBOSE, COSTS OFF)
|
|
SELECT generate_series(1, two), array(select generate_series(1, two))
|
|
FROM tenk1 ORDER BY tenthous;
|
|
QUERY PLAN
|
|
---------------------------------------------------------------------------
|
|
ProjectSet
|
|
Output: generate_series(1, tenk1.two), ARRAY(SubPlan 1), tenk1.tenthous
|
|
-> Gather Merge
|
|
Output: tenk1.two, tenk1.tenthous
|
|
Workers Planned: 4
|
|
-> Result
|
|
Output: tenk1.two, tenk1.tenthous
|
|
-> Sort
|
|
Output: tenk1.tenthous, tenk1.two
|
|
Sort Key: tenk1.tenthous
|
|
-> Parallel Seq Scan on public.tenk1
|
|
Output: tenk1.tenthous, tenk1.two
|
|
SubPlan 1
|
|
-> ProjectSet
|
|
Output: generate_series(1, tenk1.two)
|
|
-> Result
|
|
(16 rows)
|
|
|
|
-- must disallow pushing sort below gather when pathkey contains an SRF
|
|
EXPLAIN (VERBOSE, COSTS OFF)
|
|
SELECT unnest(ARRAY[]::integer[]) + 1 AS pathkey
|
|
FROM tenk1 t1 JOIN tenk1 t2 ON TRUE
|
|
ORDER BY pathkey;
|
|
QUERY PLAN
|
|
-----------------------------------------------------------------------------------------------------
|
|
Sort
|
|
Output: (((unnest('{}'::integer[])) + 1))
|
|
Sort Key: (((unnest('{}'::integer[])) + 1))
|
|
-> Result
|
|
Output: ((unnest('{}'::integer[])) + 1)
|
|
-> ProjectSet
|
|
Output: unnest('{}'::integer[])
|
|
-> Nested Loop
|
|
-> Gather
|
|
Workers Planned: 4
|
|
-> Parallel Index Only Scan using tenk1_hundred on public.tenk1 t1
|
|
-> Materialize
|
|
-> Gather
|
|
Workers Planned: 4
|
|
-> Parallel Index Only Scan using tenk1_hundred on public.tenk1 t2
|
|
(15 rows)
|
|
|
|
-- test passing expanded-value representations to workers
|
|
CREATE FUNCTION make_some_array(int,int) returns int[] as
|
|
$$declare x int[];
|
|
begin
|
|
x[1] := $1;
|
|
x[2] := $2;
|
|
return x;
|
|
end$$ language plpgsql parallel safe;
|
|
CREATE TABLE fooarr(f1 text, f2 int[], f3 text);
|
|
INSERT INTO fooarr VALUES('1', ARRAY[1,2], 'one');
|
|
PREPARE pstmt(text, int[]) AS SELECT * FROM fooarr WHERE f1 = $1 AND f2 = $2;
|
|
EXPLAIN (COSTS OFF) EXECUTE pstmt('1', make_some_array(1,2));
|
|
QUERY PLAN
|
|
------------------------------------------------------------------
|
|
Gather
|
|
Workers Planned: 3
|
|
-> Parallel Seq Scan on fooarr
|
|
Filter: ((f1 = '1'::text) AND (f2 = '{1,2}'::integer[]))
|
|
(4 rows)
|
|
|
|
EXECUTE pstmt('1', make_some_array(1,2));
|
|
f1 | f2 | f3
|
|
----+-------+-----
|
|
1 | {1,2} | one
|
|
(1 row)
|
|
|
|
DEALLOCATE pstmt;
|
|
-- test interaction between subquery and partial_paths
|
|
CREATE VIEW tenk1_vw_sec WITH (security_barrier) AS SELECT * FROM tenk1;
|
|
EXPLAIN (COSTS OFF)
|
|
SELECT 1 FROM tenk1_vw_sec
|
|
WHERE (SELECT sum(f1) FROM int4_tbl WHERE f1 < unique1) < 100;
|
|
QUERY PLAN
|
|
-------------------------------------------------------------------
|
|
Subquery Scan on tenk1_vw_sec
|
|
Filter: ((SubPlan 1) < 100)
|
|
-> Gather
|
|
Workers Planned: 4
|
|
-> Parallel Index Only Scan using tenk1_unique1 on tenk1
|
|
SubPlan 1
|
|
-> Aggregate
|
|
-> Seq Scan on int4_tbl
|
|
Filter: (f1 < tenk1_vw_sec.unique1)
|
|
(9 rows)
|
|
|
|
rollback;
|