mirror of
https://git.postgresql.org/git/postgresql.git
synced 2024-09-30 14:01:27 +02:00
9e7ccd9ef6
Pass CURSOR_OPT_PARALLEL_OK to pg_plan_query() so that parallel plans
are considered when running the underlying SELECT query. This wasn't
done in commit e9baa5e9, which did this for CREATE MATERIALIZED VIEW,
because it wasn't yet known to be safe.
Since REFRESH always inserts into a freshly created table before later
merging or swapping the data into place with separate operations, we can
enable such plans here too.
Author: Bharath Rupireddy <bharath.rupireddyforpostgres@gmail.com>
Reviewed-by: Hou, Zhijie <houzj.fnst@cn.fujitsu.com>
Reviewed-by: Luc Vlaming <luc@swarm64.com>
Reviewed-by: Thomas Munro <thomas.munro@gmail.com>
Discussion: https://postgr.es/m/CALj2ACXg-4hNKJC6nFnepRHYT4t5jJVstYvri%2BtKQHy7ydcr8A%40mail.gmail.com
81 lines
2.8 KiB
Plaintext
--
|
|
-- PARALLEL
|
|
--
|
|
-- Everything below runs inside one transaction; the GUC changes are
-- undone by the rollback at the end of the file.
begin;
|
|
-- encourage use of parallel plans
|
|
-- (zeroed costs and a zero size threshold make even small tables
-- eligible, so the plans below reliably choose Gather)
set parallel_setup_cost=0;
|
|
set parallel_tuple_cost=0;
|
|
set min_parallel_table_scan_size=0;
|
|
set max_parallel_workers_per_gather=4;
|
|
--
|
|
-- Test write operations that has an underlying query that is eligible
|
|
-- for parallel plans
|
|
--
|
|
-- EXPLAIN of CREATE TABLE AS: the underlying SELECT is expected to be
-- planned with a parallel aggregate (Gather over Parallel Seq Scan).
explain (costs off) create table parallel_write as
|
|
select length(stringu1) from tenk1 group by length(stringu1);
|
|
QUERY PLAN
|
|
---------------------------------------------------
|
|
Finalize HashAggregate
|
|
Group Key: (length((stringu1)::text))
|
|
-> Gather
|
|
Workers Planned: 4
|
|
-> Partial HashAggregate
|
|
Group Key: length((stringu1)::text)
|
|
-> Parallel Seq Scan on tenk1
|
|
(7 rows)
|
|
|
|
-- Run the same CREATE TABLE AS for real, then drop the result.
create table parallel_write as
|
|
select length(stringu1) from tenk1 group by length(stringu1);
|
|
drop table parallel_write;
|
|
-- Repeat the test using SELECT ... INTO; the same parallel plan is expected.
explain (costs off) select length(stringu1) into parallel_write
|
|
from tenk1 group by length(stringu1);
|
|
QUERY PLAN
|
|
---------------------------------------------------
|
|
Finalize HashAggregate
|
|
Group Key: (length((stringu1)::text))
|
|
-> Gather
|
|
Workers Planned: 4
|
|
-> Partial HashAggregate
|
|
Group Key: length((stringu1)::text)
|
|
-> Parallel Seq Scan on tenk1
|
|
(7 rows)
|
|
|
|
-- Execute the SELECT ... INTO for real, then drop the table.
select length(stringu1) into parallel_write
|
|
from tenk1 group by length(stringu1);
|
|
drop table parallel_write;
|
|
-- CREATE MATERIALIZED VIEW runs the same underlying query and should
-- likewise get a parallel plan.
explain (costs off) create materialized view parallel_mat_view as
|
|
select length(stringu1) from tenk1 group by length(stringu1);
|
|
QUERY PLAN
|
|
---------------------------------------------------
|
|
Finalize HashAggregate
|
|
Group Key: (length((stringu1)::text))
|
|
-> Gather
|
|
Workers Planned: 4
|
|
-> Partial HashAggregate
|
|
Group Key: length((stringu1)::text)
|
|
-> Parallel Seq Scan on tenk1
|
|
(7 rows)
|
|
|
|
-- Build the materialized view, then exercise both refresh paths
-- (plain REFRESH and REFRESH ... CONCURRENTLY).
create materialized view parallel_mat_view as
|
|
select length(stringu1) from tenk1 group by length(stringu1);
|
|
-- REFRESH ... CONCURRENTLY requires a unique index on the view.
create unique index on parallel_mat_view(length);
|
|
refresh materialized view parallel_mat_view;
|
|
refresh materialized view concurrently parallel_mat_view;
|
|
drop materialized view parallel_mat_view;
|
|
-- A CTAS whose source is EXECUTE of a prepared statement should also
-- be planned in parallel.
prepare prep_stmt as select length(stringu1) from tenk1 group by length(stringu1);
|
|
explain (costs off) create table parallel_write as execute prep_stmt;
|
|
QUERY PLAN
|
|
---------------------------------------------------
|
|
Finalize HashAggregate
|
|
Group Key: (length((stringu1)::text))
|
|
-> Gather
|
|
Workers Planned: 4
|
|
-> Partial HashAggregate
|
|
Group Key: length((stringu1)::text)
|
|
-> Parallel Seq Scan on tenk1
|
|
(7 rows)
|
|
|
|
-- Execute the prepared-statement CTAS for real, then clean up.
create table parallel_write as execute prep_stmt;
|
|
drop table parallel_write;
|
|
-- undoes the GUC changes made at the top of the file
rollback;
|