diff --git a/src/backend/executor/nodeLimit.c b/src/backend/executor/nodeLimit.c
index baa669abe8..5e4d02ce4a 100644
--- a/src/backend/executor/nodeLimit.c
+++ b/src/backend/executor/nodeLimit.c
@@ -129,19 +129,17 @@ ExecLimit(PlanState *pstate)
 				 * we are at the end of the window, return NULL without
 				 * advancing the subplan or the position variable; but change
 				 * the state machine state to record having done so.
+				 *
+				 * Once at the end, ideally, we can shut down parallel
+				 * resources but that would destroy the parallel context which
+				 * would be required for rescans.  To do that, we need to find
+				 * a way to pass down more information about whether rescans
+				 * are possible.
 				 */
 				if (!node->noCount &&
 					node->position - node->offset >= node->count)
 				{
 					node->lstate = LIMIT_WINDOWEND;
-
-					/*
-					 * If we know we won't need to back up, we can release
-					 * resources at this point.
-					 */
-					if (!(node->ps.state->es_top_eflags & EXEC_FLAG_BACKWARD))
-						(void) ExecShutdownNode(outerPlan);
-
 					return NULL;
 				}
 
diff --git a/src/test/regress/expected/select_parallel.out b/src/test/regress/expected/select_parallel.out
index 0eca76cb41..191ffe7e23 100644
--- a/src/test/regress/expected/select_parallel.out
+++ b/src/test/regress/expected/select_parallel.out
@@ -455,9 +455,48 @@ select * from
   9000 | 3
 (3 rows)
 
-reset enable_material;
+-- test rescans for a Limit node with a parallel node beneath it.
 reset enable_seqscan;
+set enable_indexonlyscan to off;
+set enable_indexscan to off;
+alter table tenk1 set (parallel_workers = 0);
+alter table tenk2 set (parallel_workers = 1);
+explain (costs off)
+select count(*) from tenk1
+  left join (select tenk2.unique1 from tenk2 order by 1 limit 1000) ss
+  on tenk1.unique1 < ss.unique1 + 1
+  where tenk1.unique1 < 2;
+                         QUERY PLAN
+------------------------------------------------------------
+ Aggregate
+   ->  Nested Loop Left Join
+         Join Filter: (tenk1.unique1 < (tenk2.unique1 + 1))
+         ->  Seq Scan on tenk1
+               Filter: (unique1 < 2)
+         ->  Limit
+               ->  Gather Merge
+                     Workers Planned: 1
+                     ->  Sort
+                           Sort Key: tenk2.unique1
+                           ->  Parallel Seq Scan on tenk2
+(11 rows)
+
+select count(*) from tenk1
+  left join (select tenk2.unique1 from tenk2 order by 1 limit 1000) ss
+  on tenk1.unique1 < ss.unique1 + 1
+  where tenk1.unique1 < 2;
+ count
+-------
+  1999
+(1 row)
+
+--reset the value of workers for each table as it was before this test.
+alter table tenk1 set (parallel_workers = 4);
+alter table tenk2 reset (parallel_workers);
+reset enable_material;
 reset enable_bitmapscan;
+reset enable_indexonlyscan;
+reset enable_indexscan;
 -- test parallel bitmap heap scan.
 set enable_seqscan to off;
 set enable_indexscan to off;
diff --git a/src/test/regress/sql/select_parallel.sql b/src/test/regress/sql/select_parallel.sql
index 03c056b8b7..11e7735533 100644
--- a/src/test/regress/sql/select_parallel.sql
+++ b/src/test/regress/sql/select_parallel.sql
@@ -168,9 +168,29 @@ select * from
   (select count(*) from tenk1 where thousand > 99) ss
   right join (values (1),(2),(3)) v(x) on true;
 
-reset enable_material;
+-- test rescans for a Limit node with a parallel node beneath it.
 reset enable_seqscan;
+set enable_indexonlyscan to off;
+set enable_indexscan to off;
+alter table tenk1 set (parallel_workers = 0);
+alter table tenk2 set (parallel_workers = 1);
+explain (costs off)
+select count(*) from tenk1
+  left join (select tenk2.unique1 from tenk2 order by 1 limit 1000) ss
+  on tenk1.unique1 < ss.unique1 + 1
+  where tenk1.unique1 < 2;
+select count(*) from tenk1
+  left join (select tenk2.unique1 from tenk2 order by 1 limit 1000) ss
+  on tenk1.unique1 < ss.unique1 + 1
+  where tenk1.unique1 < 2;
+--reset the value of workers for each table as it was before this test.
+alter table tenk1 set (parallel_workers = 4);
+alter table tenk2 reset (parallel_workers);
+
+reset enable_material;
 reset enable_bitmapscan;
+reset enable_indexonlyscan;
+reset enable_indexscan;
 
 -- test parallel bitmap heap scan.
 set enable_seqscan to off;
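
A note on the new comment's open question, for illustration only. This sketch is not part of the patch, and the expect_rescan field is hypothetical; one conceivable way to "pass down more information about whether rescans are possible" would be for ExecInitLimit to remember EXEC_FLAG_REWIND and for ExecLimit to skip the early shutdown whenever a rescan may happen. EXEC_FLAG_REWIND alone is not a complete answer, since it is deliberately cleared for parameterized rescans, which is presumably why the comment leaves this as future work.

	/* Hypothetical new field in LimitState (execnodes.h): bool expect_rescan; */

	/* In ExecInitLimit(): remember whether the parent may rescan us. */
	limitstate->expect_rescan = (eflags & EXEC_FLAG_REWIND) != 0;

	/* In ExecLimit(), once the end of the window is reached: */
	if (!node->noCount &&
		node->position - node->offset >= node->count)
	{
		node->lstate = LIMIT_WINDOWEND;

		/*
		 * Shut down the subplan early only if we will neither back up nor
		 * be rescanned, so that a Gather/Gather Merge below us keeps its
		 * parallel context alive for the rescan case.
		 */
		if (!(node->ps.state->es_top_eflags & EXEC_FLAG_BACKWARD) &&
			!node->expect_rescan)
			(void) ExecShutdownNode(outerPlan);

		return NULL;
	}

With something along those lines, the regression test added above (a nested loop whose inner side is the Limit over Gather Merge, rescanned once per qualifying tenk1 row) would still keep the parallel context, while the simple top-level LIMIT case could again release its workers early.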