Adjust design of per-worker parallel seqscan data struct

The design of the data structures which allow storage of the per-worker
memory during parallel seq scans was not ideal. The work done in
56788d215 required an additional data structure to allow workers to
remember the range of pages that had been allocated to them for
processing during a parallel seqscan.  That commit added a void pointer
field to TableScanDescData to allow heapam to store the per-worker
allocation information.  However, putting the field there made very
little sense given that we have AM-specific structs for that, e.g.
HeapScanDescData.

Here we remove the void pointer field from TableScanDescData and add a
dedicated field for this purpose to HeapScanDescData.
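
Viewed at the struct level, the change simply moves the per-worker state
from the generic descriptor into heapam's own one.  A condensed sketch of
the two descriptors after this commit (members not touched here are
omitted; see the header diffs below for the exact definitions):

	typedef struct TableScanDescData
	{
		uint32		rs_flags;
		struct ParallelTableScanDescData *rs_parallel;	/* parallel scan info */
		/* other members unchanged; the void *rs_private field is gone */
	} TableScanDescData;

	typedef struct HeapScanDescData
	{
		TableScanDescData rs_base;	/* AM-independent part, must be first */
		/* NULL unless this is a parallel scan */
		ParallelBlockTableScanWorkerData *rs_parallelworkerdata;
		/* other members unchanged */
	} HeapScanDescData;

Since HeapScanDescData embeds TableScanDescData as its first member,
heapam code still reaches the generic fields through scan->rs_base, while
the worker state now has a proper type instead of a void pointer.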

Previously we also allocated memory for this parallel per-worker data for
all scans, regardless of whether the scan was parallel or not.  This was
just a wasted allocation for non-parallel scans, so here we make the
allocation conditional on the scan being parallel.
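
In heap_beginscan(), the unconditional palloc() is replaced by roughly the
following (this mirrors the heapam.c hunk further down):

	/* Only parallel scans need the per-worker block allocation state. */
	if (parallel_scan != NULL)
		scan->rs_parallelworkerdata =
			palloc(sizeof(ParallelBlockTableScanWorkerData));
	else
		scan->rs_parallelworkerdata = NULL;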

Also, add a previously missing pfree() to free the per-worker data in
heap_endscan().
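
heap_endscan() correspondingly releases that memory explicitly rather than
leaving it to the surrounding memory context, roughly:

	if (scan->rs_parallelworkerdata != NULL)
		pfree(scan->rs_parallelworkerdata);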

Reported-by: Andres Freund
Reviewed-by: Andres Freund
Discussion: https://postgr.es/m/20210317023101.anvejcfotwka6gaa@alap3.anarazel.de
Author: David Rowley
Date:   2021-03-30 10:17:09 +13:00
Commit: af527705ed
Parent: 6d7a6feac4

3 changed files with 22 additions and 7 deletions

src/backend/access/heap/heapam.c

@@ -540,7 +540,7 @@ heapgettup(HeapScanDesc scan,
 			ParallelBlockTableScanDesc pbscan =
 			(ParallelBlockTableScanDesc) scan->rs_base.rs_parallel;
 			ParallelBlockTableScanWorker pbscanwork =
-			(ParallelBlockTableScanWorker) scan->rs_base.rs_private;
+			scan->rs_parallelworkerdata;
 
 			table_block_parallelscan_startblock_init(scan->rs_base.rs_rd,
 													 pbscanwork, pbscan);
@@ -748,7 +748,7 @@ heapgettup(HeapScanDesc scan,
 			ParallelBlockTableScanDesc pbscan =
 			(ParallelBlockTableScanDesc) scan->rs_base.rs_parallel;
 			ParallelBlockTableScanWorker pbscanwork =
-			(ParallelBlockTableScanWorker) scan->rs_base.rs_private;
+			scan->rs_parallelworkerdata;
 
 			page = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
 													 pbscanwork, pbscan);
@@ -864,7 +864,7 @@ heapgettup_pagemode(HeapScanDesc scan,
 			ParallelBlockTableScanDesc pbscan =
 			(ParallelBlockTableScanDesc) scan->rs_base.rs_parallel;
 			ParallelBlockTableScanWorker pbscanwork =
-			(ParallelBlockTableScanWorker) scan->rs_base.rs_private;
+			scan->rs_parallelworkerdata;
 
 			table_block_parallelscan_startblock_init(scan->rs_base.rs_rd,
 													 pbscanwork, pbscan);
@@ -1057,7 +1057,7 @@ heapgettup_pagemode(HeapScanDesc scan,
 			ParallelBlockTableScanDesc pbscan =
 			(ParallelBlockTableScanDesc) scan->rs_base.rs_parallel;
 			ParallelBlockTableScanWorker pbscanwork =
-			(ParallelBlockTableScanWorker) scan->rs_base.rs_private;
+			scan->rs_parallelworkerdata;
 
 			page = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
 													 pbscanwork, pbscan);
@@ -1194,8 +1194,6 @@ heap_beginscan(Relation relation, Snapshot snapshot,
 	scan->rs_base.rs_nkeys = nkeys;
 	scan->rs_base.rs_flags = flags;
 	scan->rs_base.rs_parallel = parallel_scan;
-	scan->rs_base.rs_private =
-		palloc(sizeof(ParallelBlockTableScanWorkerData));
 	scan->rs_strategy = NULL;	/* set in initscan */
 
 	/*
@@ -1231,6 +1229,15 @@ heap_beginscan(Relation relation, Snapshot snapshot,
 	/* we only need to set this up once */
 	scan->rs_ctup.t_tableOid = RelationGetRelid(relation);
 
+	/*
+	 * Allocate memory to keep track of page allocation for parallel workers
+	 * when doing a parallel scan.
+	 */
+	if (parallel_scan != NULL)
+		scan->rs_parallelworkerdata = palloc(sizeof(ParallelBlockTableScanWorkerData));
+	else
+		scan->rs_parallelworkerdata = NULL;
+
 	/*
 	 * we do this here instead of in initscan() because heap_rescan also calls
 	 * initscan() and we don't want to allocate memory again
@@ -1306,6 +1313,9 @@ heap_endscan(TableScanDesc sscan)
 	if (scan->rs_strategy != NULL)
 		FreeAccessStrategy(scan->rs_strategy);
 
+	if (scan->rs_parallelworkerdata != NULL)
+		pfree(scan->rs_parallelworkerdata);
+
 	if (scan->rs_base.rs_flags & SO_TEMP_SNAPSHOT)
 		UnregisterSnapshot(scan->rs_base.rs_snapshot);

src/include/access/heapam.h

@@ -65,6 +65,12 @@ typedef struct HeapScanDescData
 	HeapTupleData rs_ctup;		/* current tuple in scan, if any */
 
+	/*
+	 * For parallel scans to store page allocation data.  NULL when not
+	 * performing a parallel scan.
+	 */
+	ParallelBlockTableScanWorkerData *rs_parallelworkerdata;
+
 	/* these fields only used in page-at-a-time mode and for bitmap scans */
 	int			rs_cindex;		/* current tuple's index in vistuples */
 	int			rs_ntuples;		/* number of visible tuples on page */

src/include/access/relscan.h

@@ -46,7 +46,6 @@ typedef struct TableScanDescData
 	 */
 	uint32		rs_flags;
 
-	void	   *rs_private;		/* per-worker private memory for AM to use */
 	struct ParallelTableScanDescData *rs_parallel;	/* parallel scan
 													 * information */
 } TableScanDescData;