Refactor hash_agg_entry_size().
Consolidate the calculations for hash table size estimation. This will help with upcoming Hash Aggregation work that will add additional call sites.
This commit is contained in:
parent
c02fdc9223
commit
7d4395d0a1
|
@ -1422,24 +1422,17 @@ find_hash_columns(AggState *aggstate)
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Estimate per-hash-table-entry overhead for the planner.
|
* Estimate per-hash-table-entry overhead.
|
||||||
*
|
|
||||||
* Note that the estimate does not include space for pass-by-reference
|
|
||||||
* transition data values, nor for the representative tuple of each group.
|
|
||||||
* Nor does this account of the target fill-factor and growth policy of the
|
|
||||||
* hash table.
|
|
||||||
*/
|
*/
|
||||||
Size
|
Size
|
||||||
hash_agg_entry_size(int numAggs)
|
hash_agg_entry_size(int numAggs, Size tupleWidth, Size transitionSpace)
|
||||||
{
|
{
|
||||||
Size entrysize;
|
return
|
||||||
|
MAXALIGN(SizeofMinimalTupleHeader) +
|
||||||
/* This must match build_hash_table */
|
MAXALIGN(tupleWidth) +
|
||||||
entrysize = sizeof(TupleHashEntryData) +
|
MAXALIGN(sizeof(TupleHashEntryData) +
|
||||||
numAggs * sizeof(AggStatePerGroupData);
|
numAggs * sizeof(AggStatePerGroupData)) +
|
||||||
entrysize = MAXALIGN(entrysize);
|
transitionSpace;
|
||||||
|
|
||||||
return entrysize;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
|
@ -4867,13 +4867,8 @@ create_distinct_paths(PlannerInfo *root,
|
||||||
allow_hash = false; /* policy-based decision not to hash */
|
allow_hash = false; /* policy-based decision not to hash */
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
Size hashentrysize;
|
Size hashentrysize = hash_agg_entry_size(
|
||||||
|
0, cheapest_input_path->pathtarget->width, 0);
|
||||||
/* Estimate per-hash-entry space at tuple width... */
|
|
||||||
hashentrysize = MAXALIGN(cheapest_input_path->pathtarget->width) +
|
|
||||||
MAXALIGN(SizeofMinimalTupleHeader);
|
|
||||||
/* plus the per-hash-entry overhead */
|
|
||||||
hashentrysize += hash_agg_entry_size(0);
|
|
||||||
|
|
||||||
/* Allow hashing only if hashtable is predicted to fit in work_mem */
|
/* Allow hashing only if hashtable is predicted to fit in work_mem */
|
||||||
allow_hash = (hashentrysize * numDistinctRows <= work_mem * 1024L);
|
allow_hash = (hashentrysize * numDistinctRows <= work_mem * 1024L);
|
||||||
|
|
|
@ -3526,16 +3526,8 @@ double
|
||||||
estimate_hashagg_tablesize(Path *path, const AggClauseCosts *agg_costs,
|
estimate_hashagg_tablesize(Path *path, const AggClauseCosts *agg_costs,
|
||||||
double dNumGroups)
|
double dNumGroups)
|
||||||
{
|
{
|
||||||
Size hashentrysize;
|
Size hashentrysize = hash_agg_entry_size(
|
||||||
|
agg_costs->numAggs, path->pathtarget->width, agg_costs->transitionSpace);
|
||||||
/* Estimate per-hash-entry space at tuple width... */
|
|
||||||
hashentrysize = MAXALIGN(path->pathtarget->width) +
|
|
||||||
MAXALIGN(SizeofMinimalTupleHeader);
|
|
||||||
|
|
||||||
/* plus space for pass-by-ref transition values... */
|
|
||||||
hashentrysize += agg_costs->transitionSpace;
|
|
||||||
/* plus the per-hash-entry overhead */
|
|
||||||
hashentrysize += hash_agg_entry_size(agg_costs->numAggs);
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Note that this disregards the effect of fill-factor and growth policy
|
* Note that this disregards the effect of fill-factor and growth policy
|
||||||
|
|
|
@ -309,6 +309,7 @@ extern AggState *ExecInitAgg(Agg *node, EState *estate, int eflags);
|
||||||
extern void ExecEndAgg(AggState *node);
|
extern void ExecEndAgg(AggState *node);
|
||||||
extern void ExecReScanAgg(AggState *node);
|
extern void ExecReScanAgg(AggState *node);
|
||||||
|
|
||||||
/* Estimate per-hash-table-entry overhead (see nodeAgg.c). */
extern Size hash_agg_entry_size(int numAggs, Size tupleWidth,
								Size transitionSpace);

#endif							/* NODEAGG_H */
|
||||||
|
|
Loading…
Reference in New Issue