Avoid query-lifetime memory leaks in XMLTABLE (bug #15321)

Multiple calls to XMLTABLE in a query (e.g. laterally applying it to a
table with an xml column, an important use-case) were leaking large
amounts of memory into the per-query context, blowing up memory usage.
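
For illustration, the problematic query shape looks roughly like this (the
"docs" table and its xml column "body" are hypothetical, not taken from the
original report):

    SELECT d.id, x.*
    FROM docs AS d,
         LATERAL XMLTABLE('/order/item'
                          PASSING d.body
                          COLUMNS part text PATH 'part',
                                  qty  int  PATH 'qty') AS x;

Each row of "docs" triggers a fresh XMLTABLE evaluation, and before this fix
the allocations made for each such evaluation lived until the end of the
query.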

Repair by reorganizing memory context usage in nodeTableFuncscan; use
the usual per-tuple context for row-by-row evaluations instead of
perValueCxt, and use the explicitly created context -- renamed from
perValueCxt to perTableCxt -- for arguments and state for each
individual table-generation operation.

Backpatch to PG10 where this code was introduced.

Original report by IRC user begriffs; analysis and patch by me.
Reviewed by Tom Lane and Pavel Stehule.

Discussion: https://postgr.es/m/153394403528.10284.7530399040974170549@wrigleys.postgresql.org
Author: Andrew Gierth
Date:   2018-08-13 01:45:35 +01:00
Commit: 07172d5aff (parent 46b5e7c4b5)
2 changed files with 25 additions and 6 deletions

src/backend/executor/nodeTableFuncscan.c

@@ -164,7 +164,7 @@ ExecInitTableFuncScan(TableFuncScan *node, EState *estate, int eflags)
     /* Only XMLTABLE is supported currently */
     scanstate->routine = &XmlTableRoutine;
 
-    scanstate->perValueCxt =
+    scanstate->perTableCxt =
         AllocSetContextCreate(CurrentMemoryContext,
                               "TableFunc per value context",
                               ALLOCSET_DEFAULT_SIZES);
@@ -282,6 +282,16 @@ tfuncFetchRows(TableFuncScanState *tstate, ExprContext *econtext)
     oldcxt = MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
     tstate->tupstore = tuplestore_begin_heap(false, false, work_mem);
 
+    /*
+     * Each call to fetch a new set of rows - of which there may be very many
+     * if XMLTABLE is being used in a lateral join - will allocate a possibly
+     * substantial amount of memory, so we cannot use the per-query context
+     * here. perTableCxt now serves the same function as "argcontext" does in
+     * FunctionScan - a place to store per-one-call (i.e. one result table)
+     * lifetime data (as opposed to per-query or per-result-tuple).
+     */
+    MemoryContextSwitchTo(tstate->perTableCxt);
+
     PG_TRY();
     {
         routine->InitOpaque(tstate,
@@ -313,8 +323,7 @@ tfuncFetchRows(TableFuncScanState *tstate, ExprContext *econtext)
     }
     PG_END_TRY();
 
-    /* return to original memory context, and clean up */
-    MemoryContextSwitchTo(oldcxt);
+    /* clean up and return to original memory context */
 
     if (tstate->opaque != NULL)
     {
@@ -322,6 +331,9 @@ tfuncFetchRows(TableFuncScanState *tstate, ExprContext *econtext)
         tstate->opaque = NULL;
     }
 
+    MemoryContextSwitchTo(oldcxt);
+    MemoryContextReset(tstate->perTableCxt);
+
     return;
 }
@@ -428,7 +440,14 @@ tfuncLoadRows(TableFuncScanState *tstate, ExprContext *econtext)
     ordinalitycol =
         ((TableFuncScan *) (tstate->ss.ps.plan))->tablefunc->ordinalitycol;
-    oldcxt = MemoryContextSwitchTo(tstate->perValueCxt);
+
+    /*
+     * We need a short-lived memory context that we can clean up each time
+     * around the loop, to avoid wasting space. Our default per-tuple context
+     * is fine for the job, since we won't have used it for anything yet in
+     * this tuple cycle.
+     */
+    oldcxt = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
 
     /*
      * Keep requesting rows from the table builder until there aren't any.
@@ -493,7 +512,7 @@ tfuncLoadRows(TableFuncScanState *tstate, ExprContext *econtext)
         tuplestore_putvalues(tstate->tupstore, tupdesc, values, nulls);
 
-        MemoryContextReset(tstate->perValueCxt);
+        MemoryContextReset(econtext->ecxt_per_tuple_memory);
     }
 
     MemoryContextSwitchTo(oldcxt);

src/include/nodes/execnodes.h

@@ -1580,7 +1580,7 @@ typedef struct TableFuncScanState
     FmgrInfo   *in_functions;     /* input function for each column */
     Oid        *typioparams;      /* typioparam for each column */
     int64       ordinal;          /* row number to be output next */
-    MemoryContext perValueCxt;    /* short life context for value evaluation */
+    MemoryContext perTableCxt;    /* per-table context */
     Tuplestorestate *tupstore;    /* output tuple store */
 } TableFuncScanState;