diff --git a/contrib/postgres_fdw/expected/postgres_fdw.out b/contrib/postgres_fdw/expected/postgres_fdw.out
index e5bbf3b0af..ed25e7a743 100644
--- a/contrib/postgres_fdw/expected/postgres_fdw.out
+++ b/contrib/postgres_fdw/expected/postgres_fdw.out
@@ -1602,7 +1602,7 @@ SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) FULL
   20 |  0 | AAA020
 (10 rows)
 
-SET enable_resultcache TO off;
+SET enable_memoize TO off;
 -- right outer join + left outer join
 EXPLAIN (VERBOSE, COSTS OFF)
 SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) LEFT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
@@ -1629,7 +1629,7 @@ SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) LEFT
   20 |  0 | AAA020
 (10 rows)
 
-RESET enable_resultcache;
+RESET enable_memoize;
 -- left outer join + right outer join
 EXPLAIN (VERBOSE, COSTS OFF)
 SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) RIGHT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
@@ -2149,7 +2149,7 @@ SELECT t1."C 1" FROM "S 1"."T 1" t1, LATERAL (SELECT DISTINCT t2.c1, t3.c1 FROM
                Output: t1."C 1"
                ->  Index Scan using t1_pkey on "S 1"."T 1" t1
                      Output: t1."C 1", t1.c2, t1.c3, t1.c4, t1.c5, t1.c6, t1.c7, t1.c8
-         ->  Result Cache
+         ->  Memoize
                Cache Key: t1.c2
                ->  Subquery Scan on q
                      ->  HashAggregate
diff --git a/contrib/postgres_fdw/sql/postgres_fdw.sql b/contrib/postgres_fdw/sql/postgres_fdw.sql
index fe503ed6c3..02a6b15a13 100644
--- a/contrib/postgres_fdw/sql/postgres_fdw.sql
+++ b/contrib/postgres_fdw/sql/postgres_fdw.sql
@@ -502,12 +502,12 @@ SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 FULL JOIN ft2 t2 ON (t1.c1 = t2.c1) LEFT
 EXPLAIN (VERBOSE, COSTS OFF)
 SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) FULL JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
 SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) FULL JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
-SET enable_resultcache TO off;
+SET enable_memoize TO off;
 -- right outer join + left outer join
 EXPLAIN (VERBOSE, COSTS OFF)
 SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) LEFT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
 SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) LEFT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
-RESET enable_resultcache;
+RESET enable_memoize;
 -- left outer join + right outer join
 EXPLAIN (VERBOSE, COSTS OFF)
 SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) RIGHT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10;
diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml
index d1b889b80f..43772c2a98 100644
--- a/doc/src/sgml/config.sgml
+++ b/doc/src/sgml/config.sgml
@@ -5018,15 +5018,15 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable
      </listitem>
     </varlistentry>
 
-    <varlistentry id="guc-enable-resultcache" xreflabel="enable_resultcache">
-     <term><varname>enable_resultcache</varname> (<type>boolean</type>)
+    <varlistentry id="guc-enable-memoize" xreflabel="enable_memoize">
+     <term><varname>enable_memoize</varname> (<type>boolean</type>)
      <indexterm>
-      <primary><varname>enable_resultcache</varname> configuration parameter</primary>
+      <primary><varname>enable_memoize</varname> configuration parameter</primary>
      </indexterm>
      </term>
      <listitem>
      <para>
-       Enables or disables the query planner's use of result cache plans for
+       Enables or disables the query planner's use of memoize plans for
        caching results from parameterized scans inside nested-loop joins.  This
        plan type allows scans to the underlying plans to be skipped when the
        results for the current parameters are already in the cache.
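At the SQL level, the rename above means the GUC is now enable_memoize and EXPLAIN prints a Memoize node above the parameterized inner scan, with the same "Cache Key:" detail seen in the expected output. A minimal sketch on hypothetical tables (the table names and plan shape are illustrative; the planner only chooses Memoize when it expects rescans with duplicate parameter values):

    CREATE TABLE orders (id int PRIMARY KEY, customer_id int);
    CREATE TABLE customers (id int PRIMARY KEY, name text);

    SET enable_memoize TO on;  -- formerly: SET enable_resultcache TO on;

    EXPLAIN (COSTS OFF)
    SELECT * FROM orders o JOIN customers c ON c.id = o.customer_id;
    -- A possible plan:
    --  Nested Loop
    --    ->  Seq Scan on orders o
    --    ->  Memoize
    --          Cache Key: o.customer_id
    --          ->  Index Scan using customers_pkey on customers c
    --                Index Cond: (id = o.customer_id)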
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index e81b990092..340db2bac4 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -109,8 +109,8 @@ static void show_sort_info(SortState *sortstate, ExplainState *es);
 static void show_incremental_sort_info(IncrementalSortState *incrsortstate,
                                        ExplainState *es);
 static void show_hash_info(HashState *hashstate, ExplainState *es);
-static void show_resultcache_info(ResultCacheState *rcstate, List *ancestors,
-                                  ExplainState *es);
+static void show_memoize_info(MemoizeState *mstate, List *ancestors,
+                              ExplainState *es);
 static void show_hashagg_info(AggState *hashstate, ExplainState *es);
 static void show_tidbitmap_info(BitmapHeapScanState *planstate,
                                 ExplainState *es);
@@ -1298,8 +1298,8 @@ ExplainNode(PlanState *planstate, List *ancestors,
         case T_Material:
             pname = sname = "Materialize";
             break;
-        case T_ResultCache:
-            pname = sname = "Result Cache";
+        case T_Memoize:
+            pname = sname = "Memoize";
             break;
         case T_Sort:
             pname = sname = "Sort";
             break;
@@ -2013,9 +2013,9 @@ ExplainNode(PlanState *planstate, List *ancestors,
         case T_Hash:
             show_hash_info(castNode(HashState, planstate), es);
             break;
-        case T_ResultCache:
-            show_resultcache_info(castNode(ResultCacheState, planstate),
-                                  ancestors, es);
+        case T_Memoize:
+            show_memoize_info(castNode(MemoizeState, planstate), ancestors,
+                              es);
             break;
         default:
             break;
@@ -3085,13 +3085,12 @@ show_hash_info(HashState *hashstate, ExplainState *es)
 }
 
 /*
- * Show information on result cache hits/misses/evictions and memory usage.
+ * Show information on memoize hits/misses/evictions and memory usage.
 */
 static void
-show_resultcache_info(ResultCacheState *rcstate, List *ancestors,
-                      ExplainState *es)
+show_memoize_info(MemoizeState *mstate, List *ancestors, ExplainState *es)
 {
-    Plan       *plan = ((PlanState *) rcstate)->plan;
+    Plan       *plan = ((PlanState *) mstate)->plan;
     ListCell   *lc;
     List       *context;
     StringInfoData keystr;
@@ -3102,7 +3101,7 @@ show_resultcache_info(ResultCacheState *rcstate, List *ancestors,
     initStringInfo(&keystr);
 
     /*
-     * It's hard to imagine having a result cache with fewer than 2 RTEs, but
+     * It's hard to imagine having a memoize node with fewer than 2 RTEs, but
      * let's just keep the same useprefix logic as elsewhere in this file.
      */
     useprefix = list_length(es->rtable) > 1 || es->verbose;
@@ -3112,7 +3111,7 @@ show_resultcache_info(ResultCacheState *rcstate, List *ancestors,
                                            plan,
                                            ancestors);
 
-    foreach(lc, ((ResultCache *) plan)->param_exprs)
+    foreach(lc, ((Memoize *) plan)->param_exprs)
     {
         Node       *expr = (Node *) lfirst(lc);
 
@@ -3138,23 +3137,23 @@ show_resultcache_info(ResultCacheState *rcstate, List *ancestors,
     if (!es->analyze)
         return;
 
-    if (rcstate->stats.cache_misses > 0)
+    if (mstate->stats.cache_misses > 0)
     {
         /*
         * mem_peak is only set when we freed memory, so we must use mem_used
         * when mem_peak is 0.
         */
-        if (rcstate->stats.mem_peak > 0)
-            memPeakKb = (rcstate->stats.mem_peak + 1023) / 1024;
+        if (mstate->stats.mem_peak > 0)
+            memPeakKb = (mstate->stats.mem_peak + 1023) / 1024;
         else
-            memPeakKb = (rcstate->mem_used + 1023) / 1024;
+            memPeakKb = (mstate->mem_used + 1023) / 1024;
 
         if (es->format != EXPLAIN_FORMAT_TEXT)
         {
-            ExplainPropertyInteger("Cache Hits", NULL, rcstate->stats.cache_hits, es);
-            ExplainPropertyInteger("Cache Misses", NULL, rcstate->stats.cache_misses, es);
-            ExplainPropertyInteger("Cache Evictions", NULL, rcstate->stats.cache_evictions, es);
-            ExplainPropertyInteger("Cache Overflows", NULL, rcstate->stats.cache_overflows, es);
+            ExplainPropertyInteger("Cache Hits", NULL, mstate->stats.cache_hits, es);
+            ExplainPropertyInteger("Cache Misses", NULL, mstate->stats.cache_misses, es);
+            ExplainPropertyInteger("Cache Evictions", NULL, mstate->stats.cache_evictions, es);
+            ExplainPropertyInteger("Cache Overflows", NULL, mstate->stats.cache_overflows, es);
             ExplainPropertyInteger("Peak Memory Usage", "kB", memPeakKb, es);
         }
         else
@@ -3162,23 +3161,23 @@ show_resultcache_info(ResultCacheState *rcstate, List *ancestors,
             ExplainIndentText(es);
             appendStringInfo(es->str,
                              "Hits: " UINT64_FORMAT "  Misses: " UINT64_FORMAT "  Evictions: " UINT64_FORMAT "  Overflows: " UINT64_FORMAT "  Memory Usage: " INT64_FORMAT "kB\n",
-                             rcstate->stats.cache_hits,
-                             rcstate->stats.cache_misses,
-                             rcstate->stats.cache_evictions,
-                             rcstate->stats.cache_overflows,
+                             mstate->stats.cache_hits,
+                             mstate->stats.cache_misses,
+                             mstate->stats.cache_evictions,
+                             mstate->stats.cache_overflows,
                              memPeakKb);
         }
     }
 
-    if (rcstate->shared_info == NULL)
+    if (mstate->shared_info == NULL)
         return;
 
     /* Show details from parallel workers */
-    for (int n = 0; n < rcstate->shared_info->num_workers; n++)
+    for (int n = 0; n < mstate->shared_info->num_workers; n++)
     {
-        ResultCacheInstrumentation *si;
+        MemoizeInstrumentation *si;
 
-        si = &rcstate->shared_info->sinstrument[n];
+        si = &mstate->shared_info->sinstrument[n];
 
         /*
         * Skip workers that didn't do any work.  We needn't bother checking
@@ -3191,10 +3190,10 @@ show_resultcache_info(ResultCacheState *rcstate, List *ancestors,
         ExplainOpenWorker(n, es);
 
         /*
-         * Since the worker's ResultCacheState.mem_used field is unavailable
-         * to us, ExecEndResultCache will have set the
-         * ResultCacheInstrumentation.mem_peak field for us.  No need to do
-         * the zero checks like we did for the serial case above.
+         * Since the worker's MemoizeState.mem_used field is unavailable to
+         * us, ExecEndMemoize will have set the
+         * MemoizeInstrumentation.mem_peak field for us.  No need to do the
+         * zero checks like we did for the serial case above.
         */
         memPeakKb = (si->mem_peak + 1023) / 1024;
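With ANALYZE, the code wired up above prints a cache-statistics line under each Memoize node. An illustrative run against the hypothetical tables from the earlier sketch (all counts are invented; the Hits/Misses/Evictions/Overflows/Memory Usage labels come straight from the appendStringInfo format string in show_memoize_info):

    EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF)
    SELECT * FROM orders o JOIN customers c ON c.id = o.customer_id;
    -- Possible output:
    --  Nested Loop (actual rows=10000 loops=1)
    --    ->  Seq Scan on orders o (actual rows=10000 loops=1)
    --    ->  Memoize (actual rows=1 loops=10000)
    --          Cache Key: o.customer_id
    --          Hits: 9900  Misses: 100  Evictions: 0  Overflows: 0  Memory Usage: 17kB
    --          ->  Index Scan using customers_pkey on customers c (actual rows=1 loops=100)
    --                Index Cond: (id = o.customer_id)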
diff --git a/src/backend/executor/Makefile b/src/backend/executor/Makefile
index f08b282a5e..11118d0ce0 100644
--- a/src/backend/executor/Makefile
+++ b/src/backend/executor/Makefile
@@ -53,6 +53,7 @@ OBJS = \
     nodeLimit.o \
     nodeLockRows.o \
     nodeMaterial.o \
+    nodeMemoize.o \
     nodeMergeAppend.o \
     nodeMergejoin.o \
     nodeModifyTable.o \
@@ -61,7 +62,6 @@ OBJS = \
     nodeProjectSet.o \
     nodeRecursiveunion.o \
     nodeResult.o \
-    nodeResultCache.o \
     nodeSamplescan.o \
     nodeSeqscan.o \
     nodeSetOp.o \
diff --git a/src/backend/executor/execAmi.c b/src/backend/executor/execAmi.c
index 522b1c2086..d0c52a38b4 100644
--- a/src/backend/executor/execAmi.c
+++ b/src/backend/executor/execAmi.c
@@ -36,6 +36,7 @@
 #include "executor/nodeLimit.h"
 #include "executor/nodeLockRows.h"
 #include "executor/nodeMaterial.h"
+#include "executor/nodeMemoize.h"
 #include "executor/nodeMergeAppend.h"
 #include "executor/nodeMergejoin.h"
 #include "executor/nodeModifyTable.h"
@@ -44,7 +45,6 @@
 #include "executor/nodeProjectSet.h"
 #include "executor/nodeRecursiveunion.h"
 #include "executor/nodeResult.h"
-#include "executor/nodeResultCache.h"
 #include "executor/nodeSamplescan.h"
 #include "executor/nodeSeqscan.h"
 #include "executor/nodeSetOp.h"
@@ -255,8 +255,8 @@ ExecReScan(PlanState *node)
             ExecReScanMaterial((MaterialState *) node);
             break;
 
-        case T_ResultCacheState:
-            ExecReScanResultCache((ResultCacheState *) node);
+        case T_MemoizeState:
+            ExecReScanMemoize((MemoizeState *) node);
             break;
 
         case T_SortState:
diff --git a/src/backend/executor/execParallel.c b/src/backend/executor/execParallel.c
index 12c41d746b..f8a4a40e7b 100644
--- a/src/backend/executor/execParallel.c
+++ b/src/backend/executor/execParallel.c
@@ -35,7 +35,7 @@
 #include "executor/nodeIncrementalSort.h"
 #include "executor/nodeIndexonlyscan.h"
 #include "executor/nodeIndexscan.h"
-#include "executor/nodeResultCache.h"
+#include "executor/nodeMemoize.h"
 #include "executor/nodeSeqscan.h"
 #include "executor/nodeSort.h"
 #include "executor/nodeSubplan.h"
@@ -293,9 +293,9 @@ ExecParallelEstimate(PlanState *planstate, ExecParallelEstimateContext *e)
             /* even when not parallel-aware, for EXPLAIN ANALYZE */
             ExecAggEstimate((AggState *) planstate, e->pcxt);
             break;
-        case T_ResultCacheState:
+        case T_MemoizeState:
             /* even when not parallel-aware, for EXPLAIN ANALYZE */
-            ExecResultCacheEstimate((ResultCacheState *) planstate, e->pcxt);
+            ExecMemoizeEstimate((MemoizeState *) planstate, e->pcxt);
             break;
         default:
             break;
@@ -517,9 +517,9 @@ ExecParallelInitializeDSM(PlanState *planstate,
             /* even when not parallel-aware, for EXPLAIN ANALYZE */
             ExecAggInitializeDSM((AggState *) planstate, d->pcxt);
             break;
-        case T_ResultCacheState:
+        case T_MemoizeState:
             /* even when not parallel-aware, for EXPLAIN ANALYZE */
-            ExecResultCacheInitializeDSM((ResultCacheState *) planstate, d->pcxt);
+            ExecMemoizeInitializeDSM((MemoizeState *) planstate, d->pcxt);
             break;
         default:
             break;
@@ -997,7 +997,7 @@ ExecParallelReInitializeDSM(PlanState *planstate,
         case T_HashState:
         case T_SortState:
         case T_IncrementalSortState:
-        case T_ResultCacheState:
+        case T_MemoizeState:
             /* these nodes have DSM state, but no reinitialization is required */
             break;
 
@@ -1067,8 +1067,8 @@ ExecParallelRetrieveInstrumentation(PlanState *planstate,
         case T_AggState:
             ExecAggRetrieveInstrumentation((AggState *) planstate);
             break;
-        case T_ResultCacheState:
-            ExecResultCacheRetrieveInstrumentation((ResultCacheState *) planstate);
+        case T_MemoizeState:
+            ExecMemoizeRetrieveInstrumentation((MemoizeState *) planstate);
             break;
         default:
             break;
@@ -1362,10 +1362,9 @@ ExecParallelInitializeWorker(PlanState *planstate, ParallelWorkerContext *pwcxt)
             /* even when not parallel-aware, for EXPLAIN ANALYZE */
             ExecAggInitializeWorker((AggState *) planstate, pwcxt);
             break;
-        case T_ResultCacheState:
+        case T_MemoizeState:
             /* even when not parallel-aware, for EXPLAIN ANALYZE */
-            ExecResultCacheInitializeWorker((ResultCacheState *) planstate,
-                                            pwcxt);
+            ExecMemoizeInitializeWorker((MemoizeState *) planstate, pwcxt);
             break;
         default:
             break;
diff --git a/src/backend/executor/execProcnode.c b/src/backend/executor/execProcnode.c
index 753f46863b..1752b9bfd8 100644
--- a/src/backend/executor/execProcnode.c
+++ b/src/backend/executor/execProcnode.c
@@ -94,6 +94,7 @@
 #include "executor/nodeLimit.h"
 #include "executor/nodeLockRows.h"
 #include "executor/nodeMaterial.h"
+#include "executor/nodeMemoize.h"
 #include "executor/nodeMergeAppend.h"
 #include "executor/nodeMergejoin.h"
 #include "executor/nodeModifyTable.h"
@@ -102,7 +103,6 @@
 #include "executor/nodeProjectSet.h"
 #include "executor/nodeRecursiveunion.h"
 #include "executor/nodeResult.h"
-#include "executor/nodeResultCache.h"
 #include "executor/nodeSamplescan.h"
 #include "executor/nodeSeqscan.h"
 #include "executor/nodeSetOp.h"
@@ -326,9 +326,9 @@ ExecInitNode(Plan *node, EState *estate, int eflags)
                                                    estate, eflags);
             break;
 
-        case T_ResultCache:
-            result = (PlanState *) ExecInitResultCache((ResultCache *) node,
-                                                       estate, eflags);
+        case T_Memoize:
+            result = (PlanState *) ExecInitMemoize((Memoize *) node, estate,
+                                                   eflags);
             break;
 
         case T_Group:
@@ -720,8 +720,8 @@ ExecEndNode(PlanState *node)
             ExecEndIncrementalSort((IncrementalSortState *) node);
             break;
 
-        case T_ResultCacheState:
-            ExecEndResultCache((ResultCacheState *) node);
+        case T_MemoizeState:
+            ExecEndMemoize((MemoizeState *) node);
             break;
 
         case T_GroupState:
diff --git a/src/backend/executor/nodeResultCache.c b/src/backend/executor/nodeMemoize.c
similarity index 65%
rename from src/backend/executor/nodeResultCache.c
rename to src/backend/executor/nodeMemoize.c
index 471900346f..2fde4ebce6 100644
--- a/src/backend/executor/nodeResultCache.c
+++ b/src/backend/executor/nodeMemoize.c
@@ -1,6 +1,6 @@
 /*-------------------------------------------------------------------------
 *
- * nodeResultCache.c
+ * nodeMemoize.c
 *      Routines to handle caching of results from parameterized nodes
 *
 * Portions Copyright (c) 2021, PostgreSQL Global Development Group
@@ -8,9 +8,9 @@
 *
 *
 * IDENTIFICATION
- *      src/backend/executor/nodeResultCache.c
+ *      src/backend/executor/nodeMemoize.c
 *
- * ResultCache nodes are intended to sit above parameterized nodes in the plan
+ * Memoize nodes are intended to sit above parameterized nodes in the plan
 * tree in order to cache results from them.  The intention here is that a
 * repeat scan with a parameter value that has already been seen by the node
 * can fetch tuples from the cache rather than having to re-scan the outer
@@ -43,24 +43,24 @@
 * happens then we'll have already evicted all other cache entries.  When
 * caching another tuple would cause us to exceed our memory budget, we must
 * free the entry that we're currently populating and move the state machine
- * into RC_CACHE_BYPASS_MODE.  This means that we'll not attempt to cache any
- * further tuples for this particular scan.  We don't have the memory for it.
- * The state machine will be reset again on the next rescan.  If the memory
- * requirements to cache the next parameter's tuples are less demanding, then
- * that may allow us to start putting useful entries back into the cache
- * again.
+ * into MEMO_CACHE_BYPASS_MODE.  This means that we'll not attempt to cache
+ * any further tuples for this particular scan.  We don't have the memory for
+ * it.  The state machine will be reset again on the next rescan.  If the
+ * memory requirements to cache the next parameter's tuples are less
+ * demanding, then that may allow us to start putting useful entries back into
+ * the cache again.
 *
 *
 * INTERFACE ROUTINES
- *      ExecResultCache         - lookup cache, exec subplan when not found
- *      ExecInitResultCache     - initialize node and subnodes
- *      ExecEndResultCache      - shutdown node and subnodes
- *      ExecReScanResultCache   - rescan the result cache
+ *      ExecMemoize         - lookup cache, exec subplan when not found
+ *      ExecInitMemoize     - initialize node and subnodes
+ *      ExecEndMemoize      - shutdown node and subnodes
+ *      ExecReScanMemoize   - rescan the memoize node
 *
- *      ExecResultCacheEstimate     estimates DSM space needed for parallel plan
- *      ExecResultCacheInitializeDSM    initialize DSM for parallel plan
- *      ExecResultCacheInitializeWorker attach to DSM info in parallel worker
- *      ExecResultCacheRetrieveInstrumentation get instrumentation from worker
+ *      ExecMemoizeEstimate     estimates DSM space needed for parallel plan
+ *      ExecMemoizeInitializeDSM    initialize DSM for parallel plan
+ *      ExecMemoizeInitializeWorker attach to DSM info in parallel worker
+ *      ExecMemoizeRetrieveInstrumentation get instrumentation from worker
 *-------------------------------------------------------------------------
 */
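The bypass mode described in this header comment is observable from SQL: if the memory budget (get_hash_mem(), i.e. work_mem scaled by hash_mem_multiplier) cannot hold even the entry currently being filled, the node stops caching for the remainder of that scan and counts an overflow. An illustrative, non-deterministic sketch against the hypothetical tables used earlier:

    SET work_mem = '64kB';  -- shrink the cache budget for the example
    EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF)
    SELECT * FROM orders o JOIN customers c ON c.id = o.customer_id;
    -- The Memoize statistics line may then report evictions and overflows, e.g.:
    --          Hits: 0  Misses: 10000  Evictions: 9800  Overflows: 2  Memory Usage: 65kB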
@@ -68,79 +68,79 @@
 #include "common/hashfn.h"
 #include "executor/executor.h"
-#include "executor/nodeResultCache.h"
+#include "executor/nodeMemoize.h"
 #include "lib/ilist.h"
 #include "miscadmin.h"
 #include "utils/lsyscache.h"
 
-/* States of the ExecResultCache state machine */
-#define RC_CACHE_LOOKUP             1   /* Attempt to perform a cache lookup */
-#define RC_CACHE_FETCH_NEXT_TUPLE   2   /* Get another tuple from the cache */
-#define RC_FILLING_CACHE            3   /* Read outer node to fill cache */
-#define RC_CACHE_BYPASS_MODE        4   /* Bypass mode.  Just read from our
+/* States of the ExecMemoize state machine */
+#define MEMO_CACHE_LOOKUP           1   /* Attempt to perform a cache lookup */
+#define MEMO_CACHE_FETCH_NEXT_TUPLE 2   /* Get another tuple from the cache */
+#define MEMO_FILLING_CACHE          3   /* Read outer node to fill cache */
+#define MEMO_CACHE_BYPASS_MODE      4   /* Bypass mode.  Just read from our
                                          * subplan without caching anything */
-#define RC_END_OF_SCAN              5   /* Ready for rescan */
+#define MEMO_END_OF_SCAN            5   /* Ready for rescan */
 
 /* Helper macros for memory accounting */
-#define EMPTY_ENTRY_MEMORY_BYTES(e) (sizeof(ResultCacheEntry) + \
-                                     sizeof(ResultCacheKey) + \
+#define EMPTY_ENTRY_MEMORY_BYTES(e) (sizeof(MemoizeEntry) + \
+                                     sizeof(MemoizeKey) + \
                                      (e)->key->params->t_len);
-#define CACHE_TUPLE_BYTES(t)        (sizeof(ResultCacheTuple) + \
+#define CACHE_TUPLE_BYTES(t)        (sizeof(MemoizeTuple) + \
                                      (t)->mintuple->t_len)
 
- /* ResultCacheTuple Stores an individually cached tuple */
-typedef struct ResultCacheTuple
+ /* MemoizeTuple Stores an individually cached tuple */
+typedef struct MemoizeTuple
 {
     MinimalTuple mintuple;      /* Cached tuple */
-    struct ResultCacheTuple *next;  /* The next tuple with the same parameter
-                                     * values or NULL if it's the last one */
-} ResultCacheTuple;
+    struct MemoizeTuple *next;  /* The next tuple with the same parameter
+                                 * values or NULL if it's the last one */
+} MemoizeTuple;
 
 /*
- * ResultCacheKey
+ * MemoizeKey
 *      The hash table key for cached entries plus the LRU list link
 */
-typedef struct ResultCacheKey
+typedef struct MemoizeKey
 {
     MinimalTuple params;
     dlist_node  lru_node;       /* Pointer to next/prev key in LRU list */
-} ResultCacheKey;
+} MemoizeKey;
 
 /*
- * ResultCacheEntry
+ * MemoizeEntry
 *      The data struct that the cache hash table stores
 */
-typedef struct ResultCacheEntry
+typedef struct MemoizeEntry
 {
-    ResultCacheKey *key;        /* Hash key for hash table lookups */
-    ResultCacheTuple *tuplehead;    /* Pointer to the first tuple or NULL if
-                                     * no tuples are cached for this entry */
+    MemoizeKey *key;            /* Hash key for hash table lookups */
+    MemoizeTuple *tuplehead;    /* Pointer to the first tuple or NULL if no
+                                 * tuples are cached for this entry */
     uint32      hash;           /* Hash value (cached) */
     char        status;         /* Hash status */
     bool        complete;       /* Did we read the outer plan to completion? */
-} ResultCacheEntry;
+} MemoizeEntry;
 
-#define SH_PREFIX resultcache
-#define SH_ELEMENT_TYPE ResultCacheEntry
-#define SH_KEY_TYPE ResultCacheKey *
+#define SH_PREFIX memoize
+#define SH_ELEMENT_TYPE MemoizeEntry
+#define SH_KEY_TYPE MemoizeKey *
 #define SH_SCOPE static inline
 #define SH_DECLARE
 #include "lib/simplehash.h"
 
-static uint32 ResultCacheHash_hash(struct resultcache_hash *tb,
-                                   const ResultCacheKey *key);
-static int  ResultCacheHash_equal(struct resultcache_hash *tb,
-                                  const ResultCacheKey *params1,
-                                  const ResultCacheKey *params2);
+static uint32 MemoizeHash_hash(struct memoize_hash *tb,
+                               const MemoizeKey *key);
+static int  MemoizeHash_equal(struct memoize_hash *tb,
+                              const MemoizeKey *params1,
+                              const MemoizeKey *params2);
 
-#define SH_PREFIX resultcache
-#define SH_ELEMENT_TYPE ResultCacheEntry
-#define SH_KEY_TYPE ResultCacheKey *
+#define SH_PREFIX memoize
+#define SH_ELEMENT_TYPE MemoizeEntry
+#define SH_KEY_TYPE MemoizeKey *
 #define SH_KEY key
-#define SH_HASH_KEY(tb, key) ResultCacheHash_hash(tb, key)
-#define SH_EQUAL(tb, a, b) (ResultCacheHash_equal(tb, a, b) == 0)
+#define SH_HASH_KEY(tb, key) MemoizeHash_hash(tb, key)
+#define SH_EQUAL(tb, a, b) (MemoizeHash_equal(tb, a, b) == 0)
 #define SH_SCOPE static inline
 #define SH_STORE_HASH
 #define SH_GET_HASH(tb, a) a->hash
@@ -148,20 +148,20 @@ static int  ResultCacheHash_equal(struct resultcache_hash *tb,
 #include "lib/simplehash.h"
 
 /*
- * ResultCacheHash_hash
+ * MemoizeHash_hash
 *      Hash function for simplehash hashtable.  'key' is unused here as we
- *      require that all table lookups first populate the ResultCacheState's
+ *      require that all table lookups first populate the MemoizeState's
 *      probeslot with the key values to be looked up.
 */
 static uint32
-ResultCacheHash_hash(struct resultcache_hash *tb, const ResultCacheKey *key)
+MemoizeHash_hash(struct memoize_hash *tb, const MemoizeKey *key)
 {
-    ResultCacheState *rcstate = (ResultCacheState *) tb->private_data;
-    TupleTableSlot *pslot = rcstate->probeslot;
+    MemoizeState *mstate = (MemoizeState *) tb->private_data;
+    TupleTableSlot *pslot = mstate->probeslot;
     uint32      hashkey = 0;
-    int         numkeys = rcstate->nkeys;
-    FmgrInfo   *hashfunctions = rcstate->hashfunctions;
-    Oid        *collations = rcstate->collations;
+    int         numkeys = mstate->nkeys;
+    FmgrInfo   *hashfunctions = mstate->hashfunctions;
+    Oid        *collations = mstate->collations;
 
     for (int i = 0; i < numkeys; i++)
     {
@@ -182,56 +182,54 @@ ResultCacheHash_hash(struct resultcache_hash *tb, const ResultCacheKey *key)
 }
 
 /*
- * ResultCacheHash_equal
+ * MemoizeHash_equal
 *      Equality function for confirming hash value matches during a hash
- *      table lookup.  'key2' is never used.  Instead the ResultCacheState's
+ *      table lookup.  'key2' is never used.  Instead the MemoizeState's
 *      probeslot is always populated with details of what's being looked up.
 */
 static int
-ResultCacheHash_equal(struct resultcache_hash *tb, const ResultCacheKey *key1,
-                      const ResultCacheKey *key2)
+MemoizeHash_equal(struct memoize_hash *tb, const MemoizeKey *key1,
+                  const MemoizeKey *key2)
 {
-    ResultCacheState *rcstate = (ResultCacheState *) tb->private_data;
-    ExprContext *econtext = rcstate->ss.ps.ps_ExprContext;
-    TupleTableSlot *tslot = rcstate->tableslot;
-    TupleTableSlot *pslot = rcstate->probeslot;
+    MemoizeState *mstate = (MemoizeState *) tb->private_data;
+    ExprContext *econtext = mstate->ss.ps.ps_ExprContext;
+    TupleTableSlot *tslot = mstate->tableslot;
+    TupleTableSlot *pslot = mstate->probeslot;
 
     /* probeslot should have already been prepared by prepare_probe_slot() */
-
     ExecStoreMinimalTuple(key1->params, tslot, false);
 
     econtext->ecxt_innertuple = tslot;
     econtext->ecxt_outertuple = pslot;
-    return !ExecQualAndReset(rcstate->cache_eq_expr, econtext);
+    return !ExecQualAndReset(mstate->cache_eq_expr, econtext);
 }
 
 /*
 * Initialize the hash table to empty.
 */
 static void
-build_hash_table(ResultCacheState *rcstate, uint32 size)
+build_hash_table(MemoizeState *mstate, uint32 size)
 {
     /* Make a guess at a good size when we're not given a valid size. */
     if (size == 0)
         size = 1024;
 
-    /* resultcache_create will convert the size to a power of 2 */
-    rcstate->hashtable = resultcache_create(rcstate->tableContext, size,
-                                            rcstate);
+    /* memoize_create will convert the size to a power of 2 */
+    mstate->hashtable = memoize_create(mstate->tableContext, size, mstate);
 }
 
 /*
 * prepare_probe_slot
- *      Populate rcstate's probeslot with the values from the tuple stored
+ *      Populate mstate's probeslot with the values from the tuple stored
 *      in 'key'.  If 'key' is NULL, then perform the population by evaluating
- *      rcstate's param_exprs.
+ *      mstate's param_exprs.
 */
 static inline void
-prepare_probe_slot(ResultCacheState *rcstate, ResultCacheKey *key)
+prepare_probe_slot(MemoizeState *mstate, MemoizeKey *key)
 {
-    TupleTableSlot *pslot = rcstate->probeslot;
-    TupleTableSlot *tslot = rcstate->tableslot;
-    int         numKeys = rcstate->nkeys;
+    TupleTableSlot *pslot = mstate->probeslot;
+    TupleTableSlot *tslot = mstate->tableslot;
+    int         numKeys = mstate->nkeys;
 
     ExecClearTuple(pslot);
 
@@ -239,8 +237,8 @@ prepare_probe_slot(ResultCacheState *rcstate, ResultCacheKey *key)
     {
         /* Set the probeslot's values based on the current parameter values */
         for (int i = 0; i < numKeys; i++)
-            pslot->tts_values[i] = ExecEvalExpr(rcstate->param_exprs[i],
-                                                rcstate->ss.ps.ps_ExprContext,
+            pslot->tts_values[i] = ExecEvalExpr(mstate->param_exprs[i],
+                                                mstate->ss.ps.ps_ExprContext,
                                                 &pslot->tts_isnull[i]);
     }
     else
@@ -262,14 +260,14 @@ prepare_probe_slot(ResultCacheState *rcstate, ResultCacheKey *key)
 *      reflect the removal of the tuples.
 */
 static inline void
-entry_purge_tuples(ResultCacheState *rcstate, ResultCacheEntry *entry)
+entry_purge_tuples(MemoizeState *mstate, MemoizeEntry *entry)
 {
-    ResultCacheTuple *tuple = entry->tuplehead;
+    MemoizeTuple *tuple = entry->tuplehead;
     uint64      freed_mem = 0;
 
     while (tuple != NULL)
     {
-        ResultCacheTuple *next = tuple->next;
+        MemoizeTuple *next = tuple->next;
 
         freed_mem += CACHE_TUPLE_BYTES(tuple);
 
@@ -284,7 +282,7 @@ entry_purge_tuples(ResultCacheState *rcstate, ResultCacheEntry *entry)
     entry->tuplehead = NULL;
 
     /* Update the memory accounting */
-    rcstate->mem_used -= freed_mem;
+    mstate->mem_used -= freed_mem;
 }
 
 /*
@@ -292,24 +290,24 @@ entry_purge_tuples(ResultCacheState *rcstate, ResultCacheEntry *entry)
 *      Remove 'entry' from the cache and free memory used by it.
 */
 static void
-remove_cache_entry(ResultCacheState *rcstate, ResultCacheEntry *entry)
+remove_cache_entry(MemoizeState *mstate, MemoizeEntry *entry)
 {
-    ResultCacheKey *key = entry->key;
+    MemoizeKey *key = entry->key;
 
     dlist_delete(&entry->key->lru_node);
 
     /* Remove all of the tuples from this entry */
-    entry_purge_tuples(rcstate, entry);
+    entry_purge_tuples(mstate, entry);
 
     /*
     * Update memory accounting. entry_purge_tuples should have already
     * subtracted the memory used for each cached tuple. Here we just update
     * the amount used by the entry itself.
     */
-    rcstate->mem_used -= EMPTY_ENTRY_MEMORY_BYTES(entry);
+    mstate->mem_used -= EMPTY_ENTRY_MEMORY_BYTES(entry);
 
     /* Remove the entry from the cache */
-    resultcache_delete_item(rcstate->hashtable, entry);
+    memoize_delete_item(mstate->hashtable, entry);
 
     pfree(key->params);
     pfree(key);
@@ -319,37 +317,36 @@ remove_cache_entry(ResultCacheState *rcstate, ResultCacheEntry *entry)
 * cache_reduce_memory
 *      Evict older and less recently used items from the cache in order to
 *      reduce the memory consumption back to something below the
- *      ResultCacheState's mem_limit.
+ *      MemoizeState's mem_limit.
 *
 * 'specialkey', if not NULL, causes the function to return false if the entry
 * which the key belongs to is removed from the cache.
 */
 static bool
-cache_reduce_memory(ResultCacheState *rcstate, ResultCacheKey *specialkey)
+cache_reduce_memory(MemoizeState *mstate, MemoizeKey *specialkey)
 {
     bool        specialkey_intact = true;   /* for now */
     dlist_mutable_iter iter;
     uint64      evictions = 0;
 
     /* Update peak memory usage */
-    if (rcstate->mem_used > rcstate->stats.mem_peak)
-        rcstate->stats.mem_peak = rcstate->mem_used;
+    if (mstate->mem_used > mstate->stats.mem_peak)
+        mstate->stats.mem_peak = mstate->mem_used;
 
     /* We expect only to be called when we've gone over budget on memory */
-    Assert(rcstate->mem_used > rcstate->mem_limit);
+    Assert(mstate->mem_used > mstate->mem_limit);
 
     /* Start the eviction process starting at the head of the LRU list. */
-    dlist_foreach_modify(iter, &rcstate->lru_list)
+    dlist_foreach_modify(iter, &mstate->lru_list)
     {
-        ResultCacheKey *key = dlist_container(ResultCacheKey, lru_node,
-                                              iter.cur);
-        ResultCacheEntry *entry;
+        MemoizeKey *key = dlist_container(MemoizeKey, lru_node, iter.cur);
+        MemoizeEntry *entry;
 
         /*
         * Populate the hash probe slot in preparation for looking up this LRU
         * entry.
         */
-        prepare_probe_slot(rcstate, key);
+        prepare_probe_slot(mstate, key);
 
         /*
         * Ideally the LRU list pointers would be stored in the entry itself
@@ -362,7 +359,7 @@ cache_reduce_memory(ResultCacheState *rcstate, ResultCacheKey *specialkey)
         * pointer to the key here, we must perform a hash table lookup to
         * find the entry that the key belongs to.
         */
-        entry = resultcache_lookup(rcstate->hashtable, NULL);
+        entry = memoize_lookup(mstate->hashtable, NULL);
 
         /* A good spot to check for corruption of the table and LRU list. */
         Assert(entry != NULL);
@@ -383,23 +380,23 @@ cache_reduce_memory(ResultCacheState *rcstate, ResultCacheKey *specialkey)
         /*
         * Finally remove the entry.  This will remove from the LRU list too.
         */
-        remove_cache_entry(rcstate, entry);
+        remove_cache_entry(mstate, entry);
 
         evictions++;
 
         /* Exit if we've freed enough memory */
-        if (rcstate->mem_used <= rcstate->mem_limit)
+        if (mstate->mem_used <= mstate->mem_limit)
             break;
     }
 
-    rcstate->stats.cache_evictions += evictions;    /* Update Stats */
+    mstate->stats.cache_evictions += evictions; /* Update Stats */
 
     return specialkey_intact;
 }
 
 /*
 * cache_lookup
- *      Perform a lookup to see if we've already cached results based on the
+ *      Perform a lookup to see if we've already cached tuples based on the
 *      scan's current parameters.  If we find an existing entry we move it to
 *      the end of the LRU list, set *found to true then return it.  If we
 *      don't find an entry then we create a new one and add it to the end of
@@ -409,21 +406,21 @@
 *
 * Callers can assume we'll never return NULL when *found is true.
 */
-static ResultCacheEntry *
-cache_lookup(ResultCacheState *rcstate, bool *found)
+static MemoizeEntry *
+cache_lookup(MemoizeState *mstate, bool *found)
 {
-    ResultCacheKey *key;
-    ResultCacheEntry *entry;
+    MemoizeKey *key;
+    MemoizeEntry *entry;
     MemoryContext oldcontext;
 
     /* prepare the probe slot with the current scan parameters */
-    prepare_probe_slot(rcstate, NULL);
+    prepare_probe_slot(mstate, NULL);
 
     /*
     * Add the new entry to the cache.  No need to pass a valid key since the
-     * hash function uses rcstate's probeslot, which we populated above.
+     * hash function uses mstate's probeslot, which we populated above.
     */
-    entry = resultcache_insert(rcstate->hashtable, NULL, found);
+    entry = memoize_insert(mstate->hashtable, NULL, found);
 
     if (*found)
     {
@@ -431,19 +428,19 @@ cache_lookup(ResultCacheState *rcstate, bool *found)
         /*
         * Move existing entry to the tail of the LRU list to mark it as the
         * most recently used item.
         */
-        dlist_move_tail(&rcstate->lru_list, &entry->key->lru_node);
+        dlist_move_tail(&mstate->lru_list, &entry->key->lru_node);
 
         return entry;
     }
 
-    oldcontext = MemoryContextSwitchTo(rcstate->tableContext);
+    oldcontext = MemoryContextSwitchTo(mstate->tableContext);
 
     /* Allocate a new key */
-    entry->key = key = (ResultCacheKey *) palloc(sizeof(ResultCacheKey));
-    key->params = ExecCopySlotMinimalTuple(rcstate->probeslot);
+    entry->key = key = (MemoizeKey *) palloc(sizeof(MemoizeKey));
+    key->params = ExecCopySlotMinimalTuple(mstate->probeslot);
 
     /* Update the total cache memory utilization */
-    rcstate->mem_used += EMPTY_ENTRY_MEMORY_BYTES(entry);
+    mstate->mem_used += EMPTY_ENTRY_MEMORY_BYTES(entry);
 
     /* Initialize this entry */
     entry->complete = false;
@@ -453,9 +450,9 @@ cache_lookup(ResultCacheState *rcstate, bool *found)
     * Since this is the most recently used entry, push this entry onto the
     * end of the LRU list.
     */
-    dlist_push_tail(&rcstate->lru_list, &entry->key->lru_node);
+    dlist_push_tail(&mstate->lru_list, &entry->key->lru_node);
 
-    rcstate->last_tuple = NULL;
+    mstate->last_tuple = NULL;
 
     MemoryContextSwitchTo(oldcontext);
 
@@ -463,7 +460,7 @@ cache_lookup(ResultCacheState *rcstate, bool *found)
     * If we've gone over our memory budget, then we'll free up some space in
     * the cache.
     */
-    if (rcstate->mem_used > rcstate->mem_limit)
+    if (mstate->mem_used > mstate->mem_limit)
     {
         /*
         * Try to free up some memory.  It's highly unlikely that we'll fail
@@ -471,7 +468,7 @@ cache_lookup(ResultCacheState *rcstate, bool *found)
         * any tuples and we're able to remove any other entry to reduce the
         * memory consumption.
         */
-        if (unlikely(!cache_reduce_memory(rcstate, key)))
+        if (unlikely(!cache_reduce_memory(mstate, key)))
             return NULL;
 
         /*
@@ -482,16 +479,16 @@ cache_lookup(ResultCacheState *rcstate, bool *found)
         * happened by seeing if the entry is still in use and that the key
         * pointer matches our expected key.
         */
-        if (entry->status != resultcache_SH_IN_USE || entry->key != key)
+        if (entry->status != memoize_SH_IN_USE || entry->key != key)
         {
             /*
             * We need to repopulate the probeslot as lookups performed during
             * the cache evictions above will have stored some other key.
             */
-            prepare_probe_slot(rcstate, key);
+            prepare_probe_slot(mstate, key);
 
             /* Re-find the newly added entry */
-            entry = resultcache_lookup(rcstate->hashtable, NULL);
+            entry = memoize_lookup(mstate->hashtable, NULL);
             Assert(entry != NULL);
         }
     }
@@ -501,29 +498,29 @@ cache_lookup(ResultCacheState *rcstate, bool *found)
 
 /*
 * cache_store_tuple
- *      Add the tuple stored in 'slot' to the rcstate's current cache entry.
+ *      Add the tuple stored in 'slot' to the mstate's current cache entry.
 *      The cache entry must have already been made with cache_lookup().
- *      rcstate's last_tuple field must point to the tail of rcstate->entry's
+ *      mstate's last_tuple field must point to the tail of mstate->entry's
 *      list of tuples.
 */
 static bool
-cache_store_tuple(ResultCacheState *rcstate, TupleTableSlot *slot)
+cache_store_tuple(MemoizeState *mstate, TupleTableSlot *slot)
 {
-    ResultCacheTuple *tuple;
-    ResultCacheEntry *entry = rcstate->entry;
+    MemoizeTuple *tuple;
+    MemoizeEntry *entry = mstate->entry;
     MemoryContext oldcontext;
 
     Assert(slot != NULL);
     Assert(entry != NULL);
 
-    oldcontext = MemoryContextSwitchTo(rcstate->tableContext);
+    oldcontext = MemoryContextSwitchTo(mstate->tableContext);
 
-    tuple = (ResultCacheTuple *) palloc(sizeof(ResultCacheTuple));
+    tuple = (MemoizeTuple *) palloc(sizeof(MemoizeTuple));
     tuple->mintuple = ExecCopySlotMinimalTuple(slot);
     tuple->next = NULL;
 
     /* Account for the memory we just consumed */
-    rcstate->mem_used += CACHE_TUPLE_BYTES(tuple);
+    mstate->mem_used += CACHE_TUPLE_BYTES(tuple);
 
     if (entry->tuplehead == NULL)
     {
@@ -536,21 +533,21 @@ cache_store_tuple(ResultCacheState *rcstate, TupleTableSlot *slot)
     else
     {
         /* push this tuple onto the tail of the list */
-        rcstate->last_tuple->next = tuple;
+        mstate->last_tuple->next = tuple;
     }
 
-    rcstate->last_tuple = tuple;
+    mstate->last_tuple = tuple;
     MemoryContextSwitchTo(oldcontext);
 
     /*
     * If we've gone over our memory budget then free up some space in the
     * cache.
     */
-    if (rcstate->mem_used > rcstate->mem_limit)
+    if (mstate->mem_used > mstate->mem_limit)
     {
-        ResultCacheKey *key = entry->key;
+        MemoizeKey *key = entry->key;
 
-        if (!cache_reduce_memory(rcstate, key))
+        if (!cache_reduce_memory(mstate, key))
             return false;
 
         /*
@@ -561,17 +558,16 @@ cache_store_tuple(ResultCacheState *rcstate, TupleTableSlot *slot)
         * happened by seeing if the entry is still in use and that the key
         * pointer matches our expected key.
         */
-        if (entry->status != resultcache_SH_IN_USE || entry->key != key)
+        if (entry->status != memoize_SH_IN_USE || entry->key != key)
         {
             /*
             * We need to repopulate the probeslot as lookups performed during
             * the cache evictions above will have stored some other key.
             */
-            prepare_probe_slot(rcstate, key);
+            prepare_probe_slot(mstate, key);
 
             /* Re-find the entry */
-            rcstate->entry = entry = resultcache_lookup(rcstate->hashtable,
-                                                        NULL);
+            mstate->entry = entry = memoize_lookup(mstate->hashtable, NULL);
             Assert(entry != NULL);
         }
     }
@@ -580,17 +576,17 @@ cache_store_tuple(ResultCacheState *rcstate, TupleTableSlot *slot)
 }
 
 static TupleTableSlot *
-ExecResultCache(PlanState *pstate)
+ExecMemoize(PlanState *pstate)
 {
-    ResultCacheState *node = castNode(ResultCacheState, pstate);
+    MemoizeState *node = castNode(MemoizeState, pstate);
     PlanState  *outerNode;
     TupleTableSlot *slot;
 
-    switch (node->rc_status)
+    switch (node->mstatus)
     {
-        case RC_CACHE_LOOKUP:
+        case MEMO_CACHE_LOOKUP:
             {
-                ResultCacheEntry *entry;
+                MemoizeEntry *entry;
                 TupleTableSlot *outerslot;
                 bool        found;
 
@@ -618,7 +614,7 @@ ExecResultCache(PlanState *pstate)
 
                 /*
                 * Set last_tuple and entry so that the state
-                 * RC_CACHE_FETCH_NEXT_TUPLE can easily find the next
+                 * MEMO_CACHE_FETCH_NEXT_TUPLE can easily find the next
                 * tuple for these parameters.
                 */
                 node->last_tuple = entry->tuplehead;
@@ -627,7 +623,7 @@ ExecResultCache(PlanState *pstate)
                 /* Fetch the first cached tuple, if there is one */
                 if (entry->tuplehead)
                 {
-                    node->rc_status = RC_CACHE_FETCH_NEXT_TUPLE;
+                    node->mstatus = MEMO_CACHE_FETCH_NEXT_TUPLE;
 
                     slot = node->ss.ps.ps_ResultTupleSlot;
                     ExecStoreMinimalTuple(entry->tuplehead->mintuple,
@@ -637,7 +633,7 @@ ExecResultCache(PlanState *pstate)
                 }
 
                 /* The cache entry is void of any tuples. */
-                node->rc_status = RC_END_OF_SCAN;
+                node->mstatus = MEMO_END_OF_SCAN;
                 return NULL;
             }
 
@@ -666,13 +662,13 @@ ExecResultCache(PlanState *pstate)
                 * cache_lookup may have returned NULL due to failure to
                 * free enough cache space, so ensure we don't do anything
                 * here that assumes it worked.  There's no need to go into
-                 * bypass mode here as we're setting rc_status to end of
+                 * bypass mode here as we're setting mstatus to end of
                 * scan.
                 */
                 if (likely(entry))
                     entry->complete = true;
 
-                node->rc_status = RC_END_OF_SCAN;
+                node->mstatus = MEMO_END_OF_SCAN;
                 return NULL;
             }
 
@@ -687,7 +683,7 @@ ExecResultCache(PlanState *pstate)
                 {
                     node->stats.cache_overflows += 1;   /* stats update */
 
-                    node->rc_status = RC_CACHE_BYPASS_MODE;
+                    node->mstatus = MEMO_CACHE_BYPASS_MODE;
 
                     /*
                     * No need to clear out last_tuple as we'll stay in bypass
@@ -703,7 +699,7 @@ ExecResultCache(PlanState *pstate)
                     * executed to completion.
                     */
                     entry->complete = node->singlerow;
-                    node->rc_status = RC_FILLING_CACHE;
+                    node->mstatus = MEMO_FILLING_CACHE;
                 }
 
                 slot = node->ss.ps.ps_ResultTupleSlot;
@@ -711,7 +707,7 @@ ExecResultCache(PlanState *pstate)
                 return slot;
             }
 
-        case RC_CACHE_FETCH_NEXT_TUPLE:
+        case MEMO_CACHE_FETCH_NEXT_TUPLE:
             {
                 /* We shouldn't be in this state if these are not set */
                 Assert(node->entry != NULL);
@@ -723,7 +719,7 @@ ExecResultCache(PlanState *pstate)
                 /* No more tuples in the cache */
                 if (node->last_tuple == NULL)
                 {
-                    node->rc_status = RC_END_OF_SCAN;
+                    node->mstatus = MEMO_END_OF_SCAN;
                     return NULL;
                 }
 
@@ -734,18 +730,18 @@ ExecResultCache(PlanState *pstate)
                 return slot;
             }
 
-        case RC_FILLING_CACHE:
+        case MEMO_FILLING_CACHE:
             {
                 TupleTableSlot *outerslot;
-                ResultCacheEntry *entry = node->entry;
+                MemoizeEntry *entry = node->entry;
 
-                /* entry should already have been set by RC_CACHE_LOOKUP */
+                /* entry should already have been set by MEMO_CACHE_LOOKUP */
                 Assert(entry != NULL);
 
                 /*
-                 * When in the RC_FILLING_CACHE state, we've just had a cache
-                 * miss and are populating the cache with the current scan
-                 * tuples.
+                 * When in the MEMO_FILLING_CACHE state, we've just had a
+                 * cache miss and are populating the cache with the current
+                 * scan tuples.
                 */
                 outerNode = outerPlanState(node);
                 outerslot = ExecProcNode(outerNode);
@@ -753,7 +749,7 @@ ExecResultCache(PlanState *pstate)
                 {
                     /* No more tuples.  Mark it as complete */
                     entry->complete = true;
-                    node->rc_status = RC_END_OF_SCAN;
+                    node->mstatus = MEMO_END_OF_SCAN;
                     return NULL;
                 }
 
@@ -771,7 +767,7 @@ ExecResultCache(PlanState *pstate)
                     /* Couldn't store it?  Handle overflow */
                     node->stats.cache_overflows += 1;   /* stats update */
 
-                    node->rc_status = RC_CACHE_BYPASS_MODE;
+                    node->mstatus = MEMO_CACHE_BYPASS_MODE;
 
                     /*
                     * No need to clear out entry or last_tuple as we'll stay
@@ -784,7 +780,7 @@ ExecResultCache(PlanState *pstate)
                 return slot;
             }
 
-        case RC_CACHE_BYPASS_MODE:
+        case MEMO_CACHE_BYPASS_MODE:
             {
                 TupleTableSlot *outerslot;
 
@@ -797,7 +793,7 @@ ExecResultCache(PlanState *pstate)
                 outerslot = ExecProcNode(outerNode);
                 if (TupIsNull(outerslot))
                 {
-                    node->rc_status = RC_END_OF_SCAN;
+                    node->mstatus = MEMO_END_OF_SCAN;
                     return NULL;
                 }
 
@@ -806,7 +802,7 @@ ExecResultCache(PlanState *pstate)
                 return slot;
             }
 
-        case RC_END_OF_SCAN:
+        case MEMO_END_OF_SCAN:
 
             /*
             * We've already returned NULL for this scan, but just in case
@@ -815,16 +811,16 @@ ExecResultCache(PlanState *pstate)
             return NULL;
 
         default:
-            elog(ERROR, "unrecognized resultcache state: %d",
-                 (int) node->rc_status);
+            elog(ERROR, "unrecognized memoize state: %d",
+                 (int) node->mstatus);
             return NULL;
     }                           /* switch */
 }
 
-ResultCacheState *
-ExecInitResultCache(ResultCache *node, EState *estate, int eflags)
+MemoizeState *
+ExecInitMemoize(Memoize *node, EState *estate, int eflags)
 {
-    ResultCacheState *rcstate = makeNode(ResultCacheState);
+    MemoizeState *mstate = makeNode(MemoizeState);
     Plan       *outerNode;
     int         i;
     int         nkeys;
@@ -833,50 +829,50 @@ ExecInitResultCache(ResultCache *node, EState *estate, int eflags)
     /* check for unsupported flags */
     Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
 
-    rcstate->ss.ps.plan = (Plan *) node;
-    rcstate->ss.ps.state = estate;
-    rcstate->ss.ps.ExecProcNode = ExecResultCache;
+    mstate->ss.ps.plan = (Plan *) node;
+    mstate->ss.ps.state = estate;
+    mstate->ss.ps.ExecProcNode = ExecMemoize;
 
     /*
     * Miscellaneous initialization
     *
     * create expression context for node
     */
-    ExecAssignExprContext(estate, &rcstate->ss.ps);
+    ExecAssignExprContext(estate, &mstate->ss.ps);
 
     outerNode = outerPlan(node);
-    outerPlanState(rcstate) = ExecInitNode(outerNode, estate, eflags);
+    outerPlanState(mstate) = ExecInitNode(outerNode, estate, eflags);
 
     /*
     * Initialize return slot and type.  No need to initialize projection info
     * because this node doesn't do projections.
     */
-    ExecInitResultTupleSlotTL(&rcstate->ss.ps, &TTSOpsMinimalTuple);
-    rcstate->ss.ps.ps_ProjInfo = NULL;
+    ExecInitResultTupleSlotTL(&mstate->ss.ps, &TTSOpsMinimalTuple);
+    mstate->ss.ps.ps_ProjInfo = NULL;
 
     /*
     * Initialize scan slot and type.
     */
-    ExecCreateScanSlotFromOuterPlan(estate, &rcstate->ss, &TTSOpsMinimalTuple);
+    ExecCreateScanSlotFromOuterPlan(estate, &mstate->ss, &TTSOpsMinimalTuple);
 
     /*
     * Set the state machine to lookup the cache.  We won't find anything
     * until we cache something, but this saves a special case to create the
     * first entry.
     */
-    rcstate->rc_status = RC_CACHE_LOOKUP;
+    mstate->mstatus = MEMO_CACHE_LOOKUP;
 
-    rcstate->nkeys = nkeys = node->numKeys;
-    rcstate->hashkeydesc = ExecTypeFromExprList(node->param_exprs);
-    rcstate->tableslot = MakeSingleTupleTableSlot(rcstate->hashkeydesc,
-                                                  &TTSOpsMinimalTuple);
-    rcstate->probeslot = MakeSingleTupleTableSlot(rcstate->hashkeydesc,
-                                                  &TTSOpsVirtual);
+    mstate->nkeys = nkeys = node->numKeys;
+    mstate->hashkeydesc = ExecTypeFromExprList(node->param_exprs);
+    mstate->tableslot = MakeSingleTupleTableSlot(mstate->hashkeydesc,
+                                                 &TTSOpsMinimalTuple);
+    mstate->probeslot = MakeSingleTupleTableSlot(mstate->hashkeydesc,
+                                                 &TTSOpsVirtual);
 
-    rcstate->param_exprs = (ExprState **) palloc(nkeys * sizeof(ExprState *));
-    rcstate->collations = node->collations;    /* Just point directly to the plan
+    mstate->param_exprs = (ExprState **) palloc(nkeys * sizeof(ExprState *));
+    mstate->collations = node->collations; /* Just point directly to the plan
                                             * data */
-    rcstate->hashfunctions = (FmgrInfo *) palloc(nkeys * sizeof(FmgrInfo));
+    mstate->hashfunctions = (FmgrInfo *) palloc(nkeys * sizeof(FmgrInfo));
 
     eqfuncoids = palloc(nkeys * sizeof(Oid));
 
@@ -891,34 +887,34 @@ ExecInitResultCache(ResultCache *node, EState *estate, int eflags)
             elog(ERROR, "could not find hash function for hash operator %u",
                 hashop);
 
-        fmgr_info(left_hashfn, &rcstate->hashfunctions[i]);
+        fmgr_info(left_hashfn, &mstate->hashfunctions[i]);
 
-        rcstate->param_exprs[i] = ExecInitExpr(param_expr, (PlanState *) rcstate);
+        mstate->param_exprs[i] = ExecInitExpr(param_expr, (PlanState *) mstate);
         eqfuncoids[i] = get_opcode(hashop);
     }
 
-    rcstate->cache_eq_expr = ExecBuildParamSetEqual(rcstate->hashkeydesc,
-                                                    &TTSOpsMinimalTuple,
-                                                    &TTSOpsVirtual,
-                                                    eqfuncoids,
-                                                    node->collations,
-                                                    node->param_exprs,
-                                                    (PlanState *) rcstate);
+    mstate->cache_eq_expr = ExecBuildParamSetEqual(mstate->hashkeydesc,
+                                                   &TTSOpsMinimalTuple,
+                                                   &TTSOpsVirtual,
+                                                   eqfuncoids,
+                                                   node->collations,
+                                                   node->param_exprs,
+                                                   (PlanState *) mstate);
 
     pfree(eqfuncoids);
-    rcstate->mem_used = 0;
+    mstate->mem_used = 0;
 
     /* Limit the total memory consumed by the cache to this */
-    rcstate->mem_limit = get_hash_mem() * 1024L;
+    mstate->mem_limit = get_hash_mem() * 1024L;
 
     /* A memory context dedicated for the cache */
-    rcstate->tableContext = AllocSetContextCreate(CurrentMemoryContext,
-                                                  "ResultCacheHashTable",
-                                                  ALLOCSET_DEFAULT_SIZES);
+    mstate->tableContext = AllocSetContextCreate(CurrentMemoryContext,
+                                                 "MemoizeHashTable",
+                                                 ALLOCSET_DEFAULT_SIZES);
 
-    dlist_init(&rcstate->lru_list);
-    rcstate->last_tuple = NULL;
-    rcstate->entry = NULL;
+    dlist_init(&mstate->lru_list);
+    mstate->last_tuple = NULL;
+    mstate->entry = NULL;
 
@@ -928,34 +924,34 @@ ExecInitResultCache(ResultCache *node, EState *estate, int eflags)
     /*
     * Mark if we can assume the cache entry is completed after we get the
     * first record for it.  Some callers might not call us again after
     * getting the first match.  e.g.  A join operator performing a unique
     * join is able to skip to the next outer tuple after getting the first
     * matching inner tuple.  In this case, the cache entry is complete after
     * getting the first tuple.  This allows us to mark it as so.
     */
-    rcstate->singlerow = node->singlerow;
+    mstate->singlerow = node->singlerow;
 
     /* Zero the statistics counters */
-    memset(&rcstate->stats, 0, sizeof(ResultCacheInstrumentation));
+    memset(&mstate->stats, 0, sizeof(MemoizeInstrumentation));
 
     /* Allocate and set up the actual cache */
-    build_hash_table(rcstate, node->est_entries);
+    build_hash_table(mstate, node->est_entries);
 
-    return rcstate;
+    return mstate;
 }
 
 void
-ExecEndResultCache(ResultCacheState *node)
+ExecEndMemoize(MemoizeState *node)
 {
 #ifdef USE_ASSERT_CHECKING
     /* Validate the memory accounting code is correct in assert builds. */
     {
         int         count;
         uint64      mem = 0;
-        resultcache_iterator i;
-        ResultCacheEntry *entry;
+        memoize_iterator i;
+        MemoizeEntry *entry;
 
-        resultcache_start_iterate(node->hashtable, &i);
+        memoize_start_iterate(node->hashtable, &i);
 
         count = 0;
-        while ((entry = resultcache_iterate(node->hashtable, &i)) != NULL)
+        while ((entry = memoize_iterate(node->hashtable, &i)) != NULL)
         {
-            ResultCacheTuple *tuple = entry->tuplehead;
+            MemoizeTuple *tuple = entry->tuplehead;
 
             mem += EMPTY_ENTRY_MEMORY_BYTES(entry);
             while (tuple != NULL)
@@ -978,7 +974,7 @@ ExecEndResultCache(ResultCacheState *node)
     */
     if (node->shared_info != NULL && IsParallelWorker())
     {
-        ResultCacheInstrumentation *si;
+        MemoizeInstrumentation *si;
 
         /* Make mem_peak available for EXPLAIN */
         if (node->stats.mem_peak == 0)
@@ -986,7 +982,7 @@ ExecEndResultCache(ResultCacheState *node)
 
         Assert(ParallelWorkerNumber <= node->shared_info->num_workers);
         si = &node->shared_info->sinstrument[ParallelWorkerNumber];
-        memcpy(si, &node->stats, sizeof(ResultCacheInstrumentation));
+        memcpy(si, &node->stats, sizeof(MemoizeInstrumentation));
     }
 
     /* Remove the cache context */
@@ -1008,12 +1004,12 @@ ExecEndResultCache(ResultCacheState *node)
 }
 
 void
-ExecReScanResultCache(ResultCacheState *node)
+ExecReScanMemoize(MemoizeState *node)
 {
     PlanState  *outerPlan = outerPlanState(node);
 
     /* Mark that we must lookup the cache for a new set of parameters */
-    node->rc_status = RC_CACHE_LOOKUP;
+    node->mstatus = MEMO_CACHE_LOOKUP;
 
     /* nullify pointers used for the last scan */
     node->entry = NULL;
@@ -1036,8 +1032,8 @@
 double
 ExecEstimateCacheEntryOverheadBytes(double ntuples)
 {
-    return sizeof(ResultCacheEntry) + sizeof(ResultCacheKey) +
-        sizeof(ResultCacheTuple) * ntuples;
+    return sizeof(MemoizeEntry) + sizeof(MemoizeKey) + sizeof(MemoizeTuple) *
+        ntuples;
 }
 
 /* ----------------------------------------------------------------
@@ -1046,13 +1042,13 @@ ExecEstimateCacheEntryOverheadBytes(double ntuples)
 */
 
 /* ----------------------------------------------------------------
- *      ExecResultCacheEstimate
+ *      ExecMemoizeEstimate
 *
- *      Estimate space required to propagate result cache statistics.
+ *      Estimate space required to propagate memoize statistics.
 * ----------------------------------------------------------------
 */
 void
-ExecResultCacheEstimate(ResultCacheState *node, ParallelContext *pcxt)
+ExecMemoizeEstimate(MemoizeState *node, ParallelContext *pcxt)
 {
     Size        size;
 
@@ -1060,20 +1056,20 @@ ExecResultCacheEstimate(ResultCacheState *node, ParallelContext *pcxt)
     if (!node->ss.ps.instrument || pcxt->nworkers == 0)
         return;
 
-    size = mul_size(pcxt->nworkers, sizeof(ResultCacheInstrumentation));
-    size = add_size(size, offsetof(SharedResultCacheInfo, sinstrument));
+    size = mul_size(pcxt->nworkers, sizeof(MemoizeInstrumentation));
+    size = add_size(size, offsetof(SharedMemoizeInfo, sinstrument));
     shm_toc_estimate_chunk(&pcxt->estimator, size);
     shm_toc_estimate_keys(&pcxt->estimator, 1);
 }
 
 /* ----------------------------------------------------------------
- *      ExecResultCacheInitializeDSM
+ *      ExecMemoizeInitializeDSM
 *
- *      Initialize DSM space for result cache statistics.
+ *      Initialize DSM space for memoize statistics.
 * ----------------------------------------------------------------
 */
 void
-ExecResultCacheInitializeDSM(ResultCacheState *node, ParallelContext *pcxt)
+ExecMemoizeInitializeDSM(MemoizeState *node, ParallelContext *pcxt)
 {
     Size        size;
 
@@ -1081,8 +1077,8 @@ ExecResultCacheInitializeDSM(ResultCacheState *node, ParallelContext *pcxt)
     if (!node->ss.ps.instrument || pcxt->nworkers == 0)
         return;
 
-    size = offsetof(SharedResultCacheInfo, sinstrument)
-        + pcxt->nworkers * sizeof(ResultCacheInstrumentation);
+    size = offsetof(SharedMemoizeInfo, sinstrument)
+        + pcxt->nworkers * sizeof(MemoizeInstrumentation);
     node->shared_info = shm_toc_allocate(pcxt->toc, size);
     /* ensure any unfilled slots will contain zeroes */
     memset(node->shared_info, 0, size);
@@ -1092,35 +1088,35 @@ ExecResultCacheInitializeDSM(ResultCacheState *node, ParallelContext *pcxt)
 }
 
 /* ----------------------------------------------------------------
- *      ExecResultCacheInitializeWorker
+ *      ExecMemoizeInitializeWorker
 *
- *      Attach worker to DSM space for result cache statistics.
+ *      Attach worker to DSM space for memoize statistics.
 * ----------------------------------------------------------------
 */
 void
-ExecResultCacheInitializeWorker(ResultCacheState *node, ParallelWorkerContext *pwcxt)
+ExecMemoizeInitializeWorker(MemoizeState *node, ParallelWorkerContext *pwcxt)
 {
     node->shared_info =
         shm_toc_lookup(pwcxt->toc, node->ss.ps.plan->plan_node_id, true);
 }
 
 /* ----------------------------------------------------------------
- *      ExecResultCacheRetrieveInstrumentation
+ *      ExecMemoizeRetrieveInstrumentation
 *
- *      Transfer result cache statistics from DSM to private memory.
+ *      Transfer memoize statistics from DSM to private memory.
 * ----------------------------------------------------------------
 */
 void
-ExecResultCacheRetrieveInstrumentation(ResultCacheState *node)
+ExecMemoizeRetrieveInstrumentation(MemoizeState *node)
 {
     Size        size;
-    SharedResultCacheInfo *si;
+    SharedMemoizeInfo *si;
 
     if (node->shared_info == NULL)
         return;
 
-    size = offsetof(SharedResultCacheInfo, sinstrument)
-        + node->shared_info->num_workers * sizeof(ResultCacheInstrumentation);
+    size = offsetof(SharedMemoizeInfo, sinstrument)
+        + node->shared_info->num_workers * sizeof(MemoizeInstrumentation);
     si = palloc(size);
     memcpy(si, node->shared_info, size);
     node->shared_info = si;
diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c
index 6fef067957..9d4893c504 100644
--- a/src/backend/nodes/copyfuncs.c
+++ b/src/backend/nodes/copyfuncs.c
@@ -950,12 +950,12 @@ _copyMaterial(const Material *from)
 
 /*
- * _copyResultCache
+ * _copyMemoize
 */
-static ResultCache *
-_copyResultCache(const ResultCache *from)
+static Memoize *
+_copyMemoize(const Memoize *from)
 {
-    ResultCache *newnode = makeNode(ResultCache);
+    Memoize    *newnode = makeNode(Memoize);
 
     /*
     * copy node superclass fields
@@ -5079,8 +5079,8 @@ copyObjectImpl(const void *from)
         case T_Material:
             retval = _copyMaterial(from);
             break;
-        case T_ResultCache:
-            retval = _copyResultCache(from);
+        case T_Memoize:
+            retval = _copyMemoize(from);
             break;
         case T_Sort:
             retval = _copySort(from);
diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c
index e09e4f77fe..e73be21bd6 100644
--- a/src/backend/nodes/outfuncs.c
+++ b/src/backend/nodes/outfuncs.c
@@ -848,9 +848,9 @@ _outMaterial(StringInfo str, const Material *node)
 }
 
 static void
-_outResultCache(StringInfo str, const ResultCache *node)
+_outMemoize(StringInfo str, const Memoize *node)
 {
-    WRITE_NODE_TYPE("RESULTCACHE");
+    WRITE_NODE_TYPE("MEMOIZE");
 
     _outPlanInfo(str, (const Plan *) node);
 
@@ -1949,9 +1949,9 @@ _outMaterialPath(StringInfo str, const MaterialPath *node)
 }
 
 static void
-_outResultCachePath(StringInfo str, const ResultCachePath *node)
+_outMemoizePath(StringInfo str, const MemoizePath *node)
 {
-    WRITE_NODE_TYPE("RESULTCACHEPATH");
+    WRITE_NODE_TYPE("MEMOIZEPATH");
 
     _outPathInfo(str, (const Path *) node);
 
@@ -3961,8 +3961,8 @@ outNode(StringInfo str, const void *obj)
             case T_Material:
                 _outMaterial(str, obj);
                 break;
-            case T_ResultCache:
-                _outResultCache(str, obj);
+            case T_Memoize:
+                _outMemoize(str, obj);
                 break;
             case T_Sort:
                 _outSort(str, obj);
@@ -4201,8 +4201,8 @@ outNode(StringInfo str, const void *obj)
             case T_MaterialPath:
                 _outMaterialPath(str, obj);
                 break;
-            case T_ResultCachePath:
-                _outResultCachePath(str, obj);
+            case T_MemoizePath:
+                _outMemoizePath(str, obj);
                 break;
             case T_UniquePath:
                 _outUniquePath(str, obj);
diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c
index 3dec0a2508..77d082d8b4 100644
--- a/src/backend/nodes/readfuncs.c
+++ b/src/backend/nodes/readfuncs.c
@@ -2216,12 +2216,12 @@ _readMaterial(void)
 }
 
 /*
- * _readResultCache
+ * _readMemoize
 */
-static ResultCache *
-_readResultCache(void)
+static Memoize *
+_readMemoize(void)
 {
-    READ_LOCALS(ResultCache);
+    READ_LOCALS(Memoize);
 
     ReadCommonPlan(&local_node->plan);
 
@@ -2923,8 +2923,8 @@ parseNodeString(void)
         return_value = _readHashJoin();
     else if (MATCH("MATERIAL", 8))
         return_value = _readMaterial();
-    else if (MATCH("RESULTCACHE", 11))
-        return_value = _readResultCache();
+    else if (MATCH("MEMOIZE", 7))
+        return_value = _readMemoize();
     else if (MATCH("SORT", 4))
         return_value = _readSort();
     else if (MATCH("INCREMENTALSORT", 15))
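Because _outMemoize now writes the node tag MEMOIZE (and _readMemoize accepts it), a serialized plan tree shows the new label. A rough, hypothetical sketch using the pre-existing debug_print_plan GUC, with the braces-format output abridged and the field values invented:

    SET debug_print_plan = on;
    SELECT * FROM orders o JOIN customers c ON c.id = o.customer_id;
    -- The server log's plan dump would now contain a node such as
    --   {MEMOIZE :numKeys 1 :param_exprs ({PARAM ...}) :singlerow false ...}
    -- where an older server would have printed {RESULTCACHE ...}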
(MATCH("INCREMENTALSORT", 15)) diff --git a/src/backend/optimizer/README b/src/backend/optimizer/README index 4aefde8bb1..2339347c24 100644 --- a/src/backend/optimizer/README +++ b/src/backend/optimizer/README @@ -382,7 +382,7 @@ RelOptInfo - a relation or joined relations MergeAppendPath - merge multiple subpaths, preserving their common sort order GroupResultPath - childless Result plan node (used for degenerate grouping) MaterialPath - a Material plan node - ResultCachePath - a result cache plan node for caching tuples from sub-paths + MemoizePath - a Memoize plan node for caching tuples from sub-paths UniquePath - remove duplicate rows (either by hashing or sorting) GatherPath - collect the results of parallel workers GatherMergePath - collect parallel results, preserving their common sort order diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c index 17febfff8a..671117314a 100644 --- a/src/backend/optimizer/path/allpaths.c +++ b/src/backend/optimizer/path/allpaths.c @@ -4031,9 +4031,9 @@ print_path(PlannerInfo *root, Path *path, int indent) ptype = "Material"; subpath = ((MaterialPath *) path)->subpath; break; - case T_ResultCachePath: - ptype = "ResultCache"; - subpath = ((ResultCachePath *) path)->subpath; + case T_MemoizePath: + ptype = "Memoize"; + subpath = ((MemoizePath *) path)->subpath; break; case T_UniquePath: ptype = "Unique"; diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c index 8577c7b138..b54cf34a8e 100644 --- a/src/backend/optimizer/path/costsize.c +++ b/src/backend/optimizer/path/costsize.c @@ -79,7 +79,7 @@ #include "executor/executor.h" #include "executor/nodeAgg.h" #include "executor/nodeHash.h" -#include "executor/nodeResultCache.h" +#include "executor/nodeMemoize.h" #include "miscadmin.h" #include "nodes/makefuncs.h" #include "nodes/nodeFuncs.h" @@ -140,7 +140,7 @@ bool enable_incremental_sort = true; bool enable_hashagg = true; bool enable_nestloop = true; bool enable_material = true; -bool enable_resultcache = true; +bool enable_memoize = true; bool enable_mergejoin = true; bool enable_hashjoin = true; bool enable_gathermerge = true; @@ -2405,8 +2405,8 @@ cost_material(Path *path, } /* - * cost_resultcache_rescan - * Determines the estimated cost of rescanning a ResultCache node. + * cost_memoize_rescan + * Determines the estimated cost of rescanning a Memoize node. * * In order to estimate this, we must gain knowledge of how often we expect to * be called and how many distinct sets of parameters we are likely to be @@ -2418,15 +2418,15 @@ cost_material(Path *path, * hit and caching would be a complete waste of effort. 
diff --git a/src/backend/optimizer/path/joinpath.c b/src/backend/optimizer/path/joinpath.c
index b67b517770..6407ede12a 100644
--- a/src/backend/optimizer/path/joinpath.c
+++ b/src/backend/optimizer/path/joinpath.c
@@ -171,7 +171,7 @@ add_paths_to_joinrel(PlannerInfo *root,
 		case JOIN_ANTI:
 
 			/*
-			 * XXX it may be worth proving this to allow a ResultCache to be
+			 * XXX it may be worth proving this to allow a Memoize to be
 			 * considered for Nested Loop Semi/Anti Joins.
 			 */
 			extra.inner_unique = false; /* well, unproven */
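
For illustration only: because inner_unique is left unproven for SEMI and ANTI joins here, get_memoize_path() (below) currently refuses to memoize them. So for a semi-join such as the following EXISTS query, however the planner resolves the join, no Memoize node will be considered for its inner side:

-- Sketch: a SEMI join; per the restriction above, no Memoize node is expected.
EXPLAIN (COSTS OFF)
SELECT COUNT(*) FROM tenk1 t1
WHERE EXISTS (SELECT 1 FROM tenk1 t2 WHERE t2.unique1 = t1.twenty);
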
@@ -395,7 +395,7 @@ paraminfo_get_equal_hashops(PlannerInfo *root, ParamPathInfo *param_info,
 		OpExpr	   *opexpr;
 		Node	   *expr;
 
-		/* can't use result cache without a valid hash equals operator */
+		/* can't use a memoize node without a valid hash equals operator */
 		if (!OidIsValid(rinfo->hasheqoperator) ||
 			!clause_sides_match_join(rinfo, outerrel, innerrel))
 		{
@@ -436,7 +436,7 @@ paraminfo_get_equal_hashops(PlannerInfo *root, ParamPathInfo *param_info,
 		typentry = lookup_type_cache(exprType(expr),
 									 TYPECACHE_HASH_PROC | TYPECACHE_EQ_OPR);
 
-		/* can't use result cache without a valid hash equals operator */
+		/* can't use a memoize node without a valid hash equals operator */
 		if (!OidIsValid(typentry->hash_proc) || !OidIsValid(typentry->eq_opr))
 		{
 			list_free(*operators);
@@ -448,27 +448,27 @@ paraminfo_get_equal_hashops(PlannerInfo *root, ParamPathInfo *param_info,
 		*param_exprs = lappend(*param_exprs, expr);
 	}
 
-	/* We're okay to use result cache */
+	/* We're okay to use memoize */
 	return true;
 }
 
 /*
- * get_resultcache_path
- *		If possible, make and return a Result Cache path atop of 'inner_path'.
+ * get_memoize_path
+ *		If possible, make and return a Memoize path atop of 'inner_path'.
 *		Otherwise return NULL.
 */
 static Path *
-get_resultcache_path(PlannerInfo *root, RelOptInfo *innerrel,
-					 RelOptInfo *outerrel, Path *inner_path,
-					 Path *outer_path, JoinType jointype,
-					 JoinPathExtraData *extra)
+get_memoize_path(PlannerInfo *root, RelOptInfo *innerrel,
+				 RelOptInfo *outerrel, Path *inner_path,
+				 Path *outer_path, JoinType jointype,
+				 JoinPathExtraData *extra)
 {
 	List	   *param_exprs;
 	List	   *hash_operators;
 	ListCell   *lc;
 
 	/* Obviously not if it's disabled */
-	if (!enable_resultcache)
+	if (!enable_memoize)
 		return NULL;
 
 	/*
@@ -481,7 +481,7 @@ get_resultcache_path(PlannerInfo *root, RelOptInfo *innerrel,
 		return NULL;
 
 	/*
-	 * We can only have a result cache when there's some kind of cache key,
+	 * We can only have a memoize node when there's some kind of cache key,
 	 * either parameterized path clauses or lateral Vars.  No cache key sounds
 	 * more like something a Materialize node might be more useful for.
 	 */
@@ -493,8 +493,8 @@ get_resultcache_path(PlannerInfo *root, RelOptInfo *innerrel,
 	/*
 	 * Currently we don't do this for SEMI and ANTI joins unless they're
 	 * marked as inner_unique.  This is because nested loop SEMI/ANTI joins
-	 * don't scan the inner node to completion, which will mean result cache
-	 * cannot mark the cache entry as complete.
+	 * don't scan the inner node to completion, which will mean memoize cannot
+	 * mark the cache entry as complete.
 	 *
 	 * XXX Currently we don't attempt to mark SEMI/ANTI joins as inner_unique
 	 * = true.  Should we?  See add_paths_to_joinrel()
@@ -504,8 +504,8 @@ get_resultcache_path(PlannerInfo *root, RelOptInfo *innerrel,
 		return NULL;
 
 	/*
-	 * Result Cache normally marks cache entries as complete when it runs out
-	 * of tuples to read from its subplan.  However, with unique joins, Nested
+	 * Memoize normally marks cache entries as complete when it runs out of
+	 * tuples to read from its subplan.  However, with unique joins, Nested
 	 * Loop will skip to the next outer tuple after finding the first matching
 	 * inner tuple.  This means that we may not read the inner side of the
 	 * join to completion which leaves no opportunity to mark the cache entry
@@ -516,11 +516,11 @@ get_resultcache_path(PlannerInfo *root, RelOptInfo *innerrel,
 	 * condition, we can't be sure which part of it causes the join to be
 	 * unique.  This means there are no guarantees that only 1 tuple will be
 	 * read.  We cannot mark the cache entry as complete after reading the
-	 * first tuple without that guarantee.  This means the scope of Result
-	 * Cache's usefulness is limited to only outer rows that have no join
+	 * first tuple without that guarantee.  This means the scope of Memoize
+	 * node's usefulness is limited to only outer rows that have no join
 	 * partner as this is the only case where Nested Loop would exhaust the
 	 * inner scan of a unique join.  Since the scope is limited to that, we
-	 * just don't bother making a result cache path in this case.
+	 * just don't bother making a memoize path in this case.
 	 *
 	 * Lateral vars needn't be considered here as they're not considered when
 	 * determining if the join is unique.
@@ -536,7 +536,7 @@ get_resultcache_path(PlannerInfo *root, RelOptInfo *innerrel,
 		return NULL;
 
 	/*
-	 * We can't use a result cache if there are volatile functions in the
+	 * We can't use a memoize node if there are volatile functions in the
 	 * inner rel's target list or restrict list.  A cache hit could reduce the
 	 * number of calls to these functions.
 	 */
@@ -559,13 +559,13 @@ get_resultcache_path(PlannerInfo *root, RelOptInfo *innerrel,
 									  &param_exprs,
 									  &hash_operators))
 	{
-		return (Path *) create_resultcache_path(root,
-												innerrel,
-												inner_path,
-												param_exprs,
-												hash_operators,
-												extra->inner_unique,
-												outer_path->parent->rows);
+		return (Path *) create_memoize_path(root,
+											innerrel,
+											inner_path,
+											param_exprs,
+											hash_operators,
+											extra->inner_unique,
+											outer_path->parent->rows);
 	}
 
 	return NULL;
@@ -1688,7 +1688,7 @@ match_unsorted_outer(PlannerInfo *root,
 			foreach(lc2, innerrel->cheapest_parameterized_paths)
 			{
 				Path	   *innerpath = (Path *) lfirst(lc2);
-				Path	   *rcpath;
+				Path	   *mpath;
 
 				try_nestloop_path(root,
 								  joinrel,
@@ -1699,17 +1699,17 @@ match_unsorted_outer(PlannerInfo *root,
 								  extra);
 
 				/*
-				 * Try generating a result cache path and see if that makes
-				 * the nested loop any cheaper.
+				 * Try generating a memoize path and see if that makes the
+				 * nested loop any cheaper.
 				 */
-				rcpath = get_resultcache_path(root, innerrel, outerrel,
-											  innerpath, outerpath, jointype,
-											  extra);
-				if (rcpath != NULL)
+				mpath = get_memoize_path(root, innerrel, outerrel,
+										 innerpath, outerpath, jointype,
+										 extra);
+				if (mpath != NULL)
 					try_nestloop_path(root,
 									  joinrel,
 									  outerpath,
-									  rcpath,
+									  mpath,
 									  merge_pathkeys,
 									  jointype,
 									  extra);
@@ -1867,7 +1867,7 @@ consider_parallel_nestloop(PlannerInfo *root,
 		foreach(lc2, innerrel->cheapest_parameterized_paths)
 		{
 			Path	   *innerpath = (Path *) lfirst(lc2);
-			Path	   *rcpath;
+			Path	   *mpath;
 
 			/* Can't join to an inner path that is not parallel-safe */
 			if (!innerpath->parallel_safe)
@@ -1894,14 +1894,14 @@ consider_parallel_nestloop(PlannerInfo *root,
 									  pathkeys, jointype, extra);
 
 			/*
-			 * Try generating a result cache path and see if that makes the
-			 * nested loop any cheaper.
+			 * Try generating a memoize path and see if that makes the nested
+			 * loop any cheaper.
 			 */
-			rcpath = get_resultcache_path(root, innerrel, outerrel,
-										  innerpath, outerpath, jointype,
-										  extra);
-			if (rcpath != NULL)
-				try_partial_nestloop_path(root, joinrel, outerpath, rcpath,
+			mpath = get_memoize_path(root, innerrel, outerrel,
+									 innerpath, outerpath, jointype,
+									 extra);
+			if (mpath != NULL)
+				try_partial_nestloop_path(root, joinrel, outerpath, mpath,
 										  pathkeys, jointype, extra);
 		}
 	}
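
For illustration only: get_memoize_path() accepts a cache key from either parameterized path clauses or lateral Vars, provided every key has a hash equality operator and the inner rel contains no volatile functions. The LATERAL query below, from the new memoize regression test, caches the inner scan on t1.twenty:

-- Sketch, from the memoize regression test: the lateral reference t1.twenty
-- becomes the cache key of the Memoize node.
EXPLAIN (COSTS OFF)
SELECT COUNT(*), AVG(t2.unique1) FROM tenk1 t1,
LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2
WHERE t1.unique1 < 1000;
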
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index c13da7a879..d3f8639a40 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -92,9 +92,8 @@ static Result *create_group_result_plan(PlannerInfo *root,
 static ProjectSet *create_project_set_plan(PlannerInfo *root,
 										   ProjectSetPath *best_path);
 static Material *create_material_plan(PlannerInfo *root, MaterialPath *best_path,
 									  int flags);
-static ResultCache *create_resultcache_plan(PlannerInfo *root,
-											ResultCachePath *best_path,
-											int flags);
+static Memoize *create_memoize_plan(PlannerInfo *root, MemoizePath *best_path,
+									int flags);
 static Plan *create_unique_plan(PlannerInfo *root, UniquePath *best_path,
 								int flags);
 static Gather *create_gather_plan(PlannerInfo *root, GatherPath *best_path);
@@ -278,11 +277,9 @@ static Sort *make_sort_from_groupcols(List *groupcls,
 									  AttrNumber *grpColIdx,
 									  Plan *lefttree);
 static Material *make_material(Plan *lefttree);
-static ResultCache *make_resultcache(Plan *lefttree, Oid *hashoperators,
-									 Oid *collations,
-									 List *param_exprs,
-									 bool singlerow,
-									 uint32 est_entries);
+static Memoize *make_memoize(Plan *lefttree, Oid *hashoperators,
+							 Oid *collations, List *param_exprs,
+							 bool singlerow, uint32 est_entries);
 static WindowAgg *make_windowagg(List *tlist, Index winref,
 								 int partNumCols, AttrNumber *partColIdx, Oid *partOperators, Oid *partCollations,
 								 int ordNumCols, AttrNumber *ordColIdx, Oid *ordOperators, Oid *ordCollations,
@@ -459,10 +456,10 @@ create_plan_recurse(PlannerInfo *root, Path *best_path, int flags)
 											   (MaterialPath *) best_path,
 											   flags);
 			break;
-		case T_ResultCache:
-			plan = (Plan *) create_resultcache_plan(root,
-													(ResultCachePath *) best_path,
-													flags);
+		case T_Memoize:
+			plan = (Plan *) create_memoize_plan(root,
+												(MemoizePath *) best_path,
+												flags);
 			break;
 		case T_Unique:
 			if (IsA(best_path, UpperUniquePath))
@@ -1578,16 +1575,16 @@ create_material_plan(PlannerInfo *root, MaterialPath *best_path, int flags)
 }
 
 /*
- * create_resultcache_plan
- *	  Create a ResultCache plan for 'best_path' and (recursively) plans
- *	  for its subpaths.
+ * create_memoize_plan
+ *	  Create a Memoize plan for 'best_path' and (recursively) plans for its
+ *	  subpaths.
 *
 *	  Returns a Plan node.
 */
-static ResultCache *
-create_resultcache_plan(PlannerInfo *root, ResultCachePath *best_path, int flags)
+static Memoize *
+create_memoize_plan(PlannerInfo *root, MemoizePath *best_path, int flags)
 {
-	ResultCache *plan;
+	Memoize    *plan;
 	Plan	   *subplan;
 	Oid		   *operators;
 	Oid		   *collations;
@@ -1619,8 +1616,8 @@ create_resultcache_plan(PlannerInfo *root, ResultCachePath *best_path, int flags
 		i++;
 	}
 
-	plan = make_resultcache(subplan, operators, collations, param_exprs,
-							best_path->singlerow, best_path->est_entries);
+	plan = make_memoize(subplan, operators, collations, param_exprs,
+						best_path->singlerow, best_path->est_entries);
 
 	copy_generic_path_info(&plan->plan, (Path *) best_path);
@@ -6417,11 +6414,11 @@ materialize_finished_plan(Plan *subplan)
 	return matplan;
 }
 
-static ResultCache *
-make_resultcache(Plan *lefttree, Oid *hashoperators, Oid *collations,
-				 List *param_exprs, bool singlerow, uint32 est_entries)
+static Memoize *
+make_memoize(Plan *lefttree, Oid *hashoperators, Oid *collations,
+			 List *param_exprs, bool singlerow, uint32 est_entries)
 {
-	ResultCache *node = makeNode(ResultCache);
+	Memoize    *node = makeNode(Memoize);
 	Plan	   *plan = &node->plan;
 
 	plan->targetlist = lefttree->targetlist;
@@ -7035,7 +7032,7 @@ is_projection_capable_path(Path *path)
 	{
 		case T_Hash:
 		case T_Material:
-		case T_ResultCache:
+		case T_Memoize:
 		case T_Sort:
 		case T_IncrementalSort:
 		case T_Unique:
@@ -7085,7 +7082,7 @@ is_projection_capable_plan(Plan *plan)
 	{
 		case T_Hash:
 		case T_Material:
-		case T_ResultCache:
+		case T_Memoize:
 		case T_Sort:
 		case T_Unique:
 		case T_SetOp:
diff --git a/src/backend/optimizer/plan/initsplan.c b/src/backend/optimizer/plan/initsplan.c
index 3ac853d9ef..e25dc9a7ca 100644
--- a/src/backend/optimizer/plan/initsplan.c
+++ b/src/backend/optimizer/plan/initsplan.c
@@ -78,7 +78,7 @@ static bool check_equivalence_delay(PlannerInfo *root,
 static bool check_redundant_nullability_qual(PlannerInfo *root, Node *clause);
 static void check_mergejoinable(RestrictInfo *restrictinfo);
 static void check_hashjoinable(RestrictInfo *restrictinfo);
-static void check_resultcacheable(RestrictInfo *restrictinfo);
+static void check_memoizable(RestrictInfo *restrictinfo);
 
 
 /*****************************************************************************
@@ -2212,10 +2212,10 @@ distribute_restrictinfo_to_rels(PlannerInfo *root,
 
 			/*
 			 * Likewise, check if the clause is suitable to be used with a
-			 * Result Cache node to cache inner tuples during a parameterized
+			 * Memoize node to cache inner tuples during a parameterized
 			 * nested loop.
 			 */
-			check_resultcacheable(restrictinfo);
+			check_memoizable(restrictinfo);
 
 			/*
 			 * Add clause to the join lists of all the relevant relations.
@@ -2459,7 +2459,7 @@ build_implied_join_equality(PlannerInfo *root,
 	/* Set mergejoinability/hashjoinability flags */
 	check_mergejoinable(restrictinfo);
 	check_hashjoinable(restrictinfo);
-	check_resultcacheable(restrictinfo);
+	check_memoizable(restrictinfo);
 
 	return restrictinfo;
 }
@@ -2709,13 +2709,13 @@ check_hashjoinable(RestrictInfo *restrictinfo)
 }
 
 /*
- * check_resultcacheable
- *	  If the restrictinfo's clause is suitable to be used for a Result Cache
- *	  node, set the hasheqoperator to the hash equality operator that will be
- *	  needed during caching.
+ * check_memoizable
+ *	  If the restrictinfo's clause is suitable to be used for a Memoize node,
+ *	  set the hasheqoperator to the hash equality operator that will be needed
+ *	  during caching.
 */
 static void
-check_resultcacheable(RestrictInfo *restrictinfo)
+check_memoizable(RestrictInfo *restrictinfo)
 {
 	TypeCacheEntry *typentry;
 	Expr	   *clause = restrictinfo->clause;
diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c
index 210c4b3b14..26f6872b4b 100644
--- a/src/backend/optimizer/plan/setrefs.c
+++ b/src/backend/optimizer/plan/setrefs.c
@@ -752,19 +752,19 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset)
 			set_hash_references(root, plan, rtoffset);
 			break;
 
-		case T_ResultCache:
+		case T_Memoize:
 			{
-				ResultCache *rcplan = (ResultCache *) plan;
+				Memoize    *mplan = (Memoize *) plan;
 
 				/*
-				 * Result Cache does not evaluate its targetlist.  It just
-				 * uses the same targetlist from its outer subnode.
+				 * Memoize does not evaluate its targetlist.  It just uses the
+				 * same targetlist from its outer subnode.
 				 */
 				set_dummy_tlist_references(plan, rtoffset);
 
-				rcplan->param_exprs = fix_scan_list(root, rcplan->param_exprs,
-													rtoffset,
-													NUM_EXEC_TLIST(plan));
+				mplan->param_exprs = fix_scan_list(root, mplan->param_exprs,
+												   rtoffset,
+												   NUM_EXEC_TLIST(plan));
 				break;
 			}
diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c
index 0881a208ac..b5a61f3933 100644
--- a/src/backend/optimizer/plan/subselect.c
+++ b/src/backend/optimizer/plan/subselect.c
@@ -2745,8 +2745,8 @@ finalize_plan(PlannerInfo *root, Plan *plan,
 			/* rescan_param does *not* get added to scan_params */
 			break;
 
-		case T_ResultCache:
-			finalize_primnode((Node *) ((ResultCache *) plan)->param_exprs,
+		case T_Memoize:
+			finalize_primnode((Node *) ((Memoize *) plan)->param_exprs,
 							  &context);
 			break;
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index 9ce5f95e3b..0c94cbe767 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -1577,20 +1577,19 @@ create_material_path(RelOptInfo *rel, Path *subpath)
 }
 
 /*
- * create_resultcache_path
- *	  Creates a path corresponding to a ResultCache plan, returning the
- *	  pathnode.
+ * create_memoize_path
+ *	  Creates a path corresponding to a Memoize plan, returning the pathnode.
 */
-ResultCachePath *
-create_resultcache_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
-						List *param_exprs, List *hash_operators,
-						bool singlerow, double calls)
+MemoizePath *
+create_memoize_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
+					List *param_exprs, List *hash_operators,
+					bool singlerow, double calls)
 {
-	ResultCachePath *pathnode = makeNode(ResultCachePath);
+	MemoizePath *pathnode = makeNode(MemoizePath);
 
 	Assert(subpath->parent == rel);
 
-	pathnode->path.pathtype = T_ResultCache;
+	pathnode->path.pathtype = T_Memoize;
 	pathnode->path.parent = rel;
 	pathnode->path.pathtarget = rel->reltarget;
 	pathnode->path.param_info = subpath->param_info;
@@ -1607,17 +1606,16 @@ create_resultcache_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
 	pathnode->calls = calls;
 
 	/*
-	 * For now we set est_entries to 0.  cost_resultcache_rescan() does all
-	 * the hard work to determine how many cache entries there are likely to
-	 * be, so it seems best to leave it up to that function to fill this field
-	 * in.  If left at 0, the executor will make a guess at a good value.
+	 * For now we set est_entries to 0.  cost_memoize_rescan() does all the
+	 * hard work to determine how many cache entries there are likely to be,
+	 * so it seems best to leave it up to that function to fill this field in.
+	 * If left at 0, the executor will make a guess at a good value.
 	 */
 	pathnode->est_entries = 0;
 
 	/*
 	 * Add a small additional charge for caching the first entry.  All the
-	 * harder calculations for rescans are performed in
-	 * cost_resultcache_rescan().
+	 * harder calculations for rescans are performed in cost_memoize_rescan().
 	 */
 	pathnode->path.startup_cost = subpath->startup_cost + cpu_tuple_cost;
 	pathnode->path.total_cost = subpath->total_cost + cpu_tuple_cost;
@@ -3936,16 +3934,16 @@ reparameterize_path(PlannerInfo *root, Path *path,
 											apath->path.parallel_aware,
 											-1);
 			}
-		case T_ResultCache:
+		case T_Memoize:
 			{
-				ResultCachePath *rcpath = (ResultCachePath *) path;
+				MemoizePath *mpath = (MemoizePath *) path;
 
-				return (Path *) create_resultcache_path(root, rel,
-														 rcpath->subpath,
-														 rcpath->param_exprs,
-														 rcpath->hash_operators,
-														 rcpath->singlerow,
-														 rcpath->calls);
+				return (Path *) create_memoize_path(root, rel,
+													mpath->subpath,
+													mpath->param_exprs,
+													mpath->hash_operators,
+													mpath->singlerow,
+													mpath->calls);
 			}
 		default:
 			break;
@@ -4165,13 +4163,13 @@ do { \
 			}
 			break;
 
-		case T_ResultCachePath:
+		case T_MemoizePath:
 			{
-				ResultCachePath *rcpath;
+				MemoizePath *mpath;
 
-				FLAT_COPY_PATH(rcpath, path, ResultCachePath);
-				REPARAMETERIZE_CHILD_PATH(rcpath->subpath);
-				new_path = (Path *) rcpath;
+				FLAT_COPY_PATH(mpath, path, MemoizePath);
+				REPARAMETERIZE_CHILD_PATH(mpath->subpath);
+				new_path = (Path *) mpath;
 			}
 			break;
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 45a053ca40..a2e0f8de7e 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -1058,12 +1058,12 @@ static struct config_bool ConfigureNamesBool[] =
 		NULL, NULL, NULL
 	},
 	{
-		{"enable_resultcache", PGC_USERSET, QUERY_TUNING_METHOD,
-			gettext_noop("Enables the planner's use of result caching."),
+		{"enable_memoize", PGC_USERSET, QUERY_TUNING_METHOD,
+			gettext_noop("Enables the planner's use of memoization."),
 			NULL,
 			GUC_EXPLAIN
 		},
-		&enable_resultcache,
+		&enable_memoize,
 		true,
 		NULL, NULL, NULL
 	},
diff --git a/src/backend/utils/misc/postgresql.conf.sample b/src/backend/utils/misc/postgresql.conf.sample
index b696abfe54..ccaaf63850 100644
--- a/src/backend/utils/misc/postgresql.conf.sample
+++ b/src/backend/utils/misc/postgresql.conf.sample
@@ -367,7 +367,7 @@
 #enable_indexscan = on
 #enable_indexonlyscan = on
 #enable_material = on
-#enable_resultcache = on
+#enable_memoize = on
 #enable_mergejoin = on
 #enable_nestloop = on
 #enable_parallel_append = on
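
For illustration only: after the rename, the GUC is toggled like any other planner method flag; enable_resultcache no longer exists, so scripts referring to the old name need updating:

SET enable_memoize TO off;  -- planner stops considering Memoize paths
SHOW enable_memoize;
RESET enable_memoize;
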
diff --git a/src/include/executor/nodeMemoize.h b/src/include/executor/nodeMemoize.h
new file mode 100644
index 0000000000..898fa43816
--- /dev/null
+++ b/src/include/executor/nodeMemoize.h
@@ -0,0 +1,32 @@
+/*-------------------------------------------------------------------------
+ *
+ * nodeMemoize.h
+ *
+ *
+ *
+ * Portions Copyright (c) 2021, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/include/executor/nodeMemoize.h
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef NODEMEMOIZE_H
+#define NODEMEMOIZE_H
+
+#include "access/parallel.h"
+#include "nodes/execnodes.h"
+
+extern MemoizeState *ExecInitMemoize(Memoize *node, EState *estate, int eflags);
+extern void ExecEndMemoize(MemoizeState *node);
+extern void ExecReScanMemoize(MemoizeState *node);
+extern double ExecEstimateCacheEntryOverheadBytes(double ntuples);
+extern void ExecMemoizeEstimate(MemoizeState *node,
+								ParallelContext *pcxt);
+extern void ExecMemoizeInitializeDSM(MemoizeState *node,
+									 ParallelContext *pcxt);
+extern void ExecMemoizeInitializeWorker(MemoizeState *node,
+										ParallelWorkerContext *pwcxt);
+extern void ExecMemoizeRetrieveInstrumentation(MemoizeState *node);
+
+#endif							/* NODEMEMOIZE_H */
diff --git a/src/include/executor/nodeResultCache.h b/src/include/executor/nodeResultCache.h
deleted file mode 100644
index e7a3e7ab9c..0000000000
--- a/src/include/executor/nodeResultCache.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * nodeResultCache.h
- *
- *
- *
- * Portions Copyright (c) 2021, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
- *
- * src/include/executor/nodeResultCache.h
- *
- *-------------------------------------------------------------------------
- */
-#ifndef NODERESULTCACHE_H
-#define NODERESULTCACHE_H
-
-#include "access/parallel.h"
-#include "nodes/execnodes.h"
-
-extern ResultCacheState *ExecInitResultCache(ResultCache *node, EState *estate, int eflags);
-extern void ExecEndResultCache(ResultCacheState *node);
-extern void ExecReScanResultCache(ResultCacheState *node);
-extern double ExecEstimateCacheEntryOverheadBytes(double ntuples);
-extern void ExecResultCacheEstimate(ResultCacheState *node,
-									ParallelContext *pcxt);
-extern void ExecResultCacheInitializeDSM(ResultCacheState *node,
-										 ParallelContext *pcxt);
-extern void ExecResultCacheInitializeWorker(ResultCacheState *node,
-											ParallelWorkerContext *pwcxt);
-extern void ExecResultCacheRetrieveInstrumentation(ResultCacheState *node);
-
-#endif							/* NODERESULTCACHE_H */
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index 0ec5509e7e..105180764e 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -2046,11 +2046,11 @@ typedef struct MaterialState
 	Tuplestorestate *tuplestorestate;
 } MaterialState;
 
-struct ResultCacheEntry;
-struct ResultCacheTuple;
-struct ResultCacheKey;
+struct MemoizeEntry;
+struct MemoizeTuple;
+struct MemoizeKey;
 
-typedef struct ResultCacheInstrumentation
+typedef struct MemoizeInstrumentation
 {
 	uint64		cache_hits;		/* number of rescans where we've found the
 								 * scan parameter values to be cached */
@@ -2063,31 +2063,31 @@ typedef struct ResultCacheInstrumentation
 								 * able to free enough space to store the
 								 * current scan's tuples. */
 	uint64		mem_peak;		/* peak memory usage in bytes */
-} ResultCacheInstrumentation;
+} MemoizeInstrumentation;
 
 /* ----------------
- *	 Shared memory container for per-worker resultcache information
+ *	 Shared memory container for per-worker memoize information
 * ----------------
 */
-typedef struct SharedResultCacheInfo
+typedef struct SharedMemoizeInfo
 {
 	int			num_workers;
-	ResultCacheInstrumentation sinstrument[FLEXIBLE_ARRAY_MEMBER];
-} SharedResultCacheInfo;
+	MemoizeInstrumentation sinstrument[FLEXIBLE_ARRAY_MEMBER];
+} SharedMemoizeInfo;
 
 /* ----------------
- *	 ResultCacheState information
+ *	 MemoizeState information
 *
- *		resultcache nodes are used to cache recent and commonly seen results
- *		from a parameterized scan.
+ *		memoize nodes are used to cache recent and commonly seen results from
+ *		a parameterized scan.
 * ----------------
 */
-typedef struct ResultCacheState
+typedef struct MemoizeState
 {
 	ScanState	ss;				/* its first field is NodeTag */
-	int			rc_status;		/* value of ExecResultCache state machine */
+	int			mstatus;		/* value of ExecMemoize state machine */
 	int			nkeys;			/* number of cache keys */
-	struct resultcache_hash *hashtable; /* hash table for cache entries */
+	struct memoize_hash *hashtable; /* hash table for cache entries */
 	TupleDesc	hashkeydesc;	/* tuple descriptor for cache keys */
 	TupleTableSlot *tableslot;	/* min tuple slot for existing cache entries */
 	TupleTableSlot *probeslot;	/* virtual slot used for hash lookups */
@@ -2100,17 +2100,17 @@ typedef struct ResultCacheState
 	uint64		mem_limit;		/* memory limit in bytes for the cache */
 	MemoryContext tableContext; /* memory context to store cache data */
 	dlist_head	lru_list;		/* least recently used entry list */
-	struct ResultCacheTuple *last_tuple;	/* Used to point to the last tuple
-											 * returned during a cache hit and
-											 * the tuple we last stored when
-											 * populating the cache. */
-	struct ResultCacheEntry *entry; /* the entry that 'last_tuple' belongs to
-									 * or NULL if 'last_tuple' is NULL. */
+	struct MemoizeTuple *last_tuple;	/* Used to point to the last tuple
+										 * returned during a cache hit and the
+										 * tuple we last stored when
+										 * populating the cache. */
+	struct MemoizeEntry *entry; /* the entry that 'last_tuple' belongs to or
+								 * NULL if 'last_tuple' is NULL. */
 	bool		singlerow;		/* true if the cache entry is to be marked as
 								 * complete after caching the first tuple. */
-	ResultCacheInstrumentation stats;	/* execution statistics */
-	SharedResultCacheInfo *shared_info; /* statistics for parallel workers */
-} ResultCacheState;
+	MemoizeInstrumentation stats;	/* execution statistics */
+	SharedMemoizeInfo *shared_info; /* statistics for parallel workers */
+} MemoizeState;
 
 /* ----------------
 *	 When performing sorting by multiple keys, it's possible that the input
diff --git a/src/include/nodes/nodes.h b/src/include/nodes/nodes.h
index d9e417bcd7..f7b009ec43 100644
--- a/src/include/nodes/nodes.h
+++ b/src/include/nodes/nodes.h
@@ -74,7 +74,7 @@ typedef enum NodeTag
 	T_MergeJoin,
 	T_HashJoin,
 	T_Material,
-	T_ResultCache,
+	T_Memoize,
 	T_Sort,
 	T_IncrementalSort,
 	T_Group,
@@ -133,7 +133,7 @@ typedef enum NodeTag
 	T_MergeJoinState,
 	T_HashJoinState,
 	T_MaterialState,
-	T_ResultCacheState,
+	T_MemoizeState,
 	T_SortState,
 	T_IncrementalSortState,
 	T_GroupState,
@@ -244,7 +244,7 @@ typedef enum NodeTag
 	T_MergeAppendPath,
 	T_GroupResultPath,
 	T_MaterialPath,
-	T_ResultCachePath,
+	T_MemoizePath,
 	T_UniquePath,
 	T_GatherPath,
 	T_GatherMergePath,
diff --git a/src/include/nodes/pathnodes.h b/src/include/nodes/pathnodes.h
index bebf774818..a692bcfb53 100644
--- a/src/include/nodes/pathnodes.h
+++ b/src/include/nodes/pathnodes.h
@@ -1495,11 +1495,11 @@ typedef struct MaterialPath
 } MaterialPath;
 
 /*
- * ResultCachePath represents a ResultCache plan node, i.e., a cache that
- * caches tuples from parameterized paths to save the underlying node from
- * having to be rescanned for parameter values which are already cached.
+ * MemoizePath represents a Memoize plan node, i.e., a cache that caches
+ * tuples from parameterized paths to save the underlying node from having to
+ * be rescanned for parameter values which are already cached.
 */
-typedef struct ResultCachePath
+typedef struct MemoizePath
 {
 	Path		path;
 	Path	   *subpath;		/* outerpath to cache tuples from */
@@ -1511,7 +1511,7 @@ typedef struct ResultCachePath
 	uint32		est_entries;	/* The maximum number of entries that the
 								 * planner expects will fit in the cache, or 0
 								 * if unknown */
-} ResultCachePath;
+} MemoizePath;
 
 /*
 * UniquePath represents elimination of distinct rows from the output of
@@ -2111,7 +2111,7 @@ typedef struct RestrictInfo
 	Selectivity left_mcvfreq;	/* left side's most common val's freq */
 	Selectivity right_mcvfreq;	/* right side's most common val's freq */
 
-	/* hash equality operator used for result cache, else InvalidOid */
+	/* hash equality operator used for memoize nodes, else InvalidOid */
 	Oid			hasheqoperator;
 } RestrictInfo;
diff --git a/src/include/nodes/plannodes.h b/src/include/nodes/plannodes.h
index aaa3b65d04..98a4c73f93 100644
--- a/src/include/nodes/plannodes.h
+++ b/src/include/nodes/plannodes.h
@@ -781,10 +781,10 @@ typedef struct Material
 } Material;
 
 /* ----------------
- *		result cache node
+ *		memoize node
 * ----------------
 */
-typedef struct ResultCache
+typedef struct Memoize
 {
 	Plan		plan;
 
@@ -799,7 +799,7 @@ typedef struct ResultCache
 	uint32		est_entries;	/* The maximum number of entries that the
 								 * planner expects will fit in the cache, or 0
 								 * if unknown */
-} ResultCache;
+} Memoize;
 
 /* ----------------
 *		sort node
diff --git a/src/include/optimizer/cost.h b/src/include/optimizer/cost.h
index 0fe60d82e4..2113bc82de 100644
--- a/src/include/optimizer/cost.h
+++ b/src/include/optimizer/cost.h
@@ -57,7 +57,7 @@ extern PGDLLIMPORT bool enable_incremental_sort;
 extern PGDLLIMPORT bool enable_hashagg;
 extern PGDLLIMPORT bool enable_nestloop;
 extern PGDLLIMPORT bool enable_material;
-extern PGDLLIMPORT bool enable_resultcache;
+extern PGDLLIMPORT bool enable_memoize;
 extern PGDLLIMPORT bool enable_mergejoin;
 extern PGDLLIMPORT bool enable_hashjoin;
 extern PGDLLIMPORT bool enable_gathermerge;
diff --git a/src/include/optimizer/pathnode.h b/src/include/optimizer/pathnode.h
index 53261ee91f..f704d39980 100644
--- a/src/include/optimizer/pathnode.h
+++ b/src/include/optimizer/pathnode.h
@@ -82,13 +82,13 @@ extern GroupResultPath *create_group_result_path(PlannerInfo *root,
 												 PathTarget *target,
 												 List *havingqual);
 extern MaterialPath *create_material_path(RelOptInfo *rel, Path *subpath);
-extern ResultCachePath *create_resultcache_path(PlannerInfo *root,
-												RelOptInfo *rel,
-												Path *subpath,
-												List *param_exprs,
-												List *hash_operators,
-												bool singlerow,
-												double calls);
+extern MemoizePath *create_memoize_path(PlannerInfo *root,
+										RelOptInfo *rel,
+										Path *subpath,
+										List *param_exprs,
+										List *hash_operators,
+										bool singlerow,
+										double calls);
 extern UniquePath *create_unique_path(PlannerInfo *root, RelOptInfo *rel,
 									  Path *subpath, SpecialJoinInfo *sjinfo);
 extern GatherPath *create_gather_path(PlannerInfo *root,
diff --git a/src/test/regress/expected/aggregates.out b/src/test/regress/expected/aggregates.out
index ca06d41dd0..23b112b2af 100644
--- a/src/test/regress/expected/aggregates.out
+++ b/src/test/regress/expected/aggregates.out
@@ -2584,7 +2584,7 @@ select v||'a', case when v||'a' = 'aa' then 1 else 0 end, count(*)
 -- Make sure that generation of HashAggregate for uniqification purposes
 -- does not lead to array overflow due to unexpected duplicate hash keys
 -- see CAFeeJoKKu0u+A_A9R9316djW-YW3-+Gtgvy3ju655qRHR3jtdA@mail.gmail.com
-set enable_resultcache to off;
+set enable_memoize to off;
 explain (costs off)
   select 1 from tenk1
    where (hundred, thousand) in (select twothousand, twothousand from onek);
@@ -2600,7 +2600,7 @@ explain (costs off)
          ->  Seq Scan on onek
 (8 rows)
 
-reset enable_resultcache;
+reset enable_memoize;
 --
 -- Hash Aggregation Spill tests
 --
diff --git a/src/test/regress/expected/join.out b/src/test/regress/expected/join.out
index 19cd056987..f3589d0dbb 100644
--- a/src/test/regress/expected/join.out
+++ b/src/test/regress/expected/join.out
@@ -2536,7 +2536,7 @@ reset enable_nestloop;
 --
 set work_mem to '64kB';
 set enable_mergejoin to off;
-set enable_resultcache to off;
+set enable_memoize to off;
 explain (costs off)
 select count(*) from tenk1 a, tenk1 b
   where a.hundred = b.thousand and (b.fivethous % 10) < 10;
@@ -2560,7 +2560,7 @@ select count(*) from tenk1 a, tenk1 b
 
 reset work_mem;
 reset enable_mergejoin;
-reset enable_resultcache;
+reset enable_memoize;
 --
 -- regression test for 8.2 bug with improper re-ordering of left joins
 --
@@ -3684,7 +3684,7 @@ where t1.unique1 = 1;
                Recheck Cond: (t1.hundred = hundred)
                ->  Bitmap Index Scan on tenk1_hundred
                      Index Cond: (hundred = t1.hundred)
-         ->  Result Cache
+         ->  Memoize
                Cache Key: t2.thousand
                ->  Index Scan using tenk1_unique2 on tenk1 t3
                      Index Cond: (unique2 = t2.thousand)
@@ -3706,7 +3706,7 @@ where t1.unique1 = 1;
                Recheck Cond: (t1.hundred = hundred)
                ->  Bitmap Index Scan on tenk1_hundred
                      Index Cond: (hundred = t1.hundred)
-         ->  Result Cache
+         ->  Memoize
                Cache Key: t2.thousand
                ->  Index Scan using tenk1_unique2 on tenk1 t3
                      Index Cond: (unique2 = t2.thousand)
@@ -4235,7 +4235,7 @@ where t1.f1 = ss.f1;
                ->  Seq Scan on public.int8_tbl i8
                      Output: i8.q1, i8.q2
                      Filter: (i8.q2 = 123)
-               ->  Result Cache
+               ->  Memoize
                      Output: (i8.q1), t2.f1
                      Cache Key: i8.q1
                      ->  Limit
@@ -4279,14 +4279,14 @@ where t1.f1 = ss2.f1;
               ->  Seq Scan on public.int8_tbl i8
                     Output: i8.q1, i8.q2
                     Filter: (i8.q2 = 123)
-              ->  Result Cache
+              ->  Memoize
                    Output: (i8.q1), t2.f1
                    Cache Key: i8.q1
                    ->  Limit
                          Output: (i8.q1), t2.f1
                          ->  Seq Scan on public.text_tbl t2
                                Output: i8.q1, t2.f1
-        ->  Result Cache
+        ->  Memoize
              Output: ((i8.q1)), (t2.f1)
              Cache Key: (i8.q1), t2.f1
             ->  Limit
@@ -4339,7 +4339,7 @@ where tt1.f1 = ss1.c0;
             ->  Seq Scan on public.text_tbl tt4
                   Output: tt4.f1
                   Filter: (tt4.f1 = 'foo'::text)
-        ->  Result Cache
+        ->  Memoize
              Output: ss1.c0
              Cache Key: tt4.f1
             ->  Subquery Scan on ss1
@@ -5028,7 +5028,7 @@ explain (costs off)
 Aggregate
   ->  Nested Loop
         ->  Seq Scan on tenk1 a
-        ->  Result Cache
+        ->  Memoize
             Cache Key: a.two
             ->  Function Scan on generate_series g
 (6 rows)
@@ -5040,7 +5040,7 @@ explain (costs off)
 Aggregate
   ->  Nested Loop
         ->  Seq Scan on tenk1 a
-        ->  Result Cache
+        ->  Memoize
             Cache Key: a.two
             ->  Function Scan on generate_series g
 (6 rows)
@@ -5053,7 +5053,7 @@ explain (costs off)
 Aggregate
   ->  Nested Loop
         ->  Seq Scan on tenk1 a
-        ->  Result Cache
+        ->  Memoize
             Cache Key: a.two
             ->  Function Scan on generate_series g
 (6 rows)
@@ -5115,7 +5115,7 @@ explain (costs off)
         ->  Nested Loop
               ->  Index Only Scan using tenk1_unique1 on tenk1 a
               ->  Values Scan on "*VALUES*"
-  ->  Result Cache
+  ->  Memoize
        Cache Key: "*VALUES*".column1
        ->  Index Only Scan using tenk1_unique2 on tenk1 b
              Index Cond: (unique2 = "*VALUES*".column1)
diff --git a/src/test/regress/expected/resultcache.out b/src/test/regress/expected/memoize.out
similarity index 88%
rename from src/test/regress/expected/resultcache.out
rename to src/test/regress/expected/memoize.out
index 5b5dd6838e..9a025c4a7a 100644
--- a/src/test/regress/expected/resultcache.out
+++ b/src/test/regress/expected/memoize.out
@@ -1,9 +1,9 @@
--- Perform tests on the Result Cache node.
--- The cache hits/misses/evictions from the Result Cache node can vary between
+-- Perform tests on the Memoize node.
+-- The cache hits/misses/evictions from the Memoize node can vary between
 -- machines.  Let's just replace the number with an 'N'.  In order to allow us
 -- to perform validation when the measure was zero, we replace a zero value
 -- with "Zero".  All other numbers are replaced with 'N'.
-create function explain_resultcache(query text, hide_hitmiss bool) returns setof text
+create function explain_memoize(query text, hide_hitmiss bool) returns setof text
 language plpgsql as
 $$
 declare
@@ -28,21 +28,21 @@ begin
     end loop;
 end;
 $$;
--- Ensure we get a result cache on the inner side of the nested loop
+-- Ensure we get a memoize node on the inner side of the nested loop
 SET enable_hashjoin TO off;
 SET enable_bitmapscan TO off;
-SELECT explain_resultcache('
+SELECT explain_memoize('
 SELECT COUNT(*),AVG(t1.unique1) FROM tenk1 t1
 INNER JOIN tenk1 t2 ON t1.unique1 = t2.twenty
 WHERE t2.unique1 < 1000;', false);
-                                    explain_resultcache
+                                      explain_memoize
 -------------------------------------------------------------------------------------------
 Aggregate (actual rows=1 loops=N)
   ->  Nested Loop (actual rows=1000 loops=N)
        ->  Seq Scan on tenk1 t2 (actual rows=1000 loops=N)
             Filter: (unique1 < 1000)
             Rows Removed by Filter: 9000
-       ->  Result Cache (actual rows=1 loops=N)
+       ->  Memoize (actual rows=1 loops=N)
             Cache Key: t2.twenty
             Hits: 980  Misses: 20  Evictions: Zero  Overflows: 0  Memory Usage: NkB
            ->  Index Only Scan using tenk1_unique1 on tenk1 t1 (actual rows=1 loops=N)
@@ -60,18 +60,18 @@ WHERE t2.unique1 < 1000;
 (1 row)
 
 -- Try with LATERAL joins
-SELECT explain_resultcache('
+SELECT explain_memoize('
 SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1,
 LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2
 WHERE t1.unique1 < 1000;', false);
-                                    explain_resultcache
+                                      explain_memoize
 -------------------------------------------------------------------------------------------
 Aggregate (actual rows=1 loops=N)
   ->  Nested Loop (actual rows=1000 loops=N)
        ->  Seq Scan on tenk1 t1 (actual rows=1000 loops=N)
             Filter: (unique1 < 1000)
             Rows Removed by Filter: 9000
-       ->  Result Cache (actual rows=1 loops=N)
+       ->  Memoize (actual rows=1 loops=N)
             Cache Key: t1.twenty
             Hits: 980  Misses: 20  Evictions: Zero  Overflows: 0  Memory Usage: NkB
            ->  Index Only Scan using tenk1_unique1 on tenk1 t2 (actual rows=1 loops=N)
@@ -94,18 +94,18 @@ SET enable_mergejoin TO off;
 -- Ensure we get some evictions.  We're unable to validate the hits and misses
 -- here as the number of entries that fit in the cache at once will vary
 -- between different machines.
-SELECT explain_resultcache('
+SELECT explain_memoize('
 SELECT COUNT(*),AVG(t1.unique1) FROM tenk1 t1
 INNER JOIN tenk1 t2 ON t1.unique1 = t2.thousand
 WHERE t2.unique1 < 1200;', true);
-                                    explain_resultcache
+                                      explain_memoize
 -------------------------------------------------------------------------------------------
 Aggregate (actual rows=1 loops=N)
   ->  Nested Loop (actual rows=1200 loops=N)
        ->  Seq Scan on tenk1 t2 (actual rows=1200 loops=N)
             Filter: (unique1 < 1200)
             Rows Removed by Filter: 8800
-       ->  Result Cache (actual rows=1 loops=N)
+       ->  Memoize (actual rows=1 loops=N)
             Cache Key: t2.thousand
             Hits: N  Misses: N  Evictions: N  Overflows: 0  Memory Usage: NkB
            ->  Index Only Scan using tenk1_unique1 on tenk1 t1 (actual rows=1 loops=N)
@@ -117,7 +117,7 @@ RESET enable_mergejoin;
 RESET work_mem;
 RESET enable_bitmapscan;
 RESET enable_hashjoin;
--- Test parallel plans with Result Cache.
+-- Test parallel plans with Memoize
 SET min_parallel_table_scan_size TO 0;
 SET parallel_setup_cost TO 0;
 SET parallel_tuple_cost TO 0;
@@ -138,7 +138,7 @@ WHERE t1.unique1 < 1000;
                Recheck Cond: (unique1 < 1000)
                ->  Bitmap Index Scan on tenk1_unique1
                      Index Cond: (unique1 < 1000)
-         ->  Result Cache
+         ->  Memoize
                Cache Key: t1.twenty
                ->  Index Only Scan using tenk1_unique1 on tenk1 t2
                      Index Cond: (unique1 = t1.twenty)
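
For illustration only: the eviction test above depends on the cache honoring its memory budget; with a small work_mem the node reports nonzero Evictions in EXPLAIN ANALYZE. A sketch along the lines of that test (hit/miss/eviction counts are machine-dependent, which is exactly why the test masks them with 'N'):

SET work_mem TO '64kB';
EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, SUMMARY OFF)
SELECT COUNT(*), AVG(t1.unique1) FROM tenk1 t1
INNER JOIN tenk1 t2 ON t1.unique1 = t2.thousand
WHERE t2.unique1 < 1200;   -- expect Evictions > 0 in the Memoize line
RESET work_mem;
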
diff --git a/src/test/regress/expected/partition_prune.out b/src/test/regress/expected/partition_prune.out
index 2c62e4a7a6..7555764c77 100644
--- a/src/test/regress/expected/partition_prune.out
+++ b/src/test/regress/expected/partition_prune.out
@@ -2085,7 +2085,7 @@ create index ab_a3_b2_a_idx on ab_a3_b2 (a);
 create index ab_a3_b3_a_idx on ab_a3_b3 (a);
 set enable_hashjoin = 0;
 set enable_mergejoin = 0;
-set enable_resultcache = 0;
+set enable_memoize = 0;
 select explain_parallel_append('select avg(ab.a) from ab inner join lprt_a a on ab.a = a.a where a.a in(0, 0, 1)');
                                        explain_parallel_append
 --------------------------------------------------------------------------------------------------------
@@ -2254,7 +2254,7 @@ select explain_parallel_append('select avg(ab.a) from ab inner join lprt_a a on
 
 reset enable_hashjoin;
 reset enable_mergejoin;
-reset enable_resultcache;
+reset enable_memoize;
 reset parallel_setup_cost;
 reset parallel_tuple_cost;
 reset min_parallel_table_scan_size;
diff --git a/src/test/regress/expected/subselect.out b/src/test/regress/expected/subselect.out
index c7986fb7fc..30615dd6bc 100644
--- a/src/test/regress/expected/subselect.out
+++ b/src/test/regress/expected/subselect.out
@@ -1097,7 +1097,7 @@ where o.ten = 1;
    ->  Nested Loop
          ->  Seq Scan on onek o
                Filter: (ten = 1)
-         ->  Result Cache
+         ->  Memoize
                Cache Key: o.four
                ->  CTE Scan on x
                      CTE x
diff --git a/src/test/regress/expected/sysviews.out b/src/test/regress/expected/sysviews.out
index 0bb558d93c..6e54f3e15e 100644
--- a/src/test/regress/expected/sysviews.out
+++ b/src/test/regress/expected/sysviews.out
@@ -104,6 +104,7 @@ select name, setting from pg_settings where name like 'enable%';
 enable_indexonlyscan           | on
 enable_indexscan               | on
 enable_material                | on
+ enable_memoize                 | on
 enable_mergejoin               | on
 enable_nestloop                | on
 enable_parallel_append         | on
@@ -111,7 +112,6 @@ select name, setting from pg_settings where name like 'enable%';
 enable_partition_pruning       | on
 enable_partitionwise_aggregate | off
 enable_partitionwise_join      | off
- enable_resultcache             | on
 enable_seqscan                 | on
 enable_sort                    | on
 enable_tidscan                 | on
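
For illustration only: the rename is user-visible through pg_settings, which is what the sysviews change above asserts; the new name sorts between enable_material and enable_mergejoin:

SELECT name, setting FROM pg_settings WHERE name = 'enable_memoize';
--       name      | setting
-- ----------------+---------
--  enable_memoize | on
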
diff --git a/src/test/regress/parallel_schedule b/src/test/regress/parallel_schedule
index 22b0d3584d..7be89178f0 100644
--- a/src/test/regress/parallel_schedule
+++ b/src/test/regress/parallel_schedule
@@ -120,7 +120,7 @@ test: plancache limit plpgsql copy2 temp domain rangefuncs prepare conversion tr
 # ----------
 # Another group of parallel tests
 # ----------
-test: partition_join partition_prune reloptions hash_part indexing partition_aggregate partition_info tuplesort explain compression resultcache
+test: partition_join partition_prune reloptions hash_part indexing partition_aggregate partition_info tuplesort explain compression memoize
 
 # event triggers cannot run concurrently with any test that runs DDL
 # oidjoins is read-only, though, and should run late for best coverage
diff --git a/src/test/regress/sql/aggregates.sql b/src/test/regress/sql/aggregates.sql
index eb80a2fe06..ed2d6b3bdf 100644
--- a/src/test/regress/sql/aggregates.sql
+++ b/src/test/regress/sql/aggregates.sql
@@ -1098,11 +1098,11 @@ select v||'a', case when v||'a' = 'aa' then 1 else 0 end, count(*)
 -- Make sure that generation of HashAggregate for uniqification purposes
 -- does not lead to array overflow due to unexpected duplicate hash keys
 -- see CAFeeJoKKu0u+A_A9R9316djW-YW3-+Gtgvy3ju655qRHR3jtdA@mail.gmail.com
-set enable_resultcache to off;
+set enable_memoize to off;
 explain (costs off)
   select 1 from tenk1
    where (hundred, thousand) in (select twothousand, twothousand from onek);
-reset enable_resultcache;
+reset enable_memoize;
 
 --
 -- Hash Aggregation Spill tests
diff --git a/src/test/regress/sql/join.sql b/src/test/regress/sql/join.sql
index 2a0e2d12d8..cb1c230914 100644
--- a/src/test/regress/sql/join.sql
+++ b/src/test/regress/sql/join.sql
@@ -550,7 +550,7 @@ reset enable_nestloop;
 
 set work_mem to '64kB';
 set enable_mergejoin to off;
-set enable_resultcache to off;
+set enable_memoize to off;
 
 explain (costs off)
 select count(*) from tenk1 a, tenk1 b
@@ -560,7 +560,7 @@ select count(*) from tenk1 a, tenk1 b
 
 reset work_mem;
 reset enable_mergejoin;
-reset enable_resultcache;
+reset enable_memoize;
 
 --
 -- regression test for 8.2 bug with improper re-ordering of left joins
diff --git a/src/test/regress/sql/resultcache.sql b/src/test/regress/sql/memoize.sql
similarity index 88%
rename from src/test/regress/sql/resultcache.sql
rename to src/test/regress/sql/memoize.sql
index 43a70d56a5..548cc3eee3 100644
--- a/src/test/regress/sql/resultcache.sql
+++ b/src/test/regress/sql/memoize.sql
@@ -1,10 +1,10 @@
--- Perform tests on the Result Cache node.
+-- Perform tests on the Memoize node.
 
--- The cache hits/misses/evictions from the Result Cache node can vary between
+-- The cache hits/misses/evictions from the Memoize node can vary between
 -- machines.  Let's just replace the number with an 'N'.  In order to allow us
 -- to perform validation when the measure was zero, we replace a zero value
 -- with "Zero".  All other numbers are replaced with 'N'.
-create function explain_resultcache(query text, hide_hitmiss bool) returns setof text
+create function explain_memoize(query text, hide_hitmiss bool) returns setof text
 language plpgsql as
 $$
 declare
@@ -30,11 +30,11 @@ begin
 end;
 $$;
 
--- Ensure we get a result cache on the inner side of the nested loop
+-- Ensure we get a memoize node on the inner side of the nested loop
 SET enable_hashjoin TO off;
 SET enable_bitmapscan TO off;
-SELECT explain_resultcache('
+SELECT explain_memoize('
 SELECT COUNT(*),AVG(t1.unique1) FROM tenk1 t1
 INNER JOIN tenk1 t2 ON t1.unique1 = t2.twenty
 WHERE t2.unique1 < 1000;', false);
@@ -45,7 +45,7 @@ INNER JOIN tenk1 t2 ON t1.unique1 = t2.twenty
 WHERE t2.unique1 < 1000;
 
 -- Try with LATERAL joins
-SELECT explain_resultcache('
+SELECT explain_memoize('
 SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1,
 LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2
 WHERE t1.unique1 < 1000;', false);
@@ -61,7 +61,7 @@ SET enable_mergejoin TO off;
 -- Ensure we get some evictions.  We're unable to validate the hits and misses
 -- here as the number of entries that fit in the cache at once will vary
 -- between different machines.
-SELECT explain_resultcache('
+SELECT explain_memoize('
 SELECT COUNT(*),AVG(t1.unique1) FROM tenk1 t1
 INNER JOIN tenk1 t2 ON t1.unique1 = t2.thousand
 WHERE t2.unique1 < 1200;', true);
@@ -70,7 +70,7 @@ RESET work_mem;
 RESET enable_bitmapscan;
 RESET enable_hashjoin;
 
--- Test parallel plans with Result Cache.
+-- Test parallel plans with Memoize
 SET min_parallel_table_scan_size TO 0;
 SET parallel_setup_cost TO 0;
 SET parallel_tuple_cost TO 0;
diff --git a/src/test/regress/sql/partition_prune.sql b/src/test/regress/sql/partition_prune.sql
index 16c8dc5f1f..d70bd8610c 100644
--- a/src/test/regress/sql/partition_prune.sql
+++ b/src/test/regress/sql/partition_prune.sql
@@ -515,7 +515,7 @@ create index ab_a3_b3_a_idx on ab_a3_b3 (a);
 
 set enable_hashjoin = 0;
 set enable_mergejoin = 0;
-set enable_resultcache = 0;
+set enable_memoize = 0;
 
 select explain_parallel_append('select avg(ab.a) from ab inner join lprt_a a on ab.a = a.a where a.a in(0, 0, 1)');
 
@@ -534,7 +534,7 @@ select explain_parallel_append('select avg(ab.a) from ab inner join lprt_a a on
 
 reset enable_hashjoin;
 reset enable_mergejoin;
-reset enable_resultcache;
+reset enable_memoize;
 reset parallel_setup_cost;
 reset parallel_tuple_cost;
 reset min_parallel_table_scan_size;
diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list
index 9a0936ead1..b287c29f64 100644
--- a/src/tools/pgindent/typedefs.list
+++ b/src/tools/pgindent/typedefs.list
@@ -1421,6 +1421,13 @@ Material
 MaterialPath
 MaterialState
 MdfdVec
+Memoize
+MemoizeEntry
+MemoizeInstrumentation
+MemoizeKey
+MemoizePath
+MemoizeState
+MemoizeTuple
 MemoryContext
 MemoryContextCallback
 MemoryContextCallbackFunction
@@ -2222,13 +2229,6 @@ RestoreOptions
 RestorePass
 RestrictInfo
 Result
-ResultCache
-ResultCacheEntry
-ResultCacheInstrumentation
-ResultCacheKey
-ResultCachePath
-ResultCacheState
-ResultCacheTuple
 ResultRelInfo
 ResultState
 ReturnSetInfo
@@ -2384,10 +2384,10 @@ SharedInvalSmgrMsg
 SharedInvalSnapshotMsg
 SharedInvalidationMessage
 SharedJitInstrumentation
+SharedMemoizeInfo
 SharedRecordTableEntry
 SharedRecordTableKey
 SharedRecordTypmodRegistry
-SharedResultCacheInfo
 SharedSortInfo
 SharedTuplestore
 SharedTuplestoreAccessor
@@ -3272,6 +3272,8 @@ mbcharacter_incrementer
 mbdisplaylen_converter
 mblen_converter
 mbstr_verifier
+memoize_hash
+memoize_iterator
 metastring
 mix_data_t
 mixedStruct
@@ -3478,8 +3480,6 @@ remoteDep
 rendezvousHashEntry
 replace_rte_variables_callback
 replace_rte_variables_context
-resultcache_hash
-resultcache_iterator
 ret_type
 rewind_source
 rewrite_event