/*-------------------------------------------------------------------------
*
* nodeHash.c
* Routines to hash relations for hashjoin
*
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeHash.c,v 1.70 2002/12/13 19:45:52 tgl Exp $
*
*-------------------------------------------------------------------------
*/
/*
* INTERFACE ROUTINES
* ExecHash - generate an in-memory hash table of the relation
* ExecInitHash - initialize node and subnodes
* ExecEndHash - shutdown node and subnodes
*/
#include "postgres.h"
#include <math.h>
#include "access/hash.h"
#include "executor/execdebug.h"
#include "executor/nodeHash.h"
#include "executor/nodeHashjoin.h"
#include "miscadmin.h"
#include "parser/parse_expr.h"
#include "utils/memutils.h"
#include "utils/lsyscache.h"
/* ----------------------------------------------------------------
* ExecHash
*
* build hash table for hashjoin, also doing partitioning if more
* than one batch is required.
* ----------------------------------------------------------------
*/
TupleTableSlot *
ExecHash(HashState *node)
{
EState *estate;
PlanState *outerNode;
List *hashkeys;
HashJoinTable hashtable;
TupleTableSlot *slot;
ExprContext *econtext;
int nbatch;
int i;
/*
* get state info from node
*/
estate = node->ps.state;
outerNode = outerPlanState(node);
hashtable = node->hashtable;
if (hashtable == NULL)
elog(ERROR, "ExecHash: hash table is NULL.");
nbatch = hashtable->nbatch;
if (nbatch > 0)
{
/*
* Open temp files for inner batches, if needed. Note that file
* buffers are palloc'd in regular executor context.
*/
for (i = 0; i < nbatch; i++)
hashtable->innerBatchFile[i] = BufFileCreateTemp();
}
/*
* set expression context
*/
hashkeys = node->hashkeys;
econtext = node->ps.ps_ExprContext;
/*
* get all inner tuples and insert into the hash table (or temp files)
*/
for (;;)
{
slot = ExecProcNode(outerNode);
if (TupIsNull(slot))
break;
econtext->ecxt_innertuple = slot;
ExecHashTableInsert(hashtable, econtext, hashkeys);
ExecClearTuple(slot);
}
/*
* Return the slot so that we have the tuple descriptor when we need
* to save/restore them. -Jeff 11 July 1991
*/
return slot;
}
/* ----------------------------------------------------------------
* ExecInitHash
*
* Init routine for Hash node
* ----------------------------------------------------------------
*/
HashState *
ExecInitHash(Hash *node, EState *estate)
{
HashState *hashstate;
SO_printf("ExecInitHash: initializing hash node\n");
/*
* create state structure
*/
hashstate = makeNode(HashState);
hashstate->ps.plan = (Plan *) node;
hashstate->ps.state = estate;
hashstate->hashtable = NULL;
/*
* Miscellaneous initialization
*
* create expression context for node
*/
ExecAssignExprContext(estate, &hashstate->ps);
#define HASH_NSLOTS 1
/*
* initialize our result slot
*/
ExecInitResultTupleSlot(estate, &hashstate->ps);
/*
* initialize child expressions
*/
hashstate->ps.targetlist = (List *)
ExecInitExpr((Expr *) node->plan.targetlist,
(PlanState *) hashstate);
hashstate->ps.qual = (List *)
ExecInitExpr((Expr *) node->plan.qual,
(PlanState *) hashstate);
/*
* initialize child nodes
*/
outerPlanState(hashstate) = ExecInitNode(outerPlan(node), estate);
/*
* initialize tuple type. no need to initialize projection info
* because this node doesn't do projections
*/
ExecAssignResultTypeFromOuterPlan(&hashstate->ps);
hashstate->ps.ps_ProjInfo = NULL;
return hashstate;
}
int
ExecCountSlotsHash(Hash *node)
{
return ExecCountSlotsNode(outerPlan(node)) +
ExecCountSlotsNode(innerPlan(node)) +
HASH_NSLOTS;
}
/* ----------------------------------------------------------------
* ExecEndHash
*
* clean up routine for Hash node
* ----------------------------------------------------------------
*/
void
ExecEndHash(HashState *node)
{
PlanState *outerPlan;
/*
* free projection info. no need to free result type info because
* that came from the outer plan...
*/
ExecFreeProjectionInfo(&node->ps);
ExecFreeExprContext(&node->ps);
/*
* shut down the subplan
*/
outerPlan = outerPlanState(node);
ExecEndNode(outerPlan);
}
/* ----------------------------------------------------------------
* ExecHashTableCreate
*
* create an empty hashtable data structure for hashjoin.
* ----------------------------------------------------------------
*/
HashJoinTable
ExecHashTableCreate(Hash *node)
{
HashJoinTable hashtable;
Plan *outerNode;
int totalbuckets;
int nbuckets;
int nbatch;
int nkeys;
int i;
List *hk;
MemoryContext oldcxt;
/*
* Get information about the size of the relation to be hashed (it's
* the "outer" subtree of this node, but the inner relation of the
* hashjoin). Compute the appropriate size of the hash table.
*/
outerNode = outerPlan(node);
ExecChooseHashTableSize(outerNode->plan_rows, outerNode->plan_width,
&totalbuckets, &nbuckets, &nbatch);
#ifdef HJDEBUG
printf("nbatch = %d, totalbuckets = %d, nbuckets = %d\n",
nbatch, totalbuckets, nbuckets);
#endif
/*
* Initialize the hash table control block.
*
* The hashtable control block is just palloc'd from the executor's
* per-query memory context.
*/
hashtable = (HashJoinTable) palloc(sizeof(HashTableData));
hashtable->nbuckets = nbuckets;
hashtable->totalbuckets = totalbuckets;
hashtable->buckets = NULL;
hashtable->nbatch = nbatch;
hashtable->curbatch = 0;
hashtable->innerBatchFile = NULL;
hashtable->outerBatchFile = NULL;
hashtable->innerBatchSize = NULL;
hashtable->outerBatchSize = NULL;
/*
* Get info about the datatypes of the hash keys.
*/
nkeys = length(node->hashkeys);
hashtable->typLens = (int16 *) palloc(nkeys * sizeof(int16));
hashtable->typByVals = (bool *) palloc(nkeys * sizeof(bool));
i = 0;
foreach(hk, node->hashkeys)
{
get_typlenbyval(exprType(lfirst(hk)),
&hashtable->typLens[i],
&hashtable->typByVals[i]);
i++;
}
/*
* Create temporary memory contexts in which to keep the hashtable
* working storage. See notes in executor/hashjoin.h.
*/
hashtable->hashCxt = AllocSetContextCreate(CurrentMemoryContext,
"HashTableContext",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
hashtable->batchCxt = AllocSetContextCreate(hashtable->hashCxt,
"HashBatchContext",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
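/*
 * The intended lifetimes (per the usage in this file): hashCxt lasts
 * for the whole join and holds the batch-file arrays; batchCxt, its
 * child, holds the bucket array and tuples of the current batch and is
 * reset between batches in ExecHashTableReset, then destroyed along
 * with hashCxt in ExecHashTableDestroy.
 */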
/* Allocate data that will live for the life of the hashjoin */
oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);
if (nbatch > 0)
{
/*
* allocate and initialize the file arrays in hashCxt
*/
hashtable->innerBatchFile = (BufFile **)
palloc(nbatch * sizeof(BufFile *));
hashtable->outerBatchFile = (BufFile **)
palloc(nbatch * sizeof(BufFile *));
hashtable->innerBatchSize = (long *)
palloc(nbatch * sizeof(long));
hashtable->outerBatchSize = (long *)
palloc(nbatch * sizeof(long));
for (i = 0; i < nbatch; i++)
{
hashtable->innerBatchFile[i] = NULL;
hashtable->outerBatchFile[i] = NULL;
hashtable->innerBatchSize[i] = 0;
hashtable->outerBatchSize[i] = 0;
}
/* The files will not be opened until later... */
}
/*
* Prepare context for the first-scan space allocations; allocate the
* hashbucket array therein, and set each bucket "empty".
*/
MemoryContextSwitchTo(hashtable->batchCxt);
hashtable->buckets = (HashJoinTuple *)
palloc(nbuckets * sizeof(HashJoinTuple));
if (hashtable->buckets == NULL)
elog(ERROR, "Insufficient memory for hash table.");
for (i = 0; i < nbuckets; i++)
hashtable->buckets[i] = NULL;
MemoryContextSwitchTo(oldcxt);
return hashtable;
}
/*
* Compute appropriate size for hashtable given the estimated size of the
* relation to be hashed (number of rows and average row width).
*
* Caution: the input is only the planner's estimates, and so can't be
* trusted too far. Apply a healthy fudge factor.
*
* This is exported so that the planner's costsize.c can use it.
*/
/* Target bucket loading (tuples per bucket) */
#define NTUP_PER_BUCKET 10
/* Fudge factor to allow for inaccuracy of input estimates */
#define FUDGE_FAC 2.0
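/*
 * Worked example (the numbers are assumptions for illustration, not
 * taken from this file): with ntuples = 100000, a computed tupsize of
 * 80 bytes, and SortMem = 1024 (1MB), we get inner_rel_bytes =
 * 100000 * 80 * 2.0 = 16000000 and hash_table_bytes =
 * Max(sqrt(16000000), 1048576) = 1048576.  The target load gives
 * totalbuckets = ceil(100000 * 2.0 / 10) = 20000 virtual buckets, but
 * only nbuckets = (int) (1048576 / (800 * 2.0)) = 655 physical buckets
 * fit in memory, so we must batch:
 * nbatch = ceil((16000000 - 1048576) / 1048576) = 15.
 */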
void
ExecChooseHashTableSize(double ntuples, int tupwidth,
int *virtualbuckets,
int *physicalbuckets,
int *numbatches)
{
int tupsize;
double inner_rel_bytes;
double hash_table_bytes;
int nbatch;
int nbuckets;
int totalbuckets;
int bucketsize;
/* Force a plausible relation size if no info */
if (ntuples <= 0.0)
ntuples = 1000.0;
/*
* Estimate tupsize based on footprint of tuple in hashtable... but
* what about palloc overhead?
*/
tupsize = MAXALIGN(tupwidth) + MAXALIGN(sizeof(HashJoinTupleData));
inner_rel_bytes = ntuples * tupsize * FUDGE_FAC;
/*
* Target hashtable size is SortMem kilobytes, but not less than
* sqrt(estimated inner rel size), so as to avoid horrible
* performance.
*/
hash_table_bytes = sqrt(inner_rel_bytes);
if (hash_table_bytes < (SortMem * 1024L))
hash_table_bytes = SortMem * 1024L;
/*
* Count the number of hash buckets we want for the whole relation,
* for an average bucket load of NTUP_PER_BUCKET (per virtual
* bucket!).
*/
totalbuckets = (int) ceil(ntuples * FUDGE_FAC / NTUP_PER_BUCKET);
/*
* Count the number of buckets we think will actually fit in the
* target memory size, at a loading of NTUP_PER_BUCKET (physical
* buckets). NOTE: FUDGE_FAC here determines the fraction of the
* hashtable space reserved to allow for nonuniform distribution of
* hash values. Perhaps this should be a different number from the
* other uses of FUDGE_FAC, but since we have no real good way to pick
* either one...
*/
bucketsize = NTUP_PER_BUCKET * tupsize;
nbuckets = (int) (hash_table_bytes / (bucketsize * FUDGE_FAC));
if (nbuckets <= 0)
nbuckets = 1;
if (totalbuckets <= nbuckets)
{
/*
* We have enough space, so no batching. In theory we could even
* reduce nbuckets, but since that could lead to poor behavior if
* estimated ntuples is much less than reality, it seems better to
* make more buckets instead of fewer.
*/
totalbuckets = nbuckets;
nbatch = 0;
}
else
{
/*
* Need to batch; compute how many batches we want to use. Note
* that nbatch doesn't have to have anything to do with the ratio
* totalbuckets/nbuckets; in fact, it is the number of groups we
* will use for the part of the data that doesn't fall into the
* first nbuckets hash buckets.
*/
nbatch = (int) ceil((inner_rel_bytes - hash_table_bytes) /
hash_table_bytes);
if (nbatch <= 0)
nbatch = 1;
}
/*
* Now, totalbuckets is the number of (virtual) hashbuckets for the
* whole relation, and nbuckets is the number of physical hashbuckets
* we will use in the first pass. Data falling into the first
* nbuckets virtual hashbuckets gets handled in the first pass;
* everything else gets divided into nbatch batches to be processed in
* additional passes.
*/
*virtualbuckets = totalbuckets;
*physicalbuckets = nbuckets;
*numbatches = nbatch;
}
/* ----------------------------------------------------------------
* ExecHashTableDestroy
*
* destroy a hash table
* ----------------------------------------------------------------
*/
void
ExecHashTableDestroy(HashJoinTable hashtable)
{
int i;
/* Make sure all the temp files are closed */
for (i = 0; i < hashtable->nbatch; i++)
{
if (hashtable->innerBatchFile[i])
BufFileClose(hashtable->innerBatchFile[i]);
if (hashtable->outerBatchFile[i])
BufFileClose(hashtable->outerBatchFile[i]);
}
/* Release working memory (batchCxt is a child, so it goes away too) */
MemoryContextDelete(hashtable->hashCxt);
/* And drop the control block */
pfree(hashtable);
}
/* ----------------------------------------------------------------
* ExecHashTableInsert
*
* insert a tuple into the hash table; depending on its hash value,
* it may instead go to a temp file to be processed in a later batch
* ----------------------------------------------------------------
*/
void
ExecHashTableInsert(HashJoinTable hashtable,
ExprContext *econtext,
List *hashkeys)
{
int bucketno = ExecHashGetBucket(hashtable, econtext, hashkeys);
TupleTableSlot *slot = econtext->ecxt_innertuple;
HeapTuple heapTuple = slot->val;
/*
* decide whether to put the tuple in the hash table or a tmp file
*/
if (bucketno < hashtable->nbuckets)
{
/*
* put the tuple in hash table
*/
HashJoinTuple hashTuple;
int hashTupleSize;
hashTupleSize = MAXALIGN(sizeof(*hashTuple)) + heapTuple->t_len;
hashTuple = (HashJoinTuple) MemoryContextAlloc(hashtable->batchCxt,
hashTupleSize);
if (hashTuple == NULL)
elog(ERROR, "Insufficient memory for hash table.");
memcpy((char *) &hashTuple->htup,
(char *) heapTuple,
sizeof(hashTuple->htup));
hashTuple->htup.t_datamcxt = hashtable->batchCxt;
hashTuple->htup.t_data = (HeapTupleHeader)
(((char *) hashTuple) + MAXALIGN(sizeof(*hashTuple)));
memcpy((char *) hashTuple->htup.t_data,
(char *) heapTuple->t_data,
heapTuple->t_len);
hashTuple->next = hashtable->buckets[bucketno];
hashtable->buckets[bucketno] = hashTuple;
}
else
{
/*
* put the tuple into a tmp file for other batches
*/
int batchno = (hashtable->nbatch * (bucketno - hashtable->nbuckets)) /
(hashtable->totalbuckets - hashtable->nbuckets);
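/*
 * For example (figures assumed for illustration): with nbuckets = 655,
 * totalbuckets = 20000, and nbatch = 15, a tuple falling in virtual
 * bucket 10000 gets batchno = (15 * (10000 - 655)) / (20000 - 655) = 7,
 * so the out-of-memory part of the virtual bucket range is spread
 * evenly across the batches.
 */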
hashtable->innerBatchSize[batchno]++;
ExecHashJoinSaveTuple(heapTuple,
hashtable->innerBatchFile[batchno]);
}
}
/* ----------------------------------------------------------------
* ExecHashGetBucket
*
* compute the hash value for a tuple and return the (virtual)
* bucket number it falls in
* ----------------------------------------------------------------
*/
int
ExecHashGetBucket(HashJoinTable hashtable,
ExprContext *econtext,
List *hashkeys)
{
uint32 hashkey = 0;
int bucketno;
List *hk;
int i = 0;
MemoryContext oldContext;
/*
* We reset the eval context each time to reclaim any memory leaked in
* the hashkey expressions or ComputeHashFunc itself.
*/
ResetExprContext(econtext);
oldContext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
foreach(hk, hashkeys)
{
Datum keyval;
bool isNull;
/* rotate hashkey left 1 bit at each step */
hashkey = (hashkey << 1) | ((hashkey & 0x80000000) ? 1 : 0);
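/*
 * The rotation makes the combined hash depend on key order; with a
 * plain XOR, two hash keys with equal values would cancel each other
 * out no matter which column produced which value.
 */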
/*
* Get the join attribute value of the tuple
*/
keyval = ExecEvalExpr((ExprState *) lfirst(hk),
econtext, &isNull, NULL);
/*
* Compute the hash function
*/
if (!isNull) /* treat nulls as having hash key 0 */
{
hashkey ^= ComputeHashFunc(keyval,
(int) hashtable->typLens[i],
hashtable->typByVals[i]);
}
i++;
}
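/*
 * Map the hash value into the *virtual* bucket space.  A result at or
 * beyond nbuckets means the tuple belongs to a later batch and will be
 * saved to a temp file rather than entered into the in-memory table
 * (see ExecHashTableInsert).
 */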
bucketno = hashkey % (uint32) hashtable->totalbuckets;
#ifdef HJDEBUG
if (bucketno >= hashtable->nbuckets)
printf("hash(%u) = %d SAVED\n", hashkey, bucketno);
else
printf("hash(%u) = %d\n", hashkey, bucketno);
#endif
MemoryContextSwitchTo(oldContext);
return bucketno;
}
/* ----------------------------------------------------------------
* ExecScanHashBucket
*
* scan a hash bucket of matches
* ----------------------------------------------------------------
*/
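/*
 * A sketch of the expected calling pattern (modeled on ExecHashJoin in
 * nodeHashjoin.c, simplified for illustration; "curtuple" is just a
 * local variable name assumed here):
 *
 *		hjstate->hj_CurTuple = NULL;	(start scanning a new bucket)
 *		while ((curtuple = ExecScanHashBucket(hjstate, hjclauses,
 *											  econtext)) != NULL)
 *		{
 *			(curtuple passed the hashclauses; emit a join result,
 *			 then continue the loop to find further matches)
 *		}
 */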
HeapTuple
ExecScanHashBucket(HashJoinState *hjstate,
List *hjclauses,
ExprContext *econtext)
{
HashJoinTable hashtable = hjstate->hj_HashTable;
HashJoinTuple hashTuple = hjstate->hj_CurTuple;
/*
* hj_CurTuple is NULL to start scanning a new bucket, or the address
* of the last tuple returned from the current bucket.
*/
if (hashTuple == NULL)
hashTuple = hashtable->buckets[hjstate->hj_CurBucketNo];
else
hashTuple = hashTuple->next;
while (hashTuple != NULL)
{
HeapTuple heapTuple = &hashTuple->htup;
TupleTableSlot *inntuple;
/* insert hashtable's tuple into exec slot so ExecQual sees it */
inntuple = ExecStoreTuple(heapTuple, /* tuple to store */
hjstate->hj_HashTupleSlot, /* slot */
InvalidBuffer,
false); /* do not pfree this tuple */
econtext->ecxt_innertuple = inntuple;
/* reset temp memory each time to avoid leaks from qual expression */
ResetExprContext(econtext);
if (ExecQual(hjclauses, econtext, false))
{
hjstate->hj_CurTuple = hashTuple;
return heapTuple;
}
hashTuple = hashTuple->next;
}
/*
* no match
*/
return NULL;
}
/* ----------------------------------------------------------------
* ComputeHashFunc
*
* the hash function for hash joins (also used for hash aggregation)
*
* XXX this probably ought to be replaced with datatype-specific
* hash functions, such as those already implemented for hash indexes.
* ----------------------------------------------------------------
*/
uint32
ComputeHashFunc(Datum key, int typLen, bool byVal)
{
unsigned char *k;
if (byVal)
{
/*
* If it's a by-value data type, just hash the whole Datum value.
* This assumes that datatypes narrower than Datum are
* consistently padded (either zero-extended or sign-extended, but
* not random bits) to fill Datum; see the XXXGetDatum macros in
* postgres.h. NOTE: it would not work to do hash_any(&key, len)
* since this would get the wrong bytes on a big-endian machine.
*/
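/*
 * Concretely (assuming an 8-byte Datum on a big-endian machine): an
 * int4 key occupies the low-order half of the Datum, and on big-endian
 * hardware the bytes at &key are the high-order (extension) bytes, so
 * hash_any(&key, 4) would hash padding instead of the value.  Hashing
 * all sizeof(Datum) bytes of the consistently padded value is safe.
 */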
k = (unsigned char *) &key;
typLen = sizeof(Datum);
}
else
{
if (typLen > 0)
{
/* fixed-width pass-by-reference type */
k = (unsigned char *) DatumGetPointer(key);
}
else if (typLen == -1)
{
/*
* It's a varlena type, so 'key' points to a "struct varlena".
* NOTE: VARSIZE returns the "real" data length plus the
* sizeof the "vl_len" attribute of varlena (the length
* information). 'key' points to the beginning of the varlena
* struct, so we have to use "VARDATA" to find the beginning
* of the "real" data. Also, we have to be careful to detoast
* the datum if it's toasted. (We don't worry about freeing
* the detoasted copy; that happens for free when the
* per-tuple memory context is reset in ExecHashGetBucket.)
*/
struct varlena *vkey = PG_DETOAST_DATUM(key);
typLen = VARSIZE(vkey) - VARHDRSZ;
k = (unsigned char *) VARDATA(vkey);
}
else if (typLen == -2)
{
/* It's a null-terminated C string */
typLen = strlen(DatumGetCString(key)) + 1;
k = (unsigned char *) DatumGetPointer(key);
}
else
{
elog(ERROR, "ComputeHashFunc: Invalid typLen %d", typLen);
k = NULL; /* keep compiler quiet */
}
}
return DatumGetUInt32(hash_any(k, typLen));
}
/* ----------------------------------------------------------------
* ExecHashTableReset
*
* reset hash table header for new batch
*
* ntuples is the number of tuples in the inner relation's batch
* (which we currently don't actually use...)
* ----------------------------------------------------------------
*/
void
ExecHashTableReset(HashJoinTable hashtable, long ntuples)
{
MemoryContext oldcxt;
int nbuckets = hashtable->nbuckets;
int i;
/*
* Release all the hash buckets and tuples acquired in the prior pass,
* and reinitialize the context for a new pass.
*/
MemoryContextReset(hashtable->batchCxt);
oldcxt = MemoryContextSwitchTo(hashtable->batchCxt);
/*
* We still use the same number of physical buckets as in the first
* pass. (It could be different; but we already decided how many
* buckets would be appropriate for the allowed memory, so stick with
* that number.) We MUST set totalbuckets to equal nbuckets, because
* from now on no tuples will go out to temp files; there are no more
* virtual buckets, only real buckets. (This implies that tuples will
* go into different bucket numbers than they did on the first pass,
* but that's OK.)
*/
hashtable->totalbuckets = nbuckets;
/* Reallocate and reinitialize the hash bucket headers. */
hashtable->buckets = (HashJoinTuple *)
palloc(nbuckets * sizeof(HashJoinTuple));
if (hashtable->buckets == NULL)
elog(ERROR, "Insufficient memory for hash table.");
for (i = 0; i < nbuckets; i++)
hashtable->buckets[i] = NULL;
MemoryContextSwitchTo(oldcxt);
}
void
ExecReScanHash(HashState *node, ExprContext *exprCtxt)
{
/*
* If chgParam of the subnode is not null, the plan will be re-scanned
* by the first ExecProcNode.
*/
if (((PlanState *) node)->lefttree->chgParam == NULL)
ExecReScan(((PlanState *) node)->lefttree, exprCtxt);
}