2011-12-17 22:41:16 +01:00
|
|
|
/*-------------------------------------------------------------------------
|
|
|
|
*
|
|
|
|
* spgscan.c
|
|
|
|
* routines for scanning SP-GiST indexes
|
|
|
|
*
|
|
|
|
*
|
2019-01-02 18:44:25 +01:00
|
|
|
* Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
|
2011-12-17 22:41:16 +01:00
|
|
|
* Portions Copyright (c) 1994, Regents of the University of California
|
|
|
|
*
|
|
|
|
* IDENTIFICATION
|
|
|
|
* src/backend/access/spgist/spgscan.c
|
|
|
|
*
|
|
|
|
*-------------------------------------------------------------------------
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include "postgres.h"

/*
 * "access/genam.h" (not "indexgenam.h", which does not exist) declares the
 * generic index-scan support used below, e.g. RelationGetIndexScan().
 */
#include "access/genam.h"
#include "access/relscan.h"
#include "access/spgist_private.h"
#include "miscadmin.h"
#include "storage/bufmgr.h"
#include "utils/datum.h"
#include "utils/float.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/rel.h"
|
2011-12-17 22:41:16 +01:00
|
|
|
|
2012-03-11 21:29:04 +01:00
|
|
|
/*
 * Callback used to hand one matching leaf entry to the scan machinery.
 * Implementations receive the heap TID, the (possibly reconstructed) leaf
 * value, its null flag, the recheck flags, and the distance array for
 * ordered scans (NULL for unordered reporting — see spgLeafTest).
 */
typedef void (*storeRes_func) (SpGistScanOpaque so, ItemPointer heapPtr,
							   Datum leafValue, bool isNull, bool recheck,
							   bool recheckDistances, double *distances);
|
2012-03-11 21:29:04 +01:00
|
|
|
|
2018-09-19 00:54:10 +02:00
|
|
|
/*
|
|
|
|
* Pairing heap comparison function for the SpGistSearchItem queue.
|
|
|
|
* KNN-searches currently only support NULLS LAST. So, preserve this logic
|
|
|
|
* here.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
pairingheap_SpGistSearchItem_cmp(const pairingheap_node *a,
|
|
|
|
const pairingheap_node *b, void *arg)
|
2011-12-17 22:41:16 +01:00
|
|
|
{
|
2019-05-22 18:55:34 +02:00
|
|
|
const SpGistSearchItem *sa = (const SpGistSearchItem *) a;
|
|
|
|
const SpGistSearchItem *sb = (const SpGistSearchItem *) b;
|
2018-09-19 00:54:10 +02:00
|
|
|
SpGistScanOpaque so = (SpGistScanOpaque) arg;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (sa->isNull)
|
|
|
|
{
|
|
|
|
if (!sb->isNull)
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
else if (sb->isNull)
|
|
|
|
{
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* Order according to distance comparison */
|
2019-09-25 00:47:36 +02:00
|
|
|
for (i = 0; i < so->numberOfNonNullOrderBys; i++)
|
2018-09-19 00:54:10 +02:00
|
|
|
{
|
|
|
|
if (isnan(sa->distances[i]) && isnan(sb->distances[i]))
|
|
|
|
continue; /* NaN == NaN */
|
|
|
|
if (isnan(sa->distances[i]))
|
|
|
|
return -1; /* NaN > number */
|
|
|
|
if (isnan(sb->distances[i]))
|
|
|
|
return 1; /* number < NaN */
|
|
|
|
if (sa->distances[i] != sb->distances[i])
|
|
|
|
return (sa->distances[i] < sb->distances[i]) ? 1 : -1;
|
|
|
|
}
|
|
|
|
}
|
2011-12-17 22:41:16 +01:00
|
|
|
|
2018-09-19 00:54:10 +02:00
|
|
|
/* Leaf items go before inner pages, to ensure a depth-first search */
|
|
|
|
if (sa->isLeaf && !sb->isLeaf)
|
|
|
|
return 1;
|
|
|
|
if (!sa->isLeaf && sb->isLeaf)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
2011-12-17 22:41:16 +01:00
|
|
|
|
|
|
|
static void
|
2019-05-22 18:55:34 +02:00
|
|
|
spgFreeSearchItem(SpGistScanOpaque so, SpGistSearchItem *item)
|
2011-12-17 22:41:16 +01:00
|
|
|
{
|
2017-12-22 11:33:16 +01:00
|
|
|
if (!so->state.attLeafType.attbyval &&
|
2018-09-19 00:54:10 +02:00
|
|
|
DatumGetPointer(item->value) != NULL)
|
|
|
|
pfree(DatumGetPointer(item->value));
|
2016-03-30 17:29:28 +02:00
|
|
|
|
2018-09-19 00:54:10 +02:00
|
|
|
if (item->traversalValue)
|
|
|
|
pfree(item->traversalValue);
|
|
|
|
|
|
|
|
pfree(item);
|
2011-12-17 22:41:16 +01:00
|
|
|
}
|
|
|
|
|
2018-09-19 00:54:10 +02:00
|
|
|
/*
|
|
|
|
* Add SpGistSearchItem to queue
|
|
|
|
*
|
|
|
|
* Called in queue context
|
|
|
|
*/
|
2011-12-17 22:41:16 +01:00
|
|
|
static void
|
2019-05-22 18:55:34 +02:00
|
|
|
spgAddSearchItemToQueue(SpGistScanOpaque so, SpGistSearchItem *item)
|
2011-12-17 22:41:16 +01:00
|
|
|
{
|
2018-09-19 00:54:10 +02:00
|
|
|
pairingheap_add(so->scanQueue, &item->phNode);
|
|
|
|
}
|
2011-12-17 22:41:16 +01:00
|
|
|
|
2018-09-19 00:54:10 +02:00
|
|
|
static SpGistSearchItem *
|
|
|
|
spgAllocSearchItem(SpGistScanOpaque so, bool isnull, double *distances)
|
|
|
|
{
|
|
|
|
/* allocate distance array only for non-NULL items */
|
|
|
|
SpGistSearchItem *item =
|
2019-09-19 20:30:19 +02:00
|
|
|
palloc(SizeOfSpGistSearchItem(isnull ? 0 : so->numberOfNonNullOrderBys));
|
2018-09-19 00:54:10 +02:00
|
|
|
|
|
|
|
item->isNull = isnull;
|
|
|
|
|
2019-09-19 20:30:19 +02:00
|
|
|
if (!isnull && so->numberOfNonNullOrderBys > 0)
|
2018-09-19 00:54:10 +02:00
|
|
|
memcpy(item->distances, distances,
|
2019-09-19 20:30:19 +02:00
|
|
|
sizeof(item->distances[0]) * so->numberOfNonNullOrderBys);
|
2018-09-19 00:54:10 +02:00
|
|
|
|
|
|
|
return item;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
spgAddStartItem(SpGistScanOpaque so, bool isnull)
|
|
|
|
{
|
|
|
|
SpGistSearchItem *startEntry =
|
|
|
|
spgAllocSearchItem(so, isnull, so->zeroDistances);
|
|
|
|
|
|
|
|
ItemPointerSet(&startEntry->heapPtr,
|
|
|
|
isnull ? SPGIST_NULL_BLKNO : SPGIST_ROOT_BLKNO,
|
|
|
|
FirstOffsetNumber);
|
|
|
|
startEntry->isLeaf = false;
|
|
|
|
startEntry->level = 0;
|
|
|
|
startEntry->value = (Datum) 0;
|
|
|
|
startEntry->traversalValue = NULL;
|
|
|
|
startEntry->recheck = false;
|
|
|
|
startEntry->recheckDistances = false;
|
|
|
|
|
|
|
|
spgAddSearchItemToQueue(so, startEntry);
|
2011-12-17 22:41:16 +01:00
|
|
|
}
|
|
|
|
|
2011-12-19 20:58:41 +01:00
|
|
|
/*
 * Initialize queue to search the root page, resetting
 * any previously active scan
 */
static void
resetSpGistScanOpaque(SpGistScanOpaque so)
{
	MemoryContext oldCtx;

	/* Discard any traversal values and queue entries from a prior rescan */
	MemoryContextReset(so->traversalCxt);

	oldCtx = MemoryContextSwitchTo(so->traversalCxt);

	/*
	 * (Re)allocate the search queue in the traversal context.  Note the
	 * queue is built unconditionally; its ordering only matters for
	 * distance-ordered scans (see pairingheap_SpGistSearchItem_cmp).
	 */
	so->scanQueue = pairingheap_allocate(pairingheap_SpGistSearchItem_cmp, so);

	if (so->searchNulls)
		/* Add a work item to scan the null index entries */
		spgAddStartItem(so, true);

	if (so->searchNonNulls)
		/* Add a work item to scan the non-null index entries */
		spgAddStartItem(so, false);

	MemoryContextSwitchTo(oldCtx);

	if (so->numberOfOrderBys > 0)
	{
		/* Must pfree distances to avoid memory leak */
		int			i;

		for (i = 0; i < so->nPtrs; i++)
			if (so->distances[i])
				pfree(so->distances[i]);
	}

	if (so->want_itup)
	{
		/* Must pfree reconstructed tuples to avoid memory leak */
		int			i;

		for (i = 0; i < so->nPtrs; i++)
			pfree(so->reconTups[i]);
	}
	/* Reset the output-batch cursor and count */
	so->iPtr = so->nPtrs = 0;
}
|
|
|
|
|
2012-03-11 00:36:49 +01:00
|
|
|
/*
 * Prepare scan keys in SpGistScanOpaque from caller-given scan keys
 *
 * Sets searchNulls, searchNonNulls, numberOfKeys, keyData fields of *so.
 *
 * The point here is to eliminate null-related considerations from what the
 * opclass consistent functions need to deal with.  We assume all SPGiST-
 * indexable operators are strict, so any null RHS value makes the scan
 * condition unsatisfiable.  We also pull out any IS NULL/IS NOT NULL
 * conditions; their effect is reflected into searchNulls/searchNonNulls.
 */
static void
spgPrepareScanKeys(IndexScanDesc scan)
{
	SpGistScanOpaque so = (SpGistScanOpaque) scan->opaque;
	bool		qual_ok;
	bool		haveIsNull;
	bool		haveNotNull;
	int			nkeys;
	int			i;

	so->numberOfOrderBys = scan->numberOfOrderBys;
	so->orderByData = scan->orderByData;

	if (so->numberOfOrderBys <= 0)
		so->numberOfNonNullOrderBys = 0;
	else
	{
		int			j = 0;

		/*
		 * Remove all NULL keys, but remember their offsets in the original
		 * array.  This compacts orderByData in place; nonNullOrderByOffsets
		 * maps each original position to its new index (-1 if dropped).
		 */
		for (i = 0; i < scan->numberOfOrderBys; i++)
		{
			ScanKey		skey = &so->orderByData[i];

			if (skey->sk_flags & SK_ISNULL)
				so->nonNullOrderByOffsets[i] = -1;
			else
			{
				if (i != j)
					so->orderByData[j] = *skey;

				so->nonNullOrderByOffsets[i] = j++;
			}
		}

		so->numberOfNonNullOrderBys = j;
	}

	if (scan->numberOfKeys <= 0)
	{
		/* If no quals, whole-index scan is required */
		so->searchNulls = true;
		so->searchNonNulls = true;
		so->numberOfKeys = 0;
		return;
	}

	/* Examine the given quals */
	qual_ok = true;
	haveIsNull = haveNotNull = false;
	nkeys = 0;
	for (i = 0; i < scan->numberOfKeys; i++)
	{
		ScanKey		skey = &scan->keyData[i];

		if (skey->sk_flags & SK_SEARCHNULL)
			haveIsNull = true;
		else if (skey->sk_flags & SK_SEARCHNOTNULL)
			haveNotNull = true;
		else if (skey->sk_flags & SK_ISNULL)
		{
			/* ordinary qual with null argument - unsatisfiable */
			qual_ok = false;
			break;
		}
		else
		{
			/* ordinary qual, propagate into so->keyData */
			so->keyData[nkeys++] = *skey;
			/* this effectively creates a not-null requirement */
			haveNotNull = true;
		}
	}

	/* IS NULL in combination with something else is unsatisfiable */
	if (haveIsNull && haveNotNull)
		qual_ok = false;

	/* Emit results */
	if (qual_ok)
	{
		so->searchNulls = haveIsNull;
		so->searchNonNulls = haveNotNull;
		so->numberOfKeys = nkeys;
	}
	else
	{
		/* Unsatisfiable: scan neither half of the index */
		so->searchNulls = false;
		so->searchNonNulls = false;
		so->numberOfKeys = 0;
	}
}
|
|
|
|
|
Restructure index access method API to hide most of it at the C level.
This patch reduces pg_am to just two columns, a name and a handler
function. All the data formerly obtained from pg_am is now provided
in a C struct returned by the handler function. This is similar to
the designs we've adopted for FDWs and tablesample methods. There
are multiple advantages. For one, the index AM's support functions
are now simple C functions, making them faster to call and much less
error-prone, since the C compiler can now check function signatures.
For another, this will make it far more practical to define index access
methods in installable extensions.
A disadvantage is that SQL-level code can no longer see attributes
of index AMs; in particular, some of the crosschecks in the opr_sanity
regression test are no longer possible from SQL. We've addressed that
by adding a facility for the index AM to perform such checks instead.
(Much more could be done in that line, but for now we're content if the
amvalidate functions more or less replace what opr_sanity used to do.)
We might also want to expose some sort of reporting functionality, but
this patch doesn't do that.
Alexander Korotkov, reviewed by Petr Jelínek, and rather heavily
editorialized on by me.
2016-01-18 01:36:59 +01:00
|
|
|
/*
 * Begin a scan of an SP-GiST index: allocate and initialize the scan
 * descriptor and its SpGistScanOpaqueData, including memory contexts,
 * order-by support arrays, and the opclass consistent-function FmgrInfos.
 * The scan keys themselves are installed later by spgrescan().
 */
IndexScanDesc
spgbeginscan(Relation rel, int keysz, int orderbysz)
{
	IndexScanDesc scan;
	SpGistScanOpaque so;
	int			i;

	scan = RelationGetIndexScan(rel, keysz, orderbysz);

	so = (SpGistScanOpaque) palloc0(sizeof(SpGistScanOpaqueData));
	if (keysz > 0)
		so->keyData = (ScanKey) palloc(sizeof(ScanKeyData) * keysz);
	else
		so->keyData = NULL;
	initSpGistState(&so->state, scan->indexRelation);

	/* tempCxt is reset per-tuple; traversalCxt lives for the whole scan */
	so->tempCxt = AllocSetContextCreate(CurrentMemoryContext,
										"SP-GiST search temporary context",
										ALLOCSET_DEFAULT_SIZES);
	so->traversalCxt = AllocSetContextCreate(CurrentMemoryContext,
											 "SP-GiST traversal-value context",
											 ALLOCSET_DEFAULT_SIZES);

	/* Set up indexTupDesc and xs_hitupdesc in case it's an index-only scan */
	so->indexTupDesc = scan->xs_hitupdesc = RelationGetDescr(rel);

	/* Allocate various arrays needed for order-by scans */
	if (scan->numberOfOrderBys > 0)
	{
		/* This will be filled in spgrescan, but allocate the space here */
		so->orderByTypes = (Oid *)
			palloc(sizeof(Oid) * scan->numberOfOrderBys);
		so->nonNullOrderByOffsets = (int *)
			palloc(sizeof(int) * scan->numberOfOrderBys);

		/* These arrays have constant contents, so we can fill them now */
		so->zeroDistances = (double *)
			palloc(sizeof(double) * scan->numberOfOrderBys);
		so->infDistances = (double *)
			palloc(sizeof(double) * scan->numberOfOrderBys);

		for (i = 0; i < scan->numberOfOrderBys; i++)
		{
			so->zeroDistances[i] = 0.0;
			so->infDistances[i] = get_float8_infinity();
		}

		/* xs_orderbynulls starts all-true: no distances returned yet */
		scan->xs_orderbyvals = (Datum *)
			palloc0(sizeof(Datum) * scan->numberOfOrderBys);
		scan->xs_orderbynulls = (bool *)
			palloc(sizeof(bool) * scan->numberOfOrderBys);
		memset(scan->xs_orderbynulls, true,
			   sizeof(bool) * scan->numberOfOrderBys);
	}

	/* Cache the opclass support functions for the scan's lifetime */
	fmgr_info_copy(&so->innerConsistentFn,
				   index_getprocinfo(rel, 1, SPGIST_INNER_CONSISTENT_PROC),
				   CurrentMemoryContext);

	fmgr_info_copy(&so->leafConsistentFn,
				   index_getprocinfo(rel, 1, SPGIST_LEAF_CONSISTENT_PROC),
				   CurrentMemoryContext);

	so->indexCollation = rel->rd_indcollation[0];

	scan->opaque = so;

	return scan;
}
|
|
|
|
|
Restructure index access method API to hide most of it at the C level.
This patch reduces pg_am to just two columns, a name and a handler
function. All the data formerly obtained from pg_am is now provided
in a C struct returned by the handler function. This is similar to
the designs we've adopted for FDWs and tablesample methods. There
are multiple advantages. For one, the index AM's support functions
are now simple C functions, making them faster to call and much less
error-prone, since the C compiler can now check function signatures.
For another, this will make it far more practical to define index access
methods in installable extensions.
A disadvantage is that SQL-level code can no longer see attributes
of index AMs; in particular, some of the crosschecks in the opr_sanity
regression test are no longer possible from SQL. We've addressed that
by adding a facility for the index AM to perform such checks instead.
(Much more could be done in that line, but for now we're content if the
amvalidate functions more or less replace what opr_sanity used to do.)
We might also want to expose some sort of reporting functionality, but
this patch doesn't do that.
Alexander Korotkov, reviewed by Petr Jelínek, and rather heavily
editorialized on by me.
2016-01-18 01:36:59 +01:00
|
|
|
/*
 * Restart a scan with new scan keys and/or order-by keys, then rebuild the
 * search state (spgPrepareScanKeys) and reinitialize the queue to the root
 * pages (resetSpGistScanOpaque).
 */
void
spgrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys,
		  ScanKey orderbys, int norderbys)
{
	SpGistScanOpaque so = (SpGistScanOpaque) scan->opaque;

	/* copy scankeys into local storage */
	if (scankey && scan->numberOfKeys > 0)
		memmove(scan->keyData, scankey,
				scan->numberOfKeys * sizeof(ScanKeyData));

	/* initialize order-by data if needed */
	if (orderbys && scan->numberOfOrderBys > 0)
	{
		int			i;

		memmove(scan->orderByData, orderbys,
				scan->numberOfOrderBys * sizeof(ScanKeyData));

		for (i = 0; i < scan->numberOfOrderBys; i++)
		{
			ScanKey		skey = &scan->orderByData[i];

			/*
			 * Look up the datatype returned by the original ordering
			 * operator. SP-GiST always uses a float8 for the distance
			 * function, but the ordering operator could be anything else.
			 *
			 * XXX: The distance function is only allowed to be lossy if the
			 * ordering operator's result type is float4 or float8. Otherwise
			 * we don't know how to return the distance to the executor. But
			 * we cannot check that here, as we won't know if the distance
			 * function is lossy until it returns *recheck = true for the
			 * first time.
			 */
			so->orderByTypes[i] = get_func_rettype(skey->sk_func.fn_oid);
		}
	}

	/* preprocess scankeys, set up the representation in *so */
	spgPrepareScanKeys(scan);

	/* set up starting queue entries */
	resetSpGistScanOpaque(so);
}
|
|
|
|
|
Restructure index access method API to hide most of it at the C level.
This patch reduces pg_am to just two columns, a name and a handler
function. All the data formerly obtained from pg_am is now provided
in a C struct returned by the handler function. This is similar to
the designs we've adopted for FDWs and tablesample methods. There
are multiple advantages. For one, the index AM's support functions
are now simple C functions, making them faster to call and much less
error-prone, since the C compiler can now check function signatures.
For another, this will make it far more practical to define index access
methods in installable extensions.
A disadvantage is that SQL-level code can no longer see attributes
of index AMs; in particular, some of the crosschecks in the opr_sanity
regression test are no longer possible from SQL. We've addressed that
by adding a facility for the index AM to perform such checks instead.
(Much more could be done in that line, but for now we're content if the
amvalidate functions more or less replace what opr_sanity used to do.)
We might also want to expose some sort of reporting functionality, but
this patch doesn't do that.
Alexander Korotkov, reviewed by Petr Jelínek, and rather heavily
editorialized on by me.
2016-01-18 01:36:59 +01:00
|
|
|
void
|
|
|
|
spgendscan(IndexScanDesc scan)
|
2011-12-17 22:41:16 +01:00
|
|
|
{
|
|
|
|
SpGistScanOpaque so = (SpGistScanOpaque) scan->opaque;
|
|
|
|
|
|
|
|
MemoryContextDelete(so->tempCxt);
|
2018-03-20 04:59:17 +01:00
|
|
|
MemoryContextDelete(so->traversalCxt);
|
2018-09-19 00:54:10 +02:00
|
|
|
|
2018-10-31 22:04:42 +01:00
|
|
|
if (so->keyData)
|
|
|
|
pfree(so->keyData);
|
|
|
|
|
|
|
|
if (so->state.deadTupleStorage)
|
|
|
|
pfree(so->state.deadTupleStorage);
|
|
|
|
|
2018-09-19 00:54:10 +02:00
|
|
|
if (scan->numberOfOrderBys > 0)
|
|
|
|
{
|
2018-10-31 22:04:42 +01:00
|
|
|
pfree(so->orderByTypes);
|
2019-09-19 20:30:19 +02:00
|
|
|
pfree(so->nonNullOrderByOffsets);
|
2018-09-19 00:54:10 +02:00
|
|
|
pfree(so->zeroDistances);
|
|
|
|
pfree(so->infDistances);
|
2018-10-31 22:04:42 +01:00
|
|
|
pfree(scan->xs_orderbyvals);
|
|
|
|
pfree(scan->xs_orderbynulls);
|
2018-09-19 00:54:10 +02:00
|
|
|
}
|
2018-10-31 22:04:42 +01:00
|
|
|
|
|
|
|
pfree(so);
|
2018-09-19 00:54:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Leaf SpGistSearchItem constructor, called in queue context
|
|
|
|
*/
|
|
|
|
static SpGistSearchItem *
|
|
|
|
spgNewHeapItem(SpGistScanOpaque so, int level, ItemPointer heapPtr,
|
|
|
|
Datum leafValue, bool recheck, bool recheckDistances,
|
|
|
|
bool isnull, double *distances)
|
|
|
|
{
|
|
|
|
SpGistSearchItem *item = spgAllocSearchItem(so, isnull, distances);
|
|
|
|
|
|
|
|
item->level = level;
|
|
|
|
item->heapPtr = *heapPtr;
|
|
|
|
/* copy value to queue cxt out of tmp cxt */
|
|
|
|
item->value = isnull ? (Datum) 0 :
|
|
|
|
datumCopy(leafValue, so->state.attLeafType.attbyval,
|
|
|
|
so->state.attLeafType.attlen);
|
|
|
|
item->traversalValue = NULL;
|
|
|
|
item->isLeaf = true;
|
|
|
|
item->recheck = recheck;
|
|
|
|
item->recheckDistances = recheckDistances;
|
|
|
|
|
|
|
|
return item;
|
2011-12-17 22:41:16 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Test whether a leaf tuple satisfies all the scan keys
 *
 * *reportedSome is set to true if:
 *		the scan is not ordered AND the item satisfies the scankeys
 *
 * For ordered scans a passing leaf is not reported directly; instead it is
 * wrapped in a new leaf search item and pushed onto the queue so it is
 * returned in distance order.
 */
static bool
spgLeafTest(SpGistScanOpaque so, SpGistSearchItem *item,
			SpGistLeafTuple leafTuple, bool isnull,
			bool *reportedSome, storeRes_func storeRes)
{
	Datum		leafValue;
	double	   *distances;
	bool		result;
	bool		recheck;
	bool		recheckDistances;

	if (isnull)
	{
		/* Should not have arrived on a nulls page unless nulls are wanted */
		Assert(so->searchNulls);
		/* Nulls match trivially; no consistent-function call needed */
		leafValue = (Datum) 0;
		distances = NULL;
		recheck = false;
		recheckDistances = false;
		result = true;
	}
	else
	{
		spgLeafConsistentIn in;
		spgLeafConsistentOut out;

		/* use temp context for calling leaf_consistent */
		MemoryContext oldCxt = MemoryContextSwitchTo(so->tempCxt);

		in.scankeys = so->keyData;
		in.nkeys = so->numberOfKeys;
		in.orderbys = so->orderByData;
		in.norderbys = so->numberOfNonNullOrderBys;
		in.reconstructedValue = item->value;
		in.traversalValue = item->traversalValue;
		in.level = item->level;
		in.returnData = so->want_itup;
		in.leafDatum = SGLTDATUM(leafTuple, &so->state);

		/* Preset outputs so the opclass function may leave them untouched */
		out.leafValue = (Datum) 0;
		out.recheck = false;
		out.distances = NULL;
		out.recheckDistances = false;

		result = DatumGetBool(FunctionCall2Coll(&so->leafConsistentFn,
												so->indexCollation,
												PointerGetDatum(&in),
												PointerGetDatum(&out)));
		recheck = out.recheck;
		recheckDistances = out.recheckDistances;
		leafValue = out.leafValue;
		distances = out.distances;

		MemoryContextSwitchTo(oldCxt);
	}

	if (result)
	{
		/* item passes the scankeys */
		if (so->numberOfNonNullOrderBys > 0)
		{
			/* the scan is ordered -> add the item to the queue */
			/* spgNewHeapItem copies leafValue out of the temp context */
			MemoryContext oldCxt = MemoryContextSwitchTo(so->traversalCxt);
			SpGistSearchItem *heapItem = spgNewHeapItem(so, item->level,
														&leafTuple->heapPtr,
														leafValue,
														recheck,
														recheckDistances,
														isnull,
														distances);

			spgAddSearchItemToQueue(so, heapItem);

			MemoryContextSwitchTo(oldCxt);
		}
		else
		{
			/* non-ordered scan, so report the item right away */
			Assert(!recheckDistances);
			storeRes(so, &leafTuple->heapPtr, leafValue, isnull,
					 recheck, false, NULL);
			*reportedSome = true;
		}
	}

	return result;
}
|
2011-12-17 22:41:16 +01:00
|
|
|
|
2018-09-19 00:54:10 +02:00
|
|
|
/* A bundle initializer for inner_consistent methods */
|
|
|
|
static void
|
|
|
|
spgInitInnerConsistentIn(spgInnerConsistentIn *in,
|
|
|
|
SpGistScanOpaque so,
|
2019-05-22 18:55:34 +02:00
|
|
|
SpGistSearchItem *item,
|
2018-09-19 00:54:10 +02:00
|
|
|
SpGistInnerTuple innerTuple)
|
|
|
|
{
|
|
|
|
in->scankeys = so->keyData;
|
|
|
|
in->orderbys = so->orderByData;
|
|
|
|
in->nkeys = so->numberOfKeys;
|
2019-09-19 20:30:19 +02:00
|
|
|
in->norderbys = so->numberOfNonNullOrderBys;
|
2018-09-19 00:54:10 +02:00
|
|
|
in->reconstructedValue = item->value;
|
|
|
|
in->traversalMemoryContext = so->traversalCxt;
|
|
|
|
in->traversalValue = item->traversalValue;
|
|
|
|
in->level = item->level;
|
|
|
|
in->returnData = so->want_itup;
|
|
|
|
in->allTheSame = innerTuple->allTheSame;
|
|
|
|
in->hasPrefix = (innerTuple->prefixSize > 0);
|
|
|
|
in->prefixDatum = SGITDATUM(innerTuple, &so->state);
|
|
|
|
in->nNodes = innerTuple->nNodes;
|
|
|
|
in->nodeLabels = spgExtractNodeLabels(&so->state, innerTuple);
|
|
|
|
}
|
2011-12-17 22:41:16 +01:00
|
|
|
|
2018-09-19 00:54:10 +02:00
|
|
|
static SpGistSearchItem *
|
|
|
|
spgMakeInnerItem(SpGistScanOpaque so,
|
2019-05-22 18:55:34 +02:00
|
|
|
SpGistSearchItem *parentItem,
|
2018-09-19 00:54:10 +02:00
|
|
|
SpGistNodeTuple tuple,
|
|
|
|
spgInnerConsistentOut *out, int i, bool isnull,
|
|
|
|
double *distances)
|
|
|
|
{
|
|
|
|
SpGistSearchItem *item = spgAllocSearchItem(so, isnull, distances);
|
2011-12-19 20:58:41 +01:00
|
|
|
|
2018-09-19 00:54:10 +02:00
|
|
|
item->heapPtr = tuple->t_tid;
|
|
|
|
item->level = out->levelAdds ? parentItem->level + out->levelAdds[i]
|
|
|
|
: parentItem->level;
|
2011-12-19 20:58:41 +01:00
|
|
|
|
2018-09-19 00:54:10 +02:00
|
|
|
/* Must copy value out of temp context */
|
|
|
|
item->value = out->reconstructedValues
|
|
|
|
? datumCopy(out->reconstructedValues[i],
|
|
|
|
so->state.attLeafType.attbyval,
|
|
|
|
so->state.attLeafType.attlen)
|
|
|
|
: (Datum) 0;
|
2011-12-18 01:08:28 +01:00
|
|
|
|
2018-09-19 00:54:10 +02:00
|
|
|
/*
|
|
|
|
* Elements of out.traversalValues should be allocated in
|
|
|
|
* in.traversalMemoryContext, which is actually a long lived context of
|
|
|
|
* index scan.
|
|
|
|
*/
|
|
|
|
item->traversalValue =
|
|
|
|
out->traversalValues ? out->traversalValues[i] : NULL;
|
2011-12-17 22:41:16 +01:00
|
|
|
|
2018-09-19 00:54:10 +02:00
|
|
|
item->isLeaf = false;
|
|
|
|
item->recheck = false;
|
|
|
|
item->recheckDistances = false;
|
|
|
|
|
|
|
|
return item;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Visit one inner tuple: ask the opclass inner_consistent function which
 * child nodes can contain matches, and enqueue a search item for each.
 * For null items every child is visited unconditionally.
 */
static void
spgInnerTest(SpGistScanOpaque so, SpGistSearchItem *item,
			 SpGistInnerTuple innerTuple, bool isnull)
{
	/* inner_consistent runs in the short-lived temp context */
	MemoryContext oldCxt = MemoryContextSwitchTo(so->tempCxt);
	spgInnerConsistentOut out;
	int			nNodes = innerTuple->nNodes;
	int			i;

	memset(&out, 0, sizeof(out));

	if (!isnull)
	{
		spgInnerConsistentIn in;

		spgInitInnerConsistentIn(&in, so, item, innerTuple);

		/* use user-defined inner consistent method */
		FunctionCall2Coll(&so->innerConsistentFn,
						  so->indexCollation,
						  PointerGetDatum(&in),
						  PointerGetDatum(&out));
	}
	else
	{
		/* force all children to be visited */
		out.nNodes = nNodes;
		out.nodeNumbers = (int *) palloc(sizeof(int) * nNodes);
		for (i = 0; i < nNodes; i++)
			out.nodeNumbers[i] = i;
	}

	/* If allTheSame, they should all or none of them match */
	if (innerTuple->allTheSame && out.nNodes != 0 && out.nNodes != nNodes)
		elog(ERROR, "inconsistent inner_consistent results for allTheSame inner tuple");

	if (out.nNodes)
	{
		/* collect node pointers */
		SpGistNodeTuple node;
		SpGistNodeTuple *nodes = (SpGistNodeTuple *) palloc(
															sizeof(SpGistNodeTuple) * nNodes);

		SGITITERATE(innerTuple, i, node)
		{
			nodes[i] = node;
		}

		/* new search items must outlive the temp context */
		MemoryContextSwitchTo(so->traversalCxt);

		for (i = 0; i < out.nNodes; i++)
		{
			int			nodeN = out.nodeNumbers[i];
			SpGistSearchItem *innerItem;
			double	   *distances;

			Assert(nodeN >= 0 && nodeN < nNodes);

			node = nodes[nodeN];

			/* Skip nodes that have no downlink (empty subtree) */
			if (!ItemPointerIsValid(&node->t_tid))
				continue;

			/*
			 * Use infinity distances if innerConsistentFn() failed to return
			 * them or if is a NULL item (their distances are really unused).
			 */
			distances = out.distances ? out.distances[i] : so->infDistances;

			innerItem = spgMakeInnerItem(so, item, node, &out, i, isnull,
										 distances);

			spgAddSearchItemToQueue(so, innerItem);
		}
	}

	MemoryContextSwitchTo(oldCxt);
}
|
|
|
|
|
|
|
|
/* Returns a next item in an (ordered) scan or null if the index is exhausted */
|
|
|
|
static SpGistSearchItem *
|
|
|
|
spgGetNextQueueItem(SpGistScanOpaque so)
|
|
|
|
{
|
|
|
|
if (pairingheap_is_empty(so->scanQueue))
|
|
|
|
return NULL; /* Done when both heaps are empty */
|
|
|
|
|
|
|
|
/* Return item; caller is responsible to pfree it */
|
|
|
|
return (SpGistSearchItem *) pairingheap_remove_first(so->scanQueue);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Sentinel offset values returned by spgTestLeafTuple(), distinguishable
 * from any real on-page offset (which lies in [FirstOffsetNumber,
 * MaxOffsetNumber]):
 *  - SpGistBreakOffsetNumber: chain ends here, stop scanning this page
 *  - SpGistRedirectOffsetNumber: item->heapPtr was retargeted; caller
 *    must restart at the redirect destination
 *  - SpGistErrorOffsetNumber: unreachable (elog(ERROR) precedes it);
 *    exists only to satisfy the return type
 */
enum SpGistSpecialOffsetNumbers
{
	SpGistBreakOffsetNumber = InvalidOffsetNumber,
	SpGistRedirectOffsetNumber = MaxOffsetNumber + 1,
	SpGistErrorOffsetNumber = MaxOffsetNumber + 2
};
|
|
|
|
|
|
|
|
/*
 * spgTestLeafTuple: examine the leaf tuple at (page, offset).
 *
 * If the tuple is live, it is handed to spgLeafTest(), which applies the
 * scan conditions and, on a match, reports it through storeRes and sets
 * *reportedSome.
 *
 * Returns the offset of the next tuple in the same chain, or one of the
 * SpGistSpecialOffsetNumbers sentinels:
 *   SpGistRedirectOffsetNumber - item->heapPtr has been updated to the
 *       redirect target; the caller must re-dispatch from there;
 *   SpGistBreakOffsetNumber - a dead chain head, nothing to scan here.
 */
static OffsetNumber
spgTestLeafTuple(SpGistScanOpaque so,
				 SpGistSearchItem *item,
				 Page page, OffsetNumber offset,
				 bool isnull, bool isroot,
				 bool *reportedSome,
				 storeRes_func storeRes)
{
	SpGistLeafTuple leafTuple = (SpGistLeafTuple)
	PageGetItem(page, PageGetItemId(page, offset));

	if (leafTuple->tupstate != SPGIST_LIVE)
	{
		if (!isroot)			/* all tuples on root should be live */
		{
			if (leafTuple->tupstate == SPGIST_REDIRECT)
			{
				/* redirection tuple should be first in chain */
				Assert(offset == ItemPointerGetOffsetNumber(&item->heapPtr));
				/* transfer attention to redirect point */
				item->heapPtr = ((SpGistDeadTuple) leafTuple)->pointer;
				Assert(ItemPointerGetBlockNumber(&item->heapPtr) != SPGIST_METAPAGE_BLKNO);
				return SpGistRedirectOffsetNumber;
			}

			if (leafTuple->tupstate == SPGIST_DEAD)
			{
				/* dead tuple should be first in chain */
				Assert(offset == ItemPointerGetOffsetNumber(&item->heapPtr));
				/* No live entries on this page */
				Assert(leafTuple->nextOffset == InvalidOffsetNumber);
				return SpGistBreakOffsetNumber;
			}
		}

		/* We should not arrive at a placeholder */
		elog(ERROR, "unexpected SPGiST tuple state: %d", leafTuple->tupstate);
		return SpGistErrorOffsetNumber;
	}

	Assert(ItemPointerIsValid(&leafTuple->heapPtr));

	spgLeafTest(so, item, leafTuple, isnull, reportedSome, storeRes);

	return leafTuple->nextOffset;
}
|
|
|
|
|
|
|
|
/*
 * Walk the tree and report all tuples passing the scan quals to the storeRes
 * subroutine.
 *
 * If scanWholeIndex is true, we'll do just that.  If not, we'll stop at the
 * next page boundary once we have reported at least one tuple.
 */
static void
spgWalk(Relation index, SpGistScanOpaque so, bool scanWholeIndex,
		storeRes_func storeRes, Snapshot snapshot)
{
	Buffer		buffer = InvalidBuffer;
	bool		reportedSome = false;

	while (scanWholeIndex || !reportedSome)
	{
		SpGistSearchItem *item = spgGetNextQueueItem(so);

		if (item == NULL)
			break;				/* No more items in queue -> done */

redirect:
		/* Check for interrupts, just in case of infinite loop */
		CHECK_FOR_INTERRUPTS();

		if (item->isLeaf)
		{
			/* We store heap items in the queue only in case of ordered search */
			Assert(so->numberOfNonNullOrderBys > 0);
			storeRes(so, &item->heapPtr, item->value, item->isNull,
					 item->recheck, item->recheckDistances, item->distances);
			reportedSome = true;
		}
		else
		{
			BlockNumber blkno = ItemPointerGetBlockNumber(&item->heapPtr);
			OffsetNumber offset = ItemPointerGetOffsetNumber(&item->heapPtr);
			Page		page;
			bool		isnull;

			/*
			 * Pin and share-lock the item's page; if we already hold the
			 * right page from the previous item, just keep using it.
			 */
			if (buffer == InvalidBuffer)
			{
				buffer = ReadBuffer(index, blkno);
				LockBuffer(buffer, BUFFER_LOCK_SHARE);
			}
			else if (blkno != BufferGetBlockNumber(buffer))
			{
				UnlockReleaseBuffer(buffer);
				buffer = ReadBuffer(index, blkno);
				LockBuffer(buffer, BUFFER_LOCK_SHARE);
			}

			/* else new pointer points to the same page, no work needed */

			page = BufferGetPage(buffer);
			TestForOldSnapshot(snapshot, index, page);

			/* pages in the nulls tree are flagged as storing nulls */
			isnull = SpGistPageStoresNulls(page) ? true : false;

			if (SpGistPageIsLeaf(page))
			{
				/* Page is a leaf - that is, all it's tuples are heap items */
				OffsetNumber max = PageGetMaxOffsetNumber(page);

				if (SpGistBlockIsRoot(blkno))
				{
					/* When root is a leaf, examine all its tuples */
					for (offset = FirstOffsetNumber; offset <= max; offset++)
						(void) spgTestLeafTuple(so, item, page, offset,
												isnull, true,
												&reportedSome, storeRes);
				}
				else
				{
					/* Normal case: just examine the chain we arrived at */
					while (offset != InvalidOffsetNumber)
					{
						Assert(offset >= FirstOffsetNumber && offset <= max);
						offset = spgTestLeafTuple(so, item, page, offset,
												  isnull, false,
												  &reportedSome, storeRes);
						if (offset == SpGistRedirectOffsetNumber)
							goto redirect;
					}
				}
			}
			else				/* page is inner */
			{
				SpGistInnerTuple innerTuple = (SpGistInnerTuple)
				PageGetItem(page, PageGetItemId(page, offset));

				if (innerTuple->tupstate != SPGIST_LIVE)
				{
					if (innerTuple->tupstate == SPGIST_REDIRECT)
					{
						/* transfer attention to redirect point */
						item->heapPtr = ((SpGistDeadTuple) innerTuple)->pointer;
						Assert(ItemPointerGetBlockNumber(&item->heapPtr) !=
							   SPGIST_METAPAGE_BLKNO);
						goto redirect;
					}
					elog(ERROR, "unexpected SPGiST tuple state: %d",
						 innerTuple->tupstate);
				}

				spgInnerTest(so, item, innerTuple, isnull);
			}
		}

		/* done with this scan item */
		spgFreeSearchItem(so, item);
		/* clear temp context before proceeding to the next one */
		MemoryContextReset(so->tempCxt);
	}

	if (buffer != InvalidBuffer)
		UnlockReleaseBuffer(buffer);
}
|
|
|
|
|
2018-09-19 00:54:10 +02:00
|
|
|
|
2011-12-17 22:41:16 +01:00
|
|
|
/* storeRes subroutine for getbitmap case */
|
|
|
|
static void
|
2011-12-19 20:58:41 +01:00
|
|
|
storeBitmap(SpGistScanOpaque so, ItemPointer heapPtr,
|
2018-09-19 00:54:10 +02:00
|
|
|
Datum leafValue, bool isnull, bool recheck, bool recheckDistances,
|
|
|
|
double *distances)
|
2011-12-17 22:41:16 +01:00
|
|
|
{
|
2018-09-19 00:54:10 +02:00
|
|
|
Assert(!recheckDistances && !distances);
|
2011-12-17 22:41:16 +01:00
|
|
|
tbm_add_tuples(so->tbm, heapPtr, 1, recheck);
|
|
|
|
so->ntids++;
|
|
|
|
}
|
|
|
|
|
Restructure index access method API to hide most of it at the C level.
This patch reduces pg_am to just two columns, a name and a handler
function. All the data formerly obtained from pg_am is now provided
in a C struct returned by the handler function. This is similar to
the designs we've adopted for FDWs and tablesample methods. There
are multiple advantages. For one, the index AM's support functions
are now simple C functions, making them faster to call and much less
error-prone, since the C compiler can now check function signatures.
For another, this will make it far more practical to define index access
methods in installable extensions.
A disadvantage is that SQL-level code can no longer see attributes
of index AMs; in particular, some of the crosschecks in the opr_sanity
regression test are no longer possible from SQL. We've addressed that
by adding a facility for the index AM to perform such checks instead.
(Much more could be done in that line, but for now we're content if the
amvalidate functions more or less replace what opr_sanity used to do.)
We might also want to expose some sort of reporting functionality, but
this patch doesn't do that.
Alexander Korotkov, reviewed by Petr Jelínek, and rather heavily
editorialized on by me.
2016-01-18 01:36:59 +01:00
|
|
|
int64
|
|
|
|
spggetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
|
2011-12-17 22:41:16 +01:00
|
|
|
{
|
|
|
|
SpGistScanOpaque so = (SpGistScanOpaque) scan->opaque;
|
|
|
|
|
2012-03-11 00:36:49 +01:00
|
|
|
/* Copy want_itup to *so so we don't need to pass it around separately */
|
2011-12-19 20:58:41 +01:00
|
|
|
so->want_itup = false;
|
2011-12-17 22:41:16 +01:00
|
|
|
|
|
|
|
so->tbm = tbm;
|
|
|
|
so->ntids = 0;
|
|
|
|
|
2016-04-08 21:30:10 +02:00
|
|
|
spgWalk(scan->indexRelation, so, true, storeBitmap, scan->xs_snapshot);
|
2011-12-17 22:41:16 +01:00
|
|
|
|
Restructure index access method API to hide most of it at the C level.
This patch reduces pg_am to just two columns, a name and a handler
function. All the data formerly obtained from pg_am is now provided
in a C struct returned by the handler function. This is similar to
the designs we've adopted for FDWs and tablesample methods. There
are multiple advantages. For one, the index AM's support functions
are now simple C functions, making them faster to call and much less
error-prone, since the C compiler can now check function signatures.
For another, this will make it far more practical to define index access
methods in installable extensions.
A disadvantage is that SQL-level code can no longer see attributes
of index AMs; in particular, some of the crosschecks in the opr_sanity
regression test are no longer possible from SQL. We've addressed that
by adding a facility for the index AM to perform such checks instead.
(Much more could be done in that line, but for now we're content if the
amvalidate functions more or less replace what opr_sanity used to do.)
We might also want to expose some sort of reporting functionality, but
this patch doesn't do that.
Alexander Korotkov, reviewed by Petr Jelínek, and rather heavily
editorialized on by me.
2016-01-18 01:36:59 +01:00
|
|
|
return so->ntids;
|
2011-12-17 22:41:16 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* storeRes subroutine for gettuple case */
|
|
|
|
static void
|
2011-12-19 20:58:41 +01:00
|
|
|
storeGettuple(SpGistScanOpaque so, ItemPointer heapPtr,
|
2018-09-19 00:54:10 +02:00
|
|
|
Datum leafValue, bool isnull, bool recheck, bool recheckDistances,
|
2019-09-19 20:30:19 +02:00
|
|
|
double *nonNullDistances)
|
2011-12-17 22:41:16 +01:00
|
|
|
{
|
|
|
|
Assert(so->nPtrs < MaxIndexTuplesPerPage);
|
|
|
|
so->heapPtrs[so->nPtrs] = *heapPtr;
|
|
|
|
so->recheck[so->nPtrs] = recheck;
|
2018-09-19 00:54:10 +02:00
|
|
|
so->recheckDistances[so->nPtrs] = recheckDistances;
|
|
|
|
|
|
|
|
if (so->numberOfOrderBys > 0)
|
|
|
|
{
|
2019-09-19 20:30:19 +02:00
|
|
|
if (isnull || so->numberOfNonNullOrderBys <= 0)
|
2018-09-19 00:54:10 +02:00
|
|
|
so->distances[so->nPtrs] = NULL;
|
|
|
|
else
|
|
|
|
{
|
2019-09-19 20:30:19 +02:00
|
|
|
IndexOrderByDistance *distances =
|
|
|
|
palloc(sizeof(distances[0]) * so->numberOfOrderBys);
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < so->numberOfOrderBys; i++)
|
|
|
|
{
|
|
|
|
int offset = so->nonNullOrderByOffsets[i];
|
|
|
|
|
|
|
|
if (offset >= 0)
|
|
|
|
{
|
|
|
|
/* Copy non-NULL distance value */
|
|
|
|
distances[i].value = nonNullDistances[offset];
|
|
|
|
distances[i].isnull = false;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* Set distance's NULL flag. */
|
|
|
|
distances[i].value = 0.0;
|
|
|
|
distances[i].isnull = true;
|
|
|
|
}
|
|
|
|
}
|
2018-09-19 00:54:10 +02:00
|
|
|
|
2019-09-19 20:30:19 +02:00
|
|
|
so->distances[so->nPtrs] = distances;
|
2018-09-19 00:54:10 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-12-19 20:58:41 +01:00
|
|
|
if (so->want_itup)
|
|
|
|
{
|
|
|
|
/*
|
2017-02-27 23:20:34 +01:00
|
|
|
* Reconstruct index data. We have to copy the datum out of the temp
|
|
|
|
* context anyway, so we may as well create the tuple here.
|
2011-12-19 20:58:41 +01:00
|
|
|
*/
|
2017-02-27 23:20:34 +01:00
|
|
|
so->reconTups[so->nPtrs] = heap_form_tuple(so->indexTupDesc,
|
|
|
|
&leafValue,
|
|
|
|
&isnull);
|
2011-12-19 20:58:41 +01:00
|
|
|
}
|
2011-12-17 22:41:16 +01:00
|
|
|
so->nPtrs++;
|
|
|
|
}
|
|
|
|
|
Restructure index access method API to hide most of it at the C level.
This patch reduces pg_am to just two columns, a name and a handler
function. All the data formerly obtained from pg_am is now provided
in a C struct returned by the handler function. This is similar to
the designs we've adopted for FDWs and tablesample methods. There
are multiple advantages. For one, the index AM's support functions
are now simple C functions, making them faster to call and much less
error-prone, since the C compiler can now check function signatures.
For another, this will make it far more practical to define index access
methods in installable extensions.
A disadvantage is that SQL-level code can no longer see attributes
of index AMs; in particular, some of the crosschecks in the opr_sanity
regression test are no longer possible from SQL. We've addressed that
by adding a facility for the index AM to perform such checks instead.
(Much more could be done in that line, but for now we're content if the
amvalidate functions more or less replace what opr_sanity used to do.)
We might also want to expose some sort of reporting functionality, but
this patch doesn't do that.
Alexander Korotkov, reviewed by Petr Jelínek, and rather heavily
editorialized on by me.
2016-01-18 01:36:59 +01:00
|
|
|
/*
 * spggettuple() -- return the next matching tuple of an SP-GiST scan.
 *
 * Matches are produced in batches: spgWalk() fills the arrays in *so via
 * storeGettuple, and successive calls drain them (so->iPtr tracks the
 * read position).  When a batch is exhausted we free its per-tuple
 * allocations and walk further; an empty batch means the scan is done.
 */
bool
spggettuple(IndexScanDesc scan, ScanDirection dir)
{
	SpGistScanOpaque so = (SpGistScanOpaque) scan->opaque;

	if (dir != ForwardScanDirection)
		elog(ERROR, "SP-GiST only supports forward scan direction");

	/* Copy want_itup to *so so we don't need to pass it around separately */
	so->want_itup = scan->xs_want_itup;

	for (;;)
	{
		if (so->iPtr < so->nPtrs)
		{
			/* continuing to return reported tuples */
			scan->xs_heaptid = so->heapPtrs[so->iPtr];
			scan->xs_recheck = so->recheck[so->iPtr];
			scan->xs_hitup = so->reconTups[so->iPtr];

			if (so->numberOfOrderBys > 0)
				index_store_float8_orderby_distances(scan, so->orderByTypes,
													 so->distances[so->iPtr],
													 so->recheckDistances[so->iPtr]);
			so->iPtr++;
			return true;
		}

		if (so->numberOfOrderBys > 0)
		{
			/* Must pfree distances to avoid memory leak */
			int			i;

			for (i = 0; i < so->nPtrs; i++)
				if (so->distances[i])
					pfree(so->distances[i]);
		}

		if (so->want_itup)
		{
			/* Must pfree reconstructed tuples to avoid memory leak */
			int			i;

			for (i = 0; i < so->nPtrs; i++)
				pfree(so->reconTups[i]);
		}
		so->iPtr = so->nPtrs = 0;

		/* fetch the next batch; scanWholeIndex = false stops early */
		spgWalk(scan->indexRelation, so, false, storeGettuple,
				scan->xs_snapshot);

		if (so->nPtrs == 0)
			break;				/* must have completed scan */
	}

	return false;
}
|
2011-12-18 21:49:00 +01:00
|
|
|
|
Restructure index access method API to hide most of it at the C level.
This patch reduces pg_am to just two columns, a name and a handler
function. All the data formerly obtained from pg_am is now provided
in a C struct returned by the handler function. This is similar to
the designs we've adopted for FDWs and tablesample methods. There
are multiple advantages. For one, the index AM's support functions
are now simple C functions, making them faster to call and much less
error-prone, since the C compiler can now check function signatures.
For another, this will make it far more practical to define index access
methods in installable extensions.
A disadvantage is that SQL-level code can no longer see attributes
of index AMs; in particular, some of the crosschecks in the opr_sanity
regression test are no longer possible from SQL. We've addressed that
by adding a facility for the index AM to perform such checks instead.
(Much more could be done in that line, but for now we're content if the
amvalidate functions more or less replace what opr_sanity used to do.)
We might also want to expose some sort of reporting functionality, but
this patch doesn't do that.
Alexander Korotkov, reviewed by Petr Jelínek, and rather heavily
editorialized on by me.
2016-01-18 01:36:59 +01:00
|
|
|
bool
|
|
|
|
spgcanreturn(Relation index, int attno)
|
2011-12-18 21:49:00 +01:00
|
|
|
{
|
2011-12-19 20:58:41 +01:00
|
|
|
SpGistCache *cache;
|
|
|
|
|
|
|
|
/* We can do it if the opclass config function says so */
|
|
|
|
cache = spgGetCache(index);
|
|
|
|
|
Restructure index access method API to hide most of it at the C level.
This patch reduces pg_am to just two columns, a name and a handler
function. All the data formerly obtained from pg_am is now provided
in a C struct returned by the handler function. This is similar to
the designs we've adopted for FDWs and tablesample methods. There
are multiple advantages. For one, the index AM's support functions
are now simple C functions, making them faster to call and much less
error-prone, since the C compiler can now check function signatures.
For another, this will make it far more practical to define index access
methods in installable extensions.
A disadvantage is that SQL-level code can no longer see attributes
of index AMs; in particular, some of the crosschecks in the opr_sanity
regression test are no longer possible from SQL. We've addressed that
by adding a facility for the index AM to perform such checks instead.
(Much more could be done in that line, but for now we're content if the
amvalidate functions more or less replace what opr_sanity used to do.)
We might also want to expose some sort of reporting functionality, but
this patch doesn't do that.
Alexander Korotkov, reviewed by Petr Jelínek, and rather heavily
editorialized on by me.
2016-01-18 01:36:59 +01:00
|
|
|
return cache->config.canReturnData;
|
2011-12-18 21:49:00 +01:00
|
|
|
}
|