/*-------------------------------------------------------------------------
*
* gistscan.c
* routines to manage scans on GiST index relations
*
*
* Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* src/backend/access/gist/gistscan.c
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "access/gist_private.h"
#include "access/gistscan.h"
#include "access/relscan.h"
#include "utils/float.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/rel.h"
/*
* Pairing heap comparison function for the GISTSearchItem queue
*/
static int
pairingheap_GISTSearchItem_cmp(const pairingheap_node *a, const pairingheap_node *b, void *arg)
{
const GISTSearchItem *sa = (const GISTSearchItem *) a;
const GISTSearchItem *sb = (const GISTSearchItem *) b;
IndexScanDesc scan = (IndexScanDesc) arg;
int i;
/* Order according to distance comparison */
for (i = 0; i < scan->numberOfOrderBys; i++)
{
if (sa->distances[i].isnull)
{
if (!sb->distances[i].isnull)
return -1;
}
else if (sb->distances[i].isnull)
{
return 1;
}
else
{
int cmp = -float8_cmp_internal(sa->distances[i].value,
sb->distances[i].value);
if (cmp != 0)
return cmp;
}
}
/* Heap items go before inner pages, to ensure a depth-first search */
if (GISTSearchItemIsHeap(*sa) && !GISTSearchItemIsHeap(*sb))
return 1;
if (!GISTSearchItemIsHeap(*sa) && GISTSearchItemIsHeap(*sb))
return -1;
return 0;
}
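
/*
 * A note for readers: PostgreSQL's pairing heap is a max-heap (see
 * lib/pairingheap.h), so the comparator's result is effectively inverted
 * relative to qsort conventions.  Negating float8_cmp_internal() above
 * makes the smallest distance compare greatest and thus pop first, giving
 * nearest-first order for KNN scans, while a NULL distance compares below
 * every non-NULL one and so comes out last.
 */
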
/*
* Index AM API functions for scanning GiST indexes
*/
IndexScanDesc
gistbeginscan(Relation r, int nkeys, int norderbys)
{
IndexScanDesc scan;
GISTSTATE *giststate;
GISTScanOpaque so;
MemoryContext oldCxt;
scan = RelationGetIndexScan(r, nkeys, norderbys);
/* First, set up a GISTSTATE with a scan-lifespan memory context */
giststate = initGISTstate(scan->indexRelation);
/*
* Everything made below is in the scanCxt, or is a child of the scanCxt,
* so it'll all go away automatically in gistendscan.
*/
oldCxt = MemoryContextSwitchTo(giststate->scanCxt);
/* initialize opaque data */
so = (GISTScanOpaque) palloc0(sizeof(GISTScanOpaqueData));
so->giststate = giststate;
giststate->tempCxt = createTempGistContext();
so->queue = NULL;
so->queueCxt = giststate->scanCxt; /* see gistrescan */
/* workspaces with size dependent on numberOfOrderBys: */
so->distances = palloc(sizeof(so->distances[0]) * scan->numberOfOrderBys);
so->qual_ok = true; /* in case there are zero keys */
if (scan->numberOfOrderBys > 0)
{
scan->xs_orderbyvals = palloc0(sizeof(Datum) * scan->numberOfOrderBys);
scan->xs_orderbynulls = palloc(sizeof(bool) * scan->numberOfOrderBys);
memset(scan->xs_orderbynulls, true, sizeof(bool) * scan->numberOfOrderBys);
}
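
	/*
	 * A note for readers: so->distances is the per-column output area
	 * filled in as index items are tested during the scan, while
	 * xs_orderbyvals and xs_orderbynulls (initialized to all-NULL above)
	 * are what gets reported back to the executor for each returned tuple.
	 */
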
so->killedItems = NULL; /* until needed */
so->numKilled = 0;
so->curBlkno = InvalidBlockNumber;
so->curPageLSN = InvalidXLogRecPtr;
scan->opaque = so;
/*
* All fields required for index-only scans are initialized in gistrescan,
* as we don't know yet if we're doing an index-only scan or not.
*/
MemoryContextSwitchTo(oldCxt);
return scan;
}
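
/*
 * A minimal sketch of how these entry points are driven (hypothetical
 * variable names; the real call sites are in the index AM machinery,
 * e.g. indexam.c):
 *
 *		IndexScanDesc scan = gistbeginscan(indexRel, nkeys, norderbys);
 *
 *		gistrescan(scan, keys, nkeys, orderbys, norderbys);
 *		while (gistgettuple(scan, ForwardScanDirection))
 *			... use scan->xs_heaptid ...
 *		gistendscan(scan);
 */
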
void
gistrescan(IndexScanDesc scan, ScanKey key, int nkeys,
ScanKey orderbys, int norderbys)
{
/* nkeys and norderbys arguments are ignored */
GISTScanOpaque so = (GISTScanOpaque) scan->opaque;
bool first_time;
int i;
MemoryContext oldCxt;
/* rescan an existing indexscan --- reset state */
/*
* The first time through, we create the search queue in the scanCxt.
* Subsequent times through, we create the queue in a separate queueCxt,
* which is created on the second call and reset on later calls. Thus, in
* the common case where a scan is only rescan'd once, we just put the
* queue in scanCxt and don't pay the overhead of making a second memory
* context. If we do rescan more than once, the first queue is just left
* for dead until end of scan; this small wastage seems worth the savings
* in the common case.
*/
if (so->queue == NULL)
{
/* first time through */
Assert(so->queueCxt == so->giststate->scanCxt);
first_time = true;
}
else if (so->queueCxt == so->giststate->scanCxt)
{
/* second time through */
so->queueCxt = AllocSetContextCreate(so->giststate->scanCxt,
"GiST queue context",
ALLOCSET_DEFAULT_SIZES);
first_time = false;
}
else
{
/* third or later time through */
MemoryContextReset(so->queueCxt);
first_time = false;
}
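
	/*
	 * In short: call 1 puts the queue in scanCxt, call 2 creates queueCxt
	 * and abandons the first queue, and calls 3+ merely reset queueCxt.
	 * All of it is parented to scanCxt, so gistendscan() releases
	 * everything at once via freeGISTstate().
	 */
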
/*
* If we're doing an index-only scan, on the first call, also initialize a
* tuple descriptor to represent the returned index tuples and create a
* memory context to hold them during the scan.
*/
if (scan->xs_want_itup && !scan->xs_hitupdesc)
{
int natts;
int nkeyatts;
int attno;
/*
* The storage type of the index can be different from the original
* datatype being indexed, so we cannot just grab the index's tuple
* descriptor. Instead, construct a descriptor with the original data
* types.
*/
natts = RelationGetNumberOfAttributes(scan->indexRelation);
nkeyatts = IndexRelationGetNumberOfKeyAttributes(scan->indexRelation);
so->giststate->fetchTupdesc = CreateTemplateTupleDesc(natts);
for (attno = 1; attno <= nkeyatts; attno++)
{
TupleDescInitEntry(so->giststate->fetchTupdesc, attno, NULL,
scan->indexRelation->rd_opcintype[attno - 1],
-1, 0);
}
for (; attno <= natts; attno++)
{
			/* non-key columns: take the type from giststate->leafTupdesc */
TupleDescInitEntry(so->giststate->fetchTupdesc, attno, NULL,
TupleDescAttr(so->giststate->leafTupdesc,
attno - 1)->atttypid,
-1, 0);
}
scan->xs_hitupdesc = so->giststate->fetchTupdesc;
/* Also create a memory context that will hold the returned tuples */
so->pageDataCxt = AllocSetContextCreate(so->giststate->scanCxt,
"GiST page data context",
ALLOCSET_DEFAULT_SIZES);
}
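
	/*
	 * A note for readers: rd_opcintype[] is used above because a GiST
	 * opclass may store values in a transformed ("compressed") form via
	 * its compress support function.  An index-only scan must hand back
	 * the original datatype, which the opclass fetch function
	 * reconstructs, so the descriptor is built from the opclass input
	 * types rather than from the index's own leaf tuple descriptor.
	 */
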
/* create new, empty pairing heap for search queue */
oldCxt = MemoryContextSwitchTo(so->queueCxt);
so->queue = pairingheap_allocate(pairingheap_GISTSearchItem_cmp, scan);
MemoryContextSwitchTo(oldCxt);
so->firstCall = true;
/* Update scan key, if a new one is given */
if (key && scan->numberOfKeys > 0)
{
void **fn_extras = NULL;
/*
* If this isn't the first time through, preserve the fn_extra
* pointers, so that if the consistentFns are using them to cache
* data, that data is not leaked across a rescan.
*/
if (!first_time)
{
fn_extras = (void **) palloc(scan->numberOfKeys * sizeof(void *));
for (i = 0; i < scan->numberOfKeys; i++)
fn_extras[i] = scan->keyData[i].sk_func.fn_extra;
}
memmove(scan->keyData, key,
scan->numberOfKeys * sizeof(ScanKeyData));
/*
* Modify the scan key so that the Consistent method is called for all
* comparisons. The original operator is passed to the Consistent
* function in the form of its strategy number, which is available
* from the sk_strategy field, and its subtype from the sk_subtype
* field.
*
		 * Next, if any of the keys is NULL and that key is not marked with
		 * SK_SEARCHNULL/SK_SEARCHNOTNULL, then nothing can be found (i.e.,
		 * we assume all indexable operators are strict).
*/
so->qual_ok = true;
for (i = 0; i < scan->numberOfKeys; i++)
{
ScanKey skey = scan->keyData + i;
			/*
			 * Copy the consistent support function into the ScanKey, in
			 * place of the function that implements the filtering
			 * operator itself.
			 */
fmgr_info_copy(&(skey->sk_func),
&(so->giststate->consistentFn[skey->sk_attno - 1]),
so->giststate->scanCxt);
/* Restore prior fn_extra pointers, if not first time */
if (!first_time)
skey->sk_func.fn_extra = fn_extras[i];
if (skey->sk_flags & SK_ISNULL)
{
if (!(skey->sk_flags & (SK_SEARCHNULL | SK_SEARCHNOTNULL)))
so->qual_ok = false;
}
}
if (!first_time)
pfree(fn_extras);
}
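
	/*
	 * An illustrative example (not taken from this file): for a qual such
	 * as "box_col && query_box", sk_strategy carries the && operator's
	 * strategy number (RTOverlapStrategyNumber) and sk_func now points at
	 * the opclass consistent function, which uses that strategy number to
	 * decide whether a subtree could contain matches.
	 */
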
/* Update order-by key, if a new one is given */
if (orderbys && scan->numberOfOrderBys > 0)
{
void **fn_extras = NULL;
/* As above, preserve fn_extra if not first time through */
if (!first_time)
{
fn_extras = (void **) palloc(scan->numberOfOrderBys * sizeof(void *));
for (i = 0; i < scan->numberOfOrderBys; i++)
fn_extras[i] = scan->orderByData[i].sk_func.fn_extra;
}
memmove(scan->orderByData, orderbys,
scan->numberOfOrderBys * sizeof(ScanKeyData));
so->orderByTypes = (Oid *) palloc(scan->numberOfOrderBys * sizeof(Oid));
/*
* Modify the order-by key so that the Distance method is called for
* all comparisons. The original operator is passed to the Distance
* function in the form of its strategy number, which is available
* from the sk_strategy field, and its subtype from the sk_subtype
* field.
*/
for (i = 0; i < scan->numberOfOrderBys; i++)
{
ScanKey skey = scan->orderByData + i;
FmgrInfo *finfo = &(so->giststate->distanceFn[skey->sk_attno - 1]);
/* Check we actually have a distance function ... */
if (!OidIsValid(finfo->fn_oid))
elog(ERROR, "missing support function %d for attribute %d of index \"%s\"",
GIST_DISTANCE_PROC, skey->sk_attno,
RelationGetRelationName(scan->indexRelation));
/*
* Look up the datatype returned by the original ordering
* operator. GiST always uses a float8 for the distance function,
* but the ordering operator could be anything else.
*
* XXX: The distance function is only allowed to be lossy if the
* ordering operator's result type is float4 or float8. Otherwise
* we don't know how to return the distance to the executor. But
* we cannot check that here, as we won't know if the distance
* function is lossy until it returns *recheck = true for the
* first time.
*/
so->orderByTypes[i] = get_func_rettype(skey->sk_func.fn_oid);
			/*
			 * Copy the distance support function into the ScanKey, in
			 * place of the function that implements the ordering
			 * operator itself.
			 */
fmgr_info_copy(&(skey->sk_func), finfo, so->giststate->scanCxt);
/* Restore prior fn_extra pointers, if not first time */
if (!first_time)
skey->sk_func.fn_extra = fn_extras[i];
}
if (!first_time)
pfree(fn_extras);
}
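
	/*
	 * An illustrative example: for "ORDER BY point_col <-> constant", the
	 * <-> ordering operator returns float8, so orderByTypes[i] is
	 * FLOAT8OID and the float8 distance computed by the opclass can be
	 * handed back to the executor directly.  With any other return type
	 * the float8 distance cannot be converted back, which is tolerable
	 * only as long as the distance function never sets *recheck.
	 */
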
/* any previous xs_hitup will have been pfree'd in context resets above */
scan->xs_hitup = NULL;
}

void
gistendscan(IndexScanDesc scan)
{
GISTScanOpaque so = (GISTScanOpaque) scan->opaque;
/*
* freeGISTstate is enough to clean up everything made by gistbeginscan,
* as well as the queueCxt if there is a separate context for it.
*/
freeGISTstate(so->giststate);
}