postgresql/contrib/btree_gist/btree_inet.c

/*
 * contrib/btree_gist/btree_inet.c
 */
#include "postgres.h"
#include "btree_gist.h"
#include "btree_utils_num.h"
#include "catalog/pg_type.h"
#include "utils/builtins.h"
#include "utils/inet.h"
typedef struct inetkey
{
    double      lower;
    double      upper;
} inetKEY;

/*
** inet ops
*/
PG_FUNCTION_INFO_V1(gbt_inet_compress);
PG_FUNCTION_INFO_V1(gbt_inet_union);
PG_FUNCTION_INFO_V1(gbt_inet_picksplit);
PG_FUNCTION_INFO_V1(gbt_inet_consistent);
PG_FUNCTION_INFO_V1(gbt_inet_penalty);
PG_FUNCTION_INFO_V1(gbt_inet_same);
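
/*
 * Comparison callbacks over the double representation.  They are collected
 * in "tinfo" below and handed to the generic gbt_num_* support routines
 * from btree_utils_num.c.
 */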
static bool
gbt_inetgt(const void *a, const void *b, FmgrInfo *flinfo)
{
    return (*((const double *) a) > *((const double *) b));
}

static bool
gbt_inetge(const void *a, const void *b, FmgrInfo *flinfo)
{
    return (*((const double *) a) >= *((const double *) b));
}

static bool
gbt_ineteq(const void *a, const void *b, FmgrInfo *flinfo)
{
    return (*((const double *) a) == *((const double *) b));
}

static bool
gbt_inetle(const void *a, const void *b, FmgrInfo *flinfo)
{
    return (*((const double *) a) <= *((const double *) b));
}

static bool
gbt_inetlt(const void *a, const void *b, FmgrInfo *flinfo)
{
    return (*((const double *) a) < *((const double *) b));
}
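
/*
 * Sort-support comparator used by the generic picksplit code: keys are
 * ordered by their lower bound, with ties broken on the upper bound.
 */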
static int
gbt_inetkey_cmp(const void *a, const void *b, FmgrInfo *flinfo)
{
    inetKEY    *ia = (inetKEY *) (((const Nsrt *) a)->t);
    inetKEY    *ib = (inetKEY *) (((const Nsrt *) b)->t);

    if (ia->lower == ib->lower)
    {
        if (ia->upper == ib->upper)
            return 0;

        return (ia->upper > ib->upper) ? 1 : -1;
    }

    return (ia->lower > ib->lower) ? 1 : -1;
}

static const gbtree_ninfo tinfo =
{
    gbt_t_inet,
    sizeof(double),
    16,                         /* sizeof(gbtreekey16) */
    gbt_inetgt,
    gbt_inetge,
    gbt_ineteq,
    gbt_inetle,
    gbt_inetlt,
    gbt_inetkey_cmp,
    NULL
};


/**************************************************
 * inet ops
 **************************************************/

Datum
gbt_inet_compress(PG_FUNCTION_ARGS)
{
    GISTENTRY  *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
    GISTENTRY  *retval;

    if (entry->leafkey)
    {
        inetKEY    *r = (inetKEY *) palloc(sizeof(inetKEY));
        bool        failure = false;

        retval = palloc(sizeof(GISTENTRY));
        r->lower = convert_network_to_scalar(entry->key, INETOID, &failure);
        Assert(!failure);
        r->upper = r->lower;
        gistentryinit(*retval, PointerGetDatum(r),
                      entry->rel, entry->page,
                      entry->offset, false);
    }
    else
        retval = entry;

    PG_RETURN_POINTER(retval);
}

Datum
gbt_inet_consistent(PG_FUNCTION_ARGS)
{
    GISTENTRY  *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
    Datum       dquery = PG_GETARG_DATUM(1);
    StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2);

    /* Oid      subtype = PG_GETARG_OID(3); */
    bool       *recheck = (bool *) PG_GETARG_POINTER(4);
    inetKEY    *kkk = (inetKEY *) DatumGetPointer(entry->key);
    GBT_NUMKEY_R key;
    double      query;
    bool        failure = false;

    query = convert_network_to_scalar(dquery, INETOID, &failure);
    Assert(!failure);

    /* All cases served by this function are inexact */
    *recheck = true;

    key.lower = (GBT_NUMKEY *) &kkk->lower;
    key.upper = (GBT_NUMKEY *) &kkk->upper;

    PG_RETURN_BOOL(gbt_num_consistent(&key, (void *) &query,
                                      &strategy, GIST_LEAF(entry), &tinfo, fcinfo->flinfo));
}

Datum
gbt_inet_union(PG_FUNCTION_ARGS)
{
    GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0);
    void       *out = palloc(sizeof(inetKEY));

    *(int *) PG_GETARG_POINTER(1) = sizeof(inetKEY);
    PG_RETURN_POINTER(gbt_num_union((void *) out, entryvec, &tinfo, fcinfo->flinfo));
}

Datum
gbt_inet_penalty(PG_FUNCTION_ARGS)
{
    inetKEY    *origentry = (inetKEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(0))->key);
    inetKEY    *newentry = (inetKEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(1))->key);
    float      *result = (float *) PG_GETARG_POINTER(2);

    penalty_num(result, origentry->lower, origentry->upper, newentry->lower, newentry->upper);

    PG_RETURN_POINTER(result);
}

Datum
gbt_inet_picksplit(PG_FUNCTION_ARGS)
{
    PG_RETURN_POINTER(gbt_num_picksplit((GistEntryVector *) PG_GETARG_POINTER(0),
                                        (GIST_SPLITVEC *) PG_GETARG_POINTER(1),
                                        &tinfo, fcinfo->flinfo));
}

Datum
gbt_inet_same(PG_FUNCTION_ARGS)
{
    inetKEY    *b1 = (inetKEY *) PG_GETARG_POINTER(0);
    inetKEY    *b2 = (inetKEY *) PG_GETARG_POINTER(1);
    bool       *result = (bool *) PG_GETARG_POINTER(2);

    *result = gbt_num_same((void *) b1, (void *) b2, &tinfo, fcinfo->flinfo);
    PG_RETURN_POINTER(result);
}
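
/*
 * Minimal usage sketch (SQL, not part of this module).  It assumes the
 * btree_gist extension is installed; the table and column names below are
 * made up for illustration:
 *
 *     CREATE EXTENSION btree_gist;
 *     CREATE TABLE hosts (addr inet);
 *     CREATE INDEX hosts_addr_idx ON hosts USING gist (addr);
 *     SELECT * FROM hosts
 *       WHERE addr BETWEEN inet '10.0.0.0' AND inet '10.0.0.255';
 */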