1996-07-09 08:22:35 +02:00
|
|
|
/*-------------------------------------------------------------------------
|
|
|
|
*
|
1999-02-14 00:22:53 +01:00
|
|
|
* lsyscache.c
|
1999-08-09 05:13:31 +02:00
|
|
|
* Convenience routines for common queries in the system catalog cache.
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
2023-01-02 21:00:37 +01:00
|
|
|
* Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
|
2000-01-26 06:58:53 +01:00
|
|
|
* Portions Copyright (c) 1994, Regents of the University of California
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
|
|
|
* IDENTIFICATION
|
2010-09-20 22:08:53 +02:00
|
|
|
* src/backend/utils/cache/lsyscache.c
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
|
|
|
* NOTES
|
|
|
|
* Eventually, the index information should go through here, too.
|
|
|
|
*-------------------------------------------------------------------------
|
|
|
|
*/
|
|
|
|
#include "postgres.h"
|
|
|
|
|
2003-06-23 00:04:55 +02:00
|
|
|
#include "access/hash.h"
|
2012-08-30 22:15:44 +02:00
|
|
|
#include "access/htup_details.h"
|
2007-01-09 03:14:16 +01:00
|
|
|
#include "access/nbtree.h"
|
2006-08-16 00:36:17 +02:00
|
|
|
#include "bootstrap/bootstrap.h"
|
2015-04-06 16:40:55 +02:00
|
|
|
#include "catalog/namespace.h"
|
Restructure index access method API to hide most of it at the C level.
This patch reduces pg_am to just two columns, a name and a handler
function. All the data formerly obtained from pg_am is now provided
in a C struct returned by the handler function. This is similar to
the designs we've adopted for FDWs and tablesample methods. There
are multiple advantages. For one, the index AM's support functions
are now simple C functions, making them faster to call and much less
error-prone, since the C compiler can now check function signatures.
For another, this will make it far more practical to define index access
methods in installable extensions.
A disadvantage is that SQL-level code can no longer see attributes
of index AMs; in particular, some of the crosschecks in the opr_sanity
regression test are no longer possible from SQL. We've addressed that
by adding a facility for the index AM to perform such checks instead.
(Much more could be done in that line, but for now we're content if the
amvalidate functions more or less replace what opr_sanity used to do.)
We might also want to expose some sort of reporting functionality, but
this patch doesn't do that.
Alexander Korotkov, reviewed by Petr Jelínek, and rather heavily
editorialized on by me.
2016-01-18 01:36:59 +01:00
|
|
|
#include "catalog/pg_am.h"
|
2001-08-21 18:36:06 +02:00
|
|
|
#include "catalog/pg_amop.h"
|
2003-06-23 00:04:55 +02:00
|
|
|
#include "catalog/pg_amproc.h"
|
2020-03-10 15:28:23 +01:00
|
|
|
#include "catalog/pg_cast.h"
|
2011-02-08 22:04:18 +01:00
|
|
|
#include "catalog/pg_collation.h"
|
2007-02-14 02:58:58 +01:00
|
|
|
#include "catalog/pg_constraint.h"
|
2015-04-26 16:33:14 +02:00
|
|
|
#include "catalog/pg_language.h"
|
2002-04-02 03:03:07 +02:00
|
|
|
#include "catalog/pg_namespace.h"
|
2001-08-21 18:36:06 +02:00
|
|
|
#include "catalog/pg_opclass.h"
|
1996-10-31 06:58:01 +01:00
|
|
|
#include "catalog/pg_operator.h"
|
1999-08-16 04:06:25 +02:00
|
|
|
#include "catalog/pg_proc.h"
|
2011-11-03 12:16:28 +01:00
|
|
|
#include "catalog/pg_range.h"
|
2001-05-07 02:43:27 +02:00
|
|
|
#include "catalog/pg_statistic.h"
|
2022-08-02 07:17:22 +02:00
|
|
|
#include "catalog/pg_subscription.h"
|
2015-04-26 16:33:14 +02:00
|
|
|
#include "catalog/pg_transform.h"
|
1996-07-09 08:22:35 +02:00
|
|
|
#include "catalog/pg_type.h"
|
2006-08-16 00:36:17 +02:00
|
|
|
#include "miscadmin.h"
|
2002-03-20 20:45:13 +01:00
|
|
|
#include "nodes/makefuncs.h"
|
2001-05-07 02:43:27 +02:00
|
|
|
#include "utils/array.h"
|
|
|
|
#include "utils/builtins.h"
|
2012-08-29 00:26:24 +02:00
|
|
|
#include "utils/catcache.h"
|
2003-03-23 06:14:37 +01:00
|
|
|
#include "utils/datum.h"
|
2011-06-03 21:38:12 +02:00
|
|
|
#include "utils/fmgroids.h"
|
1999-07-16 07:23:30 +02:00
|
|
|
#include "utils/lsyscache.h"
|
2011-02-23 18:18:09 +01:00
|
|
|
#include "utils/rel.h"
|
1999-07-16 07:23:30 +02:00
|
|
|
#include "utils/syscache.h"
|
2010-10-31 02:55:20 +01:00
|
|
|
#include "utils/typcache.h"
|
1996-07-09 08:22:35 +02:00
|
|
|
|
2008-09-28 21:51:40 +02:00
|
|
|
/* Hook for plugins to get control in get_attavgwidth() */
|
|
|
|
get_attavgwidth_hook_type get_attavgwidth_hook = NULL;
|
|
|
|
|
2001-06-14 03:09:22 +02:00
|
|
|
|
1996-07-09 08:22:35 +02:00
|
|
|
/* ---------- AMOP CACHES ---------- */
|
|
|
|
|
|
|
|
/*
|
2006-12-23 01:43:13 +01:00
|
|
|
* op_in_opfamily
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
2006-12-23 01:43:13 +01:00
|
|
|
* Return t iff operator 'opno' is in operator family 'opfamily'.
|
2010-11-24 20:20:39 +01:00
|
|
|
*
|
|
|
|
* This function only considers search operators, not ordering operators.
|
1996-07-09 08:22:35 +02:00
|
|
|
*/
|
|
|
|
bool
|
2006-12-23 01:43:13 +01:00
|
|
|
op_in_opfamily(Oid opno, Oid opfamily)
|
1996-07-09 08:22:35 +02:00
|
|
|
{
|
2010-11-24 20:20:39 +01:00
|
|
|
return SearchSysCacheExists3(AMOPOPID,
|
2010-02-14 19:42:19 +01:00
|
|
|
ObjectIdGetDatum(opno),
|
2010-11-24 20:20:39 +01:00
|
|
|
CharGetDatum(AMOP_SEARCH),
|
2010-02-14 19:42:19 +01:00
|
|
|
ObjectIdGetDatum(opfamily));
|
2001-08-21 18:36:06 +02:00
|
|
|
}
|
|
|
|
|
2005-04-12 01:06:57 +02:00
|
|
|
/*
|
2006-12-23 01:43:13 +01:00
|
|
|
* get_op_opfamily_strategy
|
2005-04-12 01:06:57 +02:00
|
|
|
*
|
2006-12-23 01:43:13 +01:00
|
|
|
* Get the operator's strategy number within the specified opfamily,
|
|
|
|
* or 0 if it's not a member of the opfamily.
|
2010-11-24 20:20:39 +01:00
|
|
|
*
|
|
|
|
* This function only considers search operators, not ordering operators.
|
2005-04-12 01:06:57 +02:00
|
|
|
*/
|
|
|
|
int
|
2006-12-23 01:43:13 +01:00
|
|
|
get_op_opfamily_strategy(Oid opno, Oid opfamily)
|
2005-04-12 01:06:57 +02:00
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
Form_pg_amop amop_tup;
|
|
|
|
int result;
|
|
|
|
|
2010-11-24 20:20:39 +01:00
|
|
|
tp = SearchSysCache3(AMOPOPID,
|
2010-02-14 19:42:19 +01:00
|
|
|
ObjectIdGetDatum(opno),
|
2010-11-24 20:20:39 +01:00
|
|
|
CharGetDatum(AMOP_SEARCH),
|
2010-02-14 19:42:19 +01:00
|
|
|
ObjectIdGetDatum(opfamily));
|
2005-04-12 01:06:57 +02:00
|
|
|
if (!HeapTupleIsValid(tp))
|
|
|
|
return 0;
|
|
|
|
amop_tup = (Form_pg_amop) GETSTRUCT(tp);
|
|
|
|
result = amop_tup->amopstrategy;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2010-12-03 02:50:48 +01:00
|
|
|
/*
|
|
|
|
* get_op_opfamily_sortfamily
|
|
|
|
*
|
|
|
|
* If the operator is an ordering operator within the specified opfamily,
|
|
|
|
* return its amopsortfamily OID; else return InvalidOid.
|
|
|
|
*/
|
|
|
|
Oid
|
|
|
|
get_op_opfamily_sortfamily(Oid opno, Oid opfamily)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
Form_pg_amop amop_tup;
|
|
|
|
Oid result;
|
|
|
|
|
|
|
|
tp = SearchSysCache3(AMOPOPID,
|
|
|
|
ObjectIdGetDatum(opno),
|
|
|
|
CharGetDatum(AMOP_ORDER),
|
|
|
|
ObjectIdGetDatum(opfamily));
|
|
|
|
if (!HeapTupleIsValid(tp))
|
|
|
|
return InvalidOid;
|
|
|
|
amop_tup = (Form_pg_amop) GETSTRUCT(tp);
|
|
|
|
result = amop_tup->amopsortfamily;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2001-08-21 18:36:06 +02:00
|
|
|
/*
|
2006-12-23 01:43:13 +01:00
|
|
|
* get_op_opfamily_properties
|
2001-08-21 18:36:06 +02:00
|
|
|
*
|
2008-04-13 22:51:21 +02:00
|
|
|
* Get the operator's strategy number and declared input data types
|
|
|
|
* within the specified opfamily.
|
2001-08-21 18:36:06 +02:00
|
|
|
*
|
2006-12-23 01:43:13 +01:00
|
|
|
* Caller should already have verified that opno is a member of opfamily,
|
2001-08-21 18:36:06 +02:00
|
|
|
* therefore we raise an error if the tuple is not found.
|
|
|
|
*/
|
2003-11-09 22:30:38 +01:00
|
|
|
void
|
2010-12-03 02:50:48 +01:00
|
|
|
get_op_opfamily_properties(Oid opno, Oid opfamily, bool ordering_op,
|
2006-12-23 01:43:13 +01:00
|
|
|
int *strategy,
|
|
|
|
Oid *lefttype,
|
2008-04-13 22:51:21 +02:00
|
|
|
Oid *righttype)
|
2001-08-21 18:36:06 +02:00
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
Form_pg_amop amop_tup;
|
|
|
|
|
2010-11-24 20:20:39 +01:00
|
|
|
tp = SearchSysCache3(AMOPOPID,
|
2010-02-14 19:42:19 +01:00
|
|
|
ObjectIdGetDatum(opno),
|
2010-12-03 02:50:48 +01:00
|
|
|
CharGetDatum(ordering_op ? AMOP_ORDER : AMOP_SEARCH),
|
2010-02-14 19:42:19 +01:00
|
|
|
ObjectIdGetDatum(opfamily));
|
2001-08-21 18:36:06 +02:00
|
|
|
if (!HeapTupleIsValid(tp))
|
2006-12-23 01:43:13 +01:00
|
|
|
elog(ERROR, "operator %u is not a member of opfamily %u",
|
|
|
|
opno, opfamily);
|
2001-08-21 18:36:06 +02:00
|
|
|
amop_tup = (Form_pg_amop) GETSTRUCT(tp);
|
2003-11-09 22:30:38 +01:00
|
|
|
*strategy = amop_tup->amopstrategy;
|
2006-12-23 01:43:13 +01:00
|
|
|
*lefttype = amop_tup->amoplefttype;
|
|
|
|
*righttype = amop_tup->amoprighttype;
|
2001-08-21 18:36:06 +02:00
|
|
|
ReleaseSysCache(tp);
|
1996-07-09 08:22:35 +02:00
|
|
|
}
|
|
|
|
|
2003-05-26 02:11:29 +02:00
|
|
|
/*
|
2006-12-23 01:43:13 +01:00
|
|
|
* get_opfamily_member
|
2003-05-26 02:11:29 +02:00
|
|
|
* Get the OID of the operator that implements the specified strategy
|
2006-12-23 01:43:13 +01:00
|
|
|
* with the specified datatypes for the specified opfamily.
|
2003-05-26 02:11:29 +02:00
|
|
|
*
|
|
|
|
* Returns InvalidOid if there is no pg_amop entry for the given keys.
|
|
|
|
*/
|
|
|
|
Oid
|
2006-12-23 01:43:13 +01:00
|
|
|
get_opfamily_member(Oid opfamily, Oid lefttype, Oid righttype,
|
|
|
|
int16 strategy)
|
2003-05-26 02:11:29 +02:00
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
Form_pg_amop amop_tup;
|
|
|
|
Oid result;
|
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache4(AMOPSTRATEGY,
|
|
|
|
ObjectIdGetDatum(opfamily),
|
|
|
|
ObjectIdGetDatum(lefttype),
|
|
|
|
ObjectIdGetDatum(righttype),
|
|
|
|
Int16GetDatum(strategy));
|
2003-05-26 02:11:29 +02:00
|
|
|
if (!HeapTupleIsValid(tp))
|
|
|
|
return InvalidOid;
|
|
|
|
amop_tup = (Form_pg_amop) GETSTRUCT(tp);
|
|
|
|
result = amop_tup->amopopr;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2007-01-09 03:14:16 +01:00
|
|
|
/*
|
2007-01-21 01:57:15 +01:00
|
|
|
* get_ordering_op_properties
|
|
|
|
* Given the OID of an ordering operator (a btree "<" or ">" operator),
|
|
|
|
* determine its opfamily, its declared input datatype, and its
|
|
|
|
* strategy number (BTLessStrategyNumber or BTGreaterStrategyNumber).
|
2007-01-09 03:14:16 +01:00
|
|
|
*
|
2017-08-16 06:22:32 +02:00
|
|
|
* Returns true if successful, false if no matching pg_amop entry exists.
|
2007-01-09 03:14:16 +01:00
|
|
|
* (This indicates that the operator is not a valid ordering operator.)
|
2007-01-21 01:57:15 +01:00
|
|
|
*
|
|
|
|
* Note: the operator could be registered in multiple families, for example
|
|
|
|
* if someone were to build a "reverse sort" opfamily. This would result in
|
|
|
|
* uncertainty as to whether "ORDER BY USING op" would default to NULLS FIRST
|
|
|
|
* or NULLS LAST, as well as inefficient planning due to failure to match up
|
|
|
|
* pathkeys that should be the same. So we want a determinate result here.
|
|
|
|
* Because of the way the syscache search works, we'll use the interpretation
|
|
|
|
* associated with the opfamily with smallest OID, which is probably
|
|
|
|
* determinate enough. Since there is no longer any particularly good reason
|
|
|
|
* to build reverse-sort opfamilies, it doesn't seem worth expending any
|
|
|
|
* additional effort on ensuring consistency.
|
2007-01-09 03:14:16 +01:00
|
|
|
*/
|
|
|
|
bool
|
2007-01-21 01:57:15 +01:00
|
|
|
get_ordering_op_properties(Oid opno,
|
|
|
|
Oid *opfamily, Oid *opcintype, int16 *strategy)
|
2007-01-09 03:14:16 +01:00
|
|
|
{
|
|
|
|
bool result = false;
|
|
|
|
CatCList *catlist;
|
|
|
|
int i;
|
|
|
|
|
2007-01-21 01:57:15 +01:00
|
|
|
/* ensure outputs are initialized on failure */
|
|
|
|
*opfamily = InvalidOid;
|
|
|
|
*opcintype = InvalidOid;
|
|
|
|
*strategy = 0;
|
2007-01-09 03:14:16 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Search pg_amop to see if the target operator is registered as the "<"
|
2007-01-21 01:57:15 +01:00
|
|
|
* or ">" operator of any btree opfamily.
|
2007-01-09 03:14:16 +01:00
|
|
|
*/
|
2010-02-14 19:42:19 +01:00
|
|
|
catlist = SearchSysCacheList1(AMOPOPID, ObjectIdGetDatum(opno));
|
2007-01-09 03:14:16 +01:00
|
|
|
|
|
|
|
for (i = 0; i < catlist->n_members; i++)
|
|
|
|
{
|
|
|
|
HeapTuple tuple = &catlist->members[i]->tuple;
|
|
|
|
Form_pg_amop aform = (Form_pg_amop) GETSTRUCT(tuple);
|
|
|
|
|
|
|
|
/* must be btree */
|
|
|
|
if (aform->amopmethod != BTREE_AM_OID)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (aform->amopstrategy == BTLessStrategyNumber ||
|
|
|
|
aform->amopstrategy == BTGreaterStrategyNumber)
|
|
|
|
{
|
2007-01-21 01:57:15 +01:00
|
|
|
/* Found it ... should have consistent input types */
|
|
|
|
if (aform->amoplefttype == aform->amoprighttype)
|
|
|
|
{
|
|
|
|
/* Found a suitable opfamily, return info */
|
|
|
|
*opfamily = aform->amopfamily;
|
|
|
|
*opcintype = aform->amoplefttype;
|
|
|
|
*strategy = aform->amopstrategy;
|
|
|
|
result = true;
|
|
|
|
break;
|
|
|
|
}
|
2007-01-09 03:14:16 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
ReleaseSysCacheList(catlist);
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2007-01-10 19:06:05 +01:00
|
|
|
/*
|
|
|
|
* get_equality_op_for_ordering_op
|
|
|
|
* Get the OID of the datatype-specific btree equality operator
|
|
|
|
* associated with an ordering operator (a "<" or ">" operator).
|
|
|
|
*
|
2017-08-16 06:22:32 +02:00
|
|
|
* If "reverse" isn't NULL, also set *reverse to false if the operator is "<",
|
|
|
|
* true if it's ">"
|
2008-08-02 23:32:01 +02:00
|
|
|
*
|
2007-01-10 19:06:05 +01:00
|
|
|
* Returns InvalidOid if no matching equality operator can be found.
|
|
|
|
* (This indicates that the operator is not a valid ordering operator.)
|
|
|
|
*/
|
|
|
|
Oid
|
2008-08-02 23:32:01 +02:00
|
|
|
get_equality_op_for_ordering_op(Oid opno, bool *reverse)
|
2007-01-10 19:06:05 +01:00
|
|
|
{
|
|
|
|
Oid result = InvalidOid;
|
2007-01-21 01:57:15 +01:00
|
|
|
Oid opfamily;
|
|
|
|
Oid opcintype;
|
|
|
|
int16 strategy;
|
2007-01-10 19:06:05 +01:00
|
|
|
|
2007-01-21 01:57:15 +01:00
|
|
|
/* Find the operator in pg_amop */
|
|
|
|
if (get_ordering_op_properties(opno,
|
|
|
|
&opfamily, &opcintype, &strategy))
|
2007-01-10 19:06:05 +01:00
|
|
|
{
|
2007-01-21 01:57:15 +01:00
|
|
|
/* Found a suitable opfamily, get matching equality operator */
|
|
|
|
result = get_opfamily_member(opfamily,
|
|
|
|
opcintype,
|
|
|
|
opcintype,
|
|
|
|
BTEqualStrategyNumber);
|
2008-08-02 23:32:01 +02:00
|
|
|
if (reverse)
|
|
|
|
*reverse = (strategy == BTGreaterStrategyNumber);
|
2007-01-10 19:06:05 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* get_ordering_op_for_equality_op
|
|
|
|
* Get the OID of a datatype-specific btree ordering operator
|
|
|
|
* associated with an equality operator. (If there are multiple
|
|
|
|
* possibilities, assume any one will do.)
|
|
|
|
*
|
|
|
|
* This function is used when we have to sort data before unique-ifying,
|
|
|
|
* and don't much care which sorting op is used as long as it's compatible
|
|
|
|
* with the intended equality operator. Since we need a sorting operator,
|
|
|
|
* it should be single-data-type even if the given operator is cross-type.
|
|
|
|
* The caller specifies whether to find an op for the LHS or RHS data type.
|
|
|
|
*
|
|
|
|
* Returns InvalidOid if no matching ordering operator can be found.
|
|
|
|
*/
|
|
|
|
Oid
|
|
|
|
get_ordering_op_for_equality_op(Oid opno, bool use_lhs_type)
|
|
|
|
{
|
|
|
|
Oid result = InvalidOid;
|
|
|
|
CatCList *catlist;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Search pg_amop to see if the target operator is registered as the "="
|
|
|
|
* operator of any btree opfamily.
|
|
|
|
*/
|
2010-02-14 19:42:19 +01:00
|
|
|
catlist = SearchSysCacheList1(AMOPOPID, ObjectIdGetDatum(opno));
|
2007-01-10 19:06:05 +01:00
|
|
|
|
|
|
|
for (i = 0; i < catlist->n_members; i++)
|
|
|
|
{
|
|
|
|
HeapTuple tuple = &catlist->members[i]->tuple;
|
|
|
|
Form_pg_amop aform = (Form_pg_amop) GETSTRUCT(tuple);
|
|
|
|
|
|
|
|
/* must be btree */
|
|
|
|
if (aform->amopmethod != BTREE_AM_OID)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (aform->amopstrategy == BTEqualStrategyNumber)
|
|
|
|
{
|
|
|
|
/* Found a suitable opfamily, get matching ordering operator */
|
|
|
|
Oid typid;
|
|
|
|
|
|
|
|
typid = use_lhs_type ? aform->amoplefttype : aform->amoprighttype;
|
|
|
|
result = get_opfamily_member(aform->amopfamily,
|
|
|
|
typid, typid,
|
|
|
|
BTLessStrategyNumber);
|
|
|
|
if (OidIsValid(result))
|
|
|
|
break;
|
|
|
|
/* failure probably shouldn't happen, but keep looking if so */
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
ReleaseSysCacheList(catlist);
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2007-01-20 21:45:41 +01:00
|
|
|
/*
|
|
|
|
* get_mergejoin_opfamilies
|
|
|
|
* Given a putatively mergejoinable operator, return a list of the OIDs
|
|
|
|
* of the btree opfamilies in which it represents equality.
|
|
|
|
*
|
|
|
|
* It is possible (though at present unusual) for an operator to be equality
|
|
|
|
* in more than one opfamily, hence the result is a list. This also lets us
|
|
|
|
* return NIL if the operator is not found in any opfamilies.
|
|
|
|
*
|
|
|
|
* The planner currently uses simple equal() tests to compare the lists
|
|
|
|
* returned by this function, which makes the list order relevant, though
|
|
|
|
* strictly speaking it should not be. Because of the way syscache list
|
|
|
|
* searches are handled, in normal operation the result will be sorted by OID
|
|
|
|
* so everything works fine. If running with system index usage disabled,
|
|
|
|
* the result ordering is unspecified and hence the planner might fail to
|
|
|
|
* recognize optimization opportunities ... but that's hardly a scenario in
|
|
|
|
* which performance is good anyway, so there's no point in expending code
|
|
|
|
* or cycles here to guarantee the ordering in that case.
|
|
|
|
*/
|
|
|
|
List *
|
|
|
|
get_mergejoin_opfamilies(Oid opno)
|
|
|
|
{
|
|
|
|
List *result = NIL;
|
|
|
|
CatCList *catlist;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Search pg_amop to see if the target operator is registered as the "="
|
|
|
|
* operator of any btree opfamily.
|
|
|
|
*/
|
2010-02-14 19:42:19 +01:00
|
|
|
catlist = SearchSysCacheList1(AMOPOPID, ObjectIdGetDatum(opno));
|
2007-01-20 21:45:41 +01:00
|
|
|
|
|
|
|
for (i = 0; i < catlist->n_members; i++)
|
|
|
|
{
|
|
|
|
HeapTuple tuple = &catlist->members[i]->tuple;
|
|
|
|
Form_pg_amop aform = (Form_pg_amop) GETSTRUCT(tuple);
|
|
|
|
|
|
|
|
/* must be btree equality */
|
|
|
|
if (aform->amopmethod == BTREE_AM_OID &&
|
|
|
|
aform->amopstrategy == BTEqualStrategyNumber)
|
|
|
|
result = lappend_oid(result, aform->amopfamily);
|
|
|
|
}
|
|
|
|
|
|
|
|
ReleaseSysCacheList(catlist);
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2007-01-10 19:06:05 +01:00
|
|
|
/*
|
2007-01-30 02:33:36 +01:00
|
|
|
* get_compatible_hash_operators
|
|
|
|
* Get the OID(s) of hash equality operator(s) compatible with the given
|
|
|
|
* operator, but operating on its LHS and/or RHS datatype.
|
2007-01-10 19:06:05 +01:00
|
|
|
*
|
2007-01-30 02:33:36 +01:00
|
|
|
* An operator for the LHS type is sought and returned into *lhs_opno if
|
|
|
|
* lhs_opno isn't NULL. Similarly, an operator for the RHS type is sought
|
|
|
|
* and returned into *rhs_opno if rhs_opno isn't NULL.
|
2007-01-10 19:06:05 +01:00
|
|
|
*
|
2007-01-30 02:33:36 +01:00
|
|
|
* If the given operator is not cross-type, the results should be the same
|
|
|
|
* operator, but in cross-type situations they will be different.
|
|
|
|
*
|
|
|
|
* Returns true if able to find the requested operator(s), false if not.
|
|
|
|
* (This indicates that the operator should not have been marked oprcanhash.)
|
2007-01-10 19:06:05 +01:00
|
|
|
*/
|
2007-01-30 02:33:36 +01:00
|
|
|
bool
|
|
|
|
get_compatible_hash_operators(Oid opno,
|
|
|
|
Oid *lhs_opno, Oid *rhs_opno)
|
2007-01-10 19:06:05 +01:00
|
|
|
{
|
2007-01-30 02:33:36 +01:00
|
|
|
bool result = false;
|
2007-01-10 19:06:05 +01:00
|
|
|
CatCList *catlist;
|
|
|
|
int i;
|
|
|
|
|
2007-01-30 02:33:36 +01:00
|
|
|
/* Ensure output args are initialized on failure */
|
|
|
|
if (lhs_opno)
|
|
|
|
*lhs_opno = InvalidOid;
|
|
|
|
if (rhs_opno)
|
|
|
|
*rhs_opno = InvalidOid;
|
|
|
|
|
2007-01-10 19:06:05 +01:00
|
|
|
/*
|
|
|
|
* Search pg_amop to see if the target operator is registered as the "="
|
|
|
|
* operator of any hash opfamily. If the operator is registered in
|
|
|
|
* multiple opfamilies, assume we can use any one.
|
|
|
|
*/
|
2010-02-14 19:42:19 +01:00
|
|
|
catlist = SearchSysCacheList1(AMOPOPID, ObjectIdGetDatum(opno));
|
2007-01-10 19:06:05 +01:00
|
|
|
|
|
|
|
for (i = 0; i < catlist->n_members; i++)
|
|
|
|
{
|
|
|
|
HeapTuple tuple = &catlist->members[i]->tuple;
|
|
|
|
Form_pg_amop aform = (Form_pg_amop) GETSTRUCT(tuple);
|
|
|
|
|
|
|
|
if (aform->amopmethod == HASH_AM_OID &&
|
|
|
|
aform->amopstrategy == HTEqualStrategyNumber)
|
|
|
|
{
|
|
|
|
/* No extra lookup needed if given operator is single-type */
|
|
|
|
if (aform->amoplefttype == aform->amoprighttype)
|
|
|
|
{
|
2007-01-30 02:33:36 +01:00
|
|
|
if (lhs_opno)
|
|
|
|
*lhs_opno = opno;
|
|
|
|
if (rhs_opno)
|
|
|
|
*rhs_opno = opno;
|
|
|
|
result = true;
|
2007-01-10 19:06:05 +01:00
|
|
|
break;
|
|
|
|
}
|
2007-11-15 22:14:46 +01:00
|
|
|
|
2007-01-30 02:33:36 +01:00
|
|
|
/*
|
|
|
|
* Get the matching single-type operator(s). Failure probably
|
|
|
|
* shouldn't happen --- it implies a bogus opfamily --- but
|
|
|
|
* continue looking if so.
|
|
|
|
*/
|
|
|
|
if (lhs_opno)
|
|
|
|
{
|
|
|
|
*lhs_opno = get_opfamily_member(aform->amopfamily,
|
|
|
|
aform->amoplefttype,
|
|
|
|
aform->amoplefttype,
|
|
|
|
HTEqualStrategyNumber);
|
|
|
|
if (!OidIsValid(*lhs_opno))
|
|
|
|
continue;
|
|
|
|
/* Matching LHS found, done if caller doesn't want RHS */
|
|
|
|
if (!rhs_opno)
|
|
|
|
{
|
|
|
|
result = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (rhs_opno)
|
|
|
|
{
|
|
|
|
*rhs_opno = get_opfamily_member(aform->amopfamily,
|
|
|
|
aform->amoprighttype,
|
|
|
|
aform->amoprighttype,
|
|
|
|
HTEqualStrategyNumber);
|
|
|
|
if (!OidIsValid(*rhs_opno))
|
|
|
|
{
|
|
|
|
/* Forget any LHS operator from this opfamily */
|
|
|
|
if (lhs_opno)
|
|
|
|
*lhs_opno = InvalidOid;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
/* Matching RHS found, so done */
|
|
|
|
result = true;
|
2007-01-10 19:06:05 +01:00
|
|
|
break;
|
2007-01-30 02:33:36 +01:00
|
|
|
}
|
2007-01-10 19:06:05 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
ReleaseSysCacheList(catlist);
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2003-06-23 00:04:55 +02:00
|
|
|
/*
|
2007-01-30 02:33:36 +01:00
|
|
|
* get_op_hash_functions
|
2017-09-01 04:21:21 +02:00
|
|
|
* Get the OID(s) of the standard hash support function(s) compatible with
|
|
|
|
* the given operator, operating on its LHS and/or RHS datatype as required.
|
2007-01-30 02:33:36 +01:00
|
|
|
*
|
|
|
|
* A function for the LHS type is sought and returned into *lhs_procno if
|
|
|
|
* lhs_procno isn't NULL. Similarly, a function for the RHS type is sought
|
|
|
|
* and returned into *rhs_procno if rhs_procno isn't NULL.
|
2003-06-23 00:04:55 +02:00
|
|
|
*
|
2007-01-30 02:33:36 +01:00
|
|
|
* If the given operator is not cross-type, the results should be the same
|
|
|
|
* function, but in cross-type situations they will be different.
|
2006-12-23 01:43:13 +01:00
|
|
|
*
|
2007-01-30 02:33:36 +01:00
|
|
|
* Returns true if able to find the requested function(s), false if not.
|
|
|
|
* (This indicates that the operator should not have been marked oprcanhash.)
|
2003-06-23 00:04:55 +02:00
|
|
|
*/
|
2007-01-30 02:33:36 +01:00
|
|
|
bool
|
|
|
|
get_op_hash_functions(Oid opno,
|
|
|
|
RegProcedure *lhs_procno, RegProcedure *rhs_procno)
|
2003-06-23 00:04:55 +02:00
|
|
|
{
|
2007-01-30 02:33:36 +01:00
|
|
|
bool result = false;
|
2003-06-23 00:04:55 +02:00
|
|
|
CatCList *catlist;
|
|
|
|
int i;
|
|
|
|
|
2007-01-30 02:33:36 +01:00
|
|
|
/* Ensure output args are initialized on failure */
|
|
|
|
if (lhs_procno)
|
|
|
|
*lhs_procno = InvalidOid;
|
|
|
|
if (rhs_procno)
|
|
|
|
*rhs_procno = InvalidOid;
|
|
|
|
|
2003-06-23 00:04:55 +02:00
|
|
|
/*
|
|
|
|
* Search pg_amop to see if the target operator is registered as the "="
|
2006-12-23 01:43:13 +01:00
|
|
|
* operator of any hash opfamily. If the operator is registered in
|
2007-01-30 02:33:36 +01:00
|
|
|
* multiple opfamilies, assume we can use any one.
|
2003-06-23 00:04:55 +02:00
|
|
|
*/
|
2010-02-14 19:42:19 +01:00
|
|
|
catlist = SearchSysCacheList1(AMOPOPID, ObjectIdGetDatum(opno));
|
2003-06-23 00:04:55 +02:00
|
|
|
|
|
|
|
for (i = 0; i < catlist->n_members; i++)
|
|
|
|
{
|
2003-08-17 21:58:06 +02:00
|
|
|
HeapTuple tuple = &catlist->members[i]->tuple;
|
|
|
|
Form_pg_amop aform = (Form_pg_amop) GETSTRUCT(tuple);
|
2003-06-23 00:04:55 +02:00
|
|
|
|
2006-12-23 01:43:13 +01:00
|
|
|
if (aform->amopmethod == HASH_AM_OID &&
|
|
|
|
aform->amopstrategy == HTEqualStrategyNumber)
|
2003-06-23 00:04:55 +02:00
|
|
|
{
|
2007-01-30 02:33:36 +01:00
|
|
|
/*
|
|
|
|
* Get the matching support function(s). Failure probably
|
|
|
|
* shouldn't happen --- it implies a bogus opfamily --- but
|
|
|
|
* continue looking if so.
|
|
|
|
*/
|
|
|
|
if (lhs_procno)
|
|
|
|
{
|
|
|
|
*lhs_procno = get_opfamily_proc(aform->amopfamily,
|
|
|
|
aform->amoplefttype,
|
|
|
|
aform->amoplefttype,
|
2017-09-01 04:21:21 +02:00
|
|
|
HASHSTANDARD_PROC);
|
2007-01-30 02:33:36 +01:00
|
|
|
if (!OidIsValid(*lhs_procno))
|
|
|
|
continue;
|
|
|
|
/* Matching LHS found, done if caller doesn't want RHS */
|
|
|
|
if (!rhs_procno)
|
|
|
|
{
|
|
|
|
result = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
/* Only one lookup needed if given operator is single-type */
|
|
|
|
if (aform->amoplefttype == aform->amoprighttype)
|
|
|
|
{
|
|
|
|
*rhs_procno = *lhs_procno;
|
|
|
|
result = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (rhs_procno)
|
|
|
|
{
|
|
|
|
*rhs_procno = get_opfamily_proc(aform->amopfamily,
|
|
|
|
aform->amoprighttype,
|
|
|
|
aform->amoprighttype,
|
2017-09-01 04:21:21 +02:00
|
|
|
HASHSTANDARD_PROC);
|
2007-01-30 02:33:36 +01:00
|
|
|
if (!OidIsValid(*rhs_procno))
|
|
|
|
{
|
|
|
|
/* Forget any LHS function from this opfamily */
|
|
|
|
if (lhs_procno)
|
|
|
|
*lhs_procno = InvalidOid;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
/* Matching RHS found, so done */
|
|
|
|
result = true;
|
|
|
|
break;
|
|
|
|
}
|
2003-06-23 00:04:55 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
ReleaseSysCacheList(catlist);
|
|
|
|
|
2006-12-23 01:43:13 +01:00
|
|
|
return result;
|
2003-06-23 00:04:55 +02:00
|
|
|
}
|
|
|
|
|
2005-12-28 02:30:02 +01:00
|
|
|
/*
|
|
|
|
* get_op_btree_interpretation
|
2006-12-23 01:43:13 +01:00
|
|
|
* Given an operator's OID, find out which btree opfamilies it belongs to,
|
2011-07-06 20:53:16 +02:00
|
|
|
* and what properties it has within each one. The results are returned
|
|
|
|
* as a palloc'd list of OpBtreeInterpretation structs.
|
2005-12-28 02:30:02 +01:00
|
|
|
*
|
|
|
|
* In addition to the normal btree operators, we consider a <> operator to be
|
2006-12-23 01:43:13 +01:00
|
|
|
* a "member" of an opfamily if its negator is an equality operator of the
|
|
|
|
* opfamily. ROWCOMPARE_NE is returned as the strategy number for this case.
|
2005-12-28 02:30:02 +01:00
|
|
|
*/
|
2011-07-06 20:53:16 +02:00
|
|
|
List *
|
|
|
|
get_op_btree_interpretation(Oid opno)
|
2005-12-28 02:30:02 +01:00
|
|
|
{
|
2011-07-06 20:53:16 +02:00
|
|
|
List *result = NIL;
|
|
|
|
OpBtreeInterpretation *thisresult;
|
2005-12-28 02:30:02 +01:00
|
|
|
CatCList *catlist;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Find all the pg_amop entries containing the operator.
|
|
|
|
*/
|
2010-02-14 19:42:19 +01:00
|
|
|
catlist = SearchSysCacheList1(AMOPOPID, ObjectIdGetDatum(opno));
|
2006-10-04 02:30:14 +02:00
|
|
|
|
2005-12-28 02:30:02 +01:00
|
|
|
for (i = 0; i < catlist->n_members; i++)
|
|
|
|
{
|
|
|
|
HeapTuple op_tuple = &catlist->members[i]->tuple;
|
|
|
|
Form_pg_amop op_form = (Form_pg_amop) GETSTRUCT(op_tuple);
|
|
|
|
StrategyNumber op_strategy;
|
|
|
|
|
|
|
|
/* must be btree */
|
2006-12-23 01:43:13 +01:00
|
|
|
if (op_form->amopmethod != BTREE_AM_OID)
|
2005-12-28 02:30:02 +01:00
|
|
|
continue;
|
|
|
|
|
|
|
|
/* Get the operator's btree strategy number */
|
|
|
|
op_strategy = (StrategyNumber) op_form->amopstrategy;
|
|
|
|
Assert(op_strategy >= 1 && op_strategy <= 5);
|
|
|
|
|
2011-07-06 20:53:16 +02:00
|
|
|
thisresult = (OpBtreeInterpretation *)
|
|
|
|
palloc(sizeof(OpBtreeInterpretation));
|
|
|
|
thisresult->opfamily_id = op_form->amopfamily;
|
|
|
|
thisresult->strategy = op_strategy;
|
|
|
|
thisresult->oplefttype = op_form->amoplefttype;
|
|
|
|
thisresult->oprighttype = op_form->amoprighttype;
|
|
|
|
result = lappend(result, thisresult);
|
|
|
|
}
|
|
|
|
|
|
|
|
ReleaseSysCacheList(catlist);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If we didn't find any btree opfamily containing the operator, perhaps
|
|
|
|
* it is a <> operator. See if it has a negator that is in an opfamily.
|
|
|
|
*/
|
|
|
|
if (result == NIL)
|
|
|
|
{
|
|
|
|
Oid op_negator = get_negator(opno);
|
|
|
|
|
|
|
|
if (OidIsValid(op_negator))
|
2005-12-28 02:30:02 +01:00
|
|
|
{
|
2011-07-06 20:53:16 +02:00
|
|
|
catlist = SearchSysCacheList1(AMOPOPID,
|
|
|
|
ObjectIdGetDatum(op_negator));
|
2005-12-28 02:30:02 +01:00
|
|
|
|
2011-07-06 20:53:16 +02:00
|
|
|
for (i = 0; i < catlist->n_members; i++)
|
|
|
|
{
|
|
|
|
HeapTuple op_tuple = &catlist->members[i]->tuple;
|
|
|
|
Form_pg_amop op_form = (Form_pg_amop) GETSTRUCT(op_tuple);
|
|
|
|
StrategyNumber op_strategy;
|
|
|
|
|
|
|
|
/* must be btree */
|
|
|
|
if (op_form->amopmethod != BTREE_AM_OID)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
/* Get the operator's btree strategy number */
|
|
|
|
op_strategy = (StrategyNumber) op_form->amopstrategy;
|
|
|
|
Assert(op_strategy >= 1 && op_strategy <= 5);
|
|
|
|
|
|
|
|
/* Only consider negators that are = */
|
|
|
|
if (op_strategy != BTEqualStrategyNumber)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
/* OK, report it with "strategy" ROWCOMPARE_NE */
|
|
|
|
thisresult = (OpBtreeInterpretation *)
|
|
|
|
palloc(sizeof(OpBtreeInterpretation));
|
|
|
|
thisresult->opfamily_id = op_form->amopfamily;
|
|
|
|
thisresult->strategy = ROWCOMPARE_NE;
|
|
|
|
thisresult->oplefttype = op_form->amoplefttype;
|
|
|
|
thisresult->oprighttype = op_form->amoprighttype;
|
|
|
|
result = lappend(result, thisresult);
|
|
|
|
}
|
|
|
|
|
|
|
|
ReleaseSysCacheList(catlist);
|
|
|
|
}
|
2005-12-28 02:30:02 +01:00
|
|
|
}
|
|
|
|
|
2011-07-06 20:53:16 +02:00
|
|
|
return result;
|
2005-12-28 02:30:02 +01:00
|
|
|
}
|
|
|
|
|
2007-01-10 19:06:05 +01:00
|
|
|
/*
|
2008-08-02 23:32:01 +02:00
|
|
|
* equality_ops_are_compatible
|
2017-08-16 06:22:32 +02:00
|
|
|
* Return true if the two given equality operators have compatible
|
2008-08-02 23:32:01 +02:00
|
|
|
* semantics.
|
|
|
|
*
|
|
|
|
* This is trivially true if they are the same operator. Otherwise,
|
|
|
|
* we look to see if they can be found in the same btree or hash opfamily.
|
|
|
|
* Either finding allows us to assume that they have compatible notions
|
|
|
|
* of equality. (The reason we need to do these pushups is that one might
|
|
|
|
* be a cross-type operator; for instance int24eq vs int4eq.)
|
2007-01-10 19:06:05 +01:00
|
|
|
*/
|
|
|
|
bool
|
2008-08-02 23:32:01 +02:00
|
|
|
equality_ops_are_compatible(Oid opno1, Oid opno2)
|
2007-01-10 19:06:05 +01:00
|
|
|
{
|
2008-08-02 23:32:01 +02:00
|
|
|
bool result;
|
2007-01-10 19:06:05 +01:00
|
|
|
CatCList *catlist;
|
|
|
|
int i;
|
|
|
|
|
2008-08-02 23:32:01 +02:00
|
|
|
/* Easy if they're the same operator */
|
|
|
|
if (opno1 == opno2)
|
|
|
|
return true;
|
|
|
|
|
2007-01-10 19:06:05 +01:00
|
|
|
/*
|
|
|
|
* We search through all the pg_amop entries for opno1.
|
|
|
|
*/
|
2010-02-14 19:42:19 +01:00
|
|
|
catlist = SearchSysCacheList1(AMOPOPID, ObjectIdGetDatum(opno1));
|
2008-08-02 23:32:01 +02:00
|
|
|
|
|
|
|
result = false;
|
2007-01-10 19:06:05 +01:00
|
|
|
for (i = 0; i < catlist->n_members; i++)
|
|
|
|
{
|
|
|
|
HeapTuple op_tuple = &catlist->members[i]->tuple;
|
|
|
|
Form_pg_amop op_form = (Form_pg_amop) GETSTRUCT(op_tuple);
|
|
|
|
|
2008-08-02 23:32:01 +02:00
|
|
|
/* must be btree or hash */
|
|
|
|
if (op_form->amopmethod == BTREE_AM_OID ||
|
|
|
|
op_form->amopmethod == HASH_AM_OID)
|
2007-01-10 19:06:05 +01:00
|
|
|
{
|
2008-08-02 23:32:01 +02:00
|
|
|
if (op_in_opfamily(opno2, op_form->amopfamily))
|
|
|
|
{
|
|
|
|
result = true;
|
|
|
|
break;
|
|
|
|
}
|
2007-01-10 19:06:05 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
ReleaseSysCacheList(catlist);
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
Improve ineq_histogram_selectivity's behavior for non-default orderings.
ineq_histogram_selectivity() can be invoked in situations where the
ordering we care about is not that of the column's histogram. We could
be considering some other collation, or even more drastically, the
query operator might not agree at all with what was used to construct
the histogram. (We'll get here for anything using scalarineqsel-based
estimators, so that's quite likely to happen for extension operators.)
Up to now we just ignored this issue and assumed we were dealing with
an operator/collation whose sort order exactly matches the histogram,
possibly resulting in junk estimates if the binary search gets confused.
It's past time to improve that, since the use of nondefault collations
is increasing. What we can do is verify that the given operator and
collation match what's recorded in pg_statistic, and use the existing
code only if so. When they don't match, instead execute the operator
against each histogram entry, and take the fraction of successes as our
selectivity estimate. This gives an estimate that is probably good to
about 1/histogram_size, with no assumptions about ordering. (The quality
of the estimate is likely to degrade near the ends of the value range,
since the two orderings probably don't agree on what is an extremal value;
but this is surely going to be more reliable than what we did before.)
At some point we might further improve matters by storing more than one
histogram calculated according to different orderings. But this code
would still be good fallback logic when no matches exist, so that is
not an argument for not doing this.
While here, also improve get_variable_range() to deal more honestly
with non-default collations.
This isn't back-patchable, because it requires adding another argument
to ineq_histogram_selectivity, and because it might have significant
impact on the estimation results for extension operators relying on
scalarineqsel --- mostly for the better, one hopes, but in any case
destabilizing plan choices in back branches is best avoided.
Per investigation of a report from James Lucas.
Discussion: https://postgr.es/m/CAAFmbbOvfi=wMM=3qRsPunBSLb8BFREno2oOzSBS=mzfLPKABw@mail.gmail.com
2020-06-05 22:55:16 +02:00
|
|
|
/*
|
|
|
|
* comparison_ops_are_compatible
|
|
|
|
* Return true if the two given comparison operators have compatible
|
|
|
|
* semantics.
|
|
|
|
*
|
|
|
|
* This is trivially true if they are the same operator. Otherwise,
|
|
|
|
* we look to see if they can be found in the same btree opfamily.
|
|
|
|
* For example, '<' and '>=' ops match if they belong to the same family.
|
|
|
|
*
|
|
|
|
* (This is identical to equality_ops_are_compatible(), except that we
|
|
|
|
* don't bother to examine hash opclasses.)
|
|
|
|
*/
|
|
|
|
bool
|
|
|
|
comparison_ops_are_compatible(Oid opno1, Oid opno2)
|
|
|
|
{
|
|
|
|
bool result;
|
|
|
|
CatCList *catlist;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
/* Easy if they're the same operator */
|
|
|
|
if (opno1 == opno2)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We search through all the pg_amop entries for opno1.
|
|
|
|
*/
|
|
|
|
catlist = SearchSysCacheList1(AMOPOPID, ObjectIdGetDatum(opno1));
|
|
|
|
|
|
|
|
result = false;
|
|
|
|
for (i = 0; i < catlist->n_members; i++)
|
|
|
|
{
|
|
|
|
HeapTuple op_tuple = &catlist->members[i]->tuple;
|
|
|
|
Form_pg_amop op_form = (Form_pg_amop) GETSTRUCT(op_tuple);
|
|
|
|
|
|
|
|
if (op_form->amopmethod == BTREE_AM_OID)
|
|
|
|
{
|
|
|
|
if (op_in_opfamily(opno2, op_form->amopfamily))
|
|
|
|
{
|
|
|
|
result = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
ReleaseSysCacheList(catlist);
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2003-05-26 02:11:29 +02:00
|
|
|
|
2003-08-17 21:58:06 +02:00
|
|
|
/* ---------- AMPROC CACHES ---------- */
|
|
|
|
|
|
|
|
/*
|
2006-12-23 01:43:13 +01:00
|
|
|
* get_opfamily_proc
|
2003-08-17 21:58:06 +02:00
|
|
|
* Get the OID of the specified support function
|
2006-12-23 01:43:13 +01:00
|
|
|
* for the specified opfamily and datatypes.
|
2003-08-17 21:58:06 +02:00
|
|
|
*
|
|
|
|
* Returns InvalidOid if there is no pg_amproc entry for the given keys.
|
|
|
|
*/
|
|
|
|
Oid
|
2006-12-23 01:43:13 +01:00
|
|
|
get_opfamily_proc(Oid opfamily, Oid lefttype, Oid righttype, int16 procnum)
|
2003-08-17 21:58:06 +02:00
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
Form_pg_amproc amproc_tup;
|
|
|
|
RegProcedure result;
|
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache4(AMPROCNUM,
|
|
|
|
ObjectIdGetDatum(opfamily),
|
|
|
|
ObjectIdGetDatum(lefttype),
|
|
|
|
ObjectIdGetDatum(righttype),
|
|
|
|
Int16GetDatum(procnum));
|
2003-08-17 21:58:06 +02:00
|
|
|
if (!HeapTupleIsValid(tp))
|
|
|
|
return InvalidOid;
|
|
|
|
amproc_tup = (Form_pg_amproc) GETSTRUCT(tp);
|
|
|
|
result = amproc_tup->amproc;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
1996-07-09 08:22:35 +02:00
|
|
|
/* ---------- ATTRIBUTE CACHES ---------- */
|
|
|
|
|
|
|
|
/*
|
2000-07-23 05:50:26 +02:00
|
|
|
* get_attname
|
2018-02-12 23:30:30 +01:00
|
|
|
* Given the relation id and the attribute number, return the "attname"
|
|
|
|
* field from the attribute relation as a palloc'ed string.
|
2000-11-16 23:30:52 +01:00
|
|
|
*
|
2018-02-12 23:30:30 +01:00
|
|
|
* If no such attribute exists and missing_ok is true, NULL is returned;
|
|
|
|
* otherwise a not-intended-for-user-consumption error is thrown.
|
1996-07-09 08:22:35 +02:00
|
|
|
*/
|
|
|
|
char *
|
2018-02-12 23:30:30 +01:00
|
|
|
get_attname(Oid relid, AttrNumber attnum, bool missing_ok)
|
1996-07-09 08:22:35 +02:00
|
|
|
{
|
1999-05-29 03:45:21 +02:00
|
|
|
HeapTuple tp;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache2(ATTNUM,
|
2018-02-12 23:30:30 +01:00
|
|
|
ObjectIdGetDatum(relid), Int16GetDatum(attnum));
|
1999-05-29 03:45:21 +02:00
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
|
|
|
Form_pg_attribute att_tup = (Form_pg_attribute) GETSTRUCT(tp);
|
2000-11-16 23:30:52 +01:00
|
|
|
char *result;
|
2000-04-12 19:17:23 +02:00
|
|
|
|
2000-11-16 23:30:52 +01:00
|
|
|
result = pstrdup(NameStr(att_tup->attname));
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
1999-05-29 03:45:21 +02:00
|
|
|
}
|
2003-08-12 01:04:50 +02:00
|
|
|
|
2018-02-12 23:30:30 +01:00
|
|
|
if (!missing_ok)
|
2003-08-12 01:04:50 +02:00
|
|
|
elog(ERROR, "cache lookup failed for attribute %d of relation %u",
|
|
|
|
attnum, relid);
|
2018-02-12 23:30:30 +01:00
|
|
|
return NULL;
|
2003-08-12 01:04:50 +02:00
|
|
|
}
|
|
|
|
|
1996-07-09 08:22:35 +02:00
|
|
|
/*
|
2000-07-23 05:50:26 +02:00
|
|
|
* get_attnum
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
|
|
|
* Given the relation id and the attribute name,
|
|
|
|
* return the "attnum" field from the attribute relation.
|
2002-08-02 20:15:10 +02:00
|
|
|
*
|
|
|
|
* Returns InvalidAttrNumber if the attr doesn't exist (or is dropped).
|
1996-07-09 08:22:35 +02:00
|
|
|
*/
|
|
|
|
AttrNumber
|
2002-08-02 20:15:10 +02:00
|
|
|
get_attnum(Oid relid, const char *attname)
|
1996-07-09 08:22:35 +02:00
|
|
|
{
|
1999-05-29 03:45:21 +02:00
|
|
|
HeapTuple tp;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2002-08-02 20:15:10 +02:00
|
|
|
tp = SearchSysCacheAttName(relid, attname);
|
1999-05-29 03:45:21 +02:00
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
|
|
|
Form_pg_attribute att_tup = (Form_pg_attribute) GETSTRUCT(tp);
|
2000-11-16 23:30:52 +01:00
|
|
|
AttrNumber result;
|
2000-04-12 19:17:23 +02:00
|
|
|
|
2000-11-16 23:30:52 +01:00
|
|
|
result = att_tup->attnum;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
1999-05-29 03:45:21 +02:00
|
|
|
}
|
1996-07-09 08:22:35 +02:00
|
|
|
else
|
1998-01-20 06:05:08 +01:00
|
|
|
return InvalidAttrNumber;
|
1996-07-09 08:22:35 +02:00
|
|
|
}
|
|
|
|
|
2021-02-10 05:06:48 +01:00
|
|
|
/*
|
|
|
|
* get_attstattarget
|
|
|
|
*
|
|
|
|
* Given the relation id and the attribute number,
|
|
|
|
* return the "attstattarget" field from the attribute relation.
|
|
|
|
*
|
|
|
|
* Errors if not found.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
get_attstattarget(Oid relid, AttrNumber attnum)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
Form_pg_attribute att_tup;
|
|
|
|
int result;
|
|
|
|
|
|
|
|
tp = SearchSysCache2(ATTNUM,
|
|
|
|
ObjectIdGetDatum(relid),
|
|
|
|
Int16GetDatum(attnum));
|
|
|
|
if (!HeapTupleIsValid(tp))
|
|
|
|
elog(ERROR, "cache lookup failed for attribute %d of relation %u",
|
|
|
|
attnum, relid);
|
|
|
|
att_tup = (Form_pg_attribute) GETSTRUCT(tp);
|
|
|
|
result = att_tup->attstattarget;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2019-03-30 08:13:09 +01:00
|
|
|
/*
|
|
|
|
* get_attgenerated
|
|
|
|
*
|
2020-10-25 10:39:00 +01:00
|
|
|
* Given the relation id and the attribute number,
|
2019-03-30 08:13:09 +01:00
|
|
|
* return the "attgenerated" field from the attribute relation.
|
|
|
|
*
|
|
|
|
* Errors if not found.
|
|
|
|
*
|
|
|
|
* Since not generated is represented by '\0', this can also be used as a
|
|
|
|
* Boolean test.
|
|
|
|
*/
|
|
|
|
char
|
|
|
|
get_attgenerated(Oid relid, AttrNumber attnum)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
2019-04-05 09:23:07 +02:00
|
|
|
Form_pg_attribute att_tup;
|
|
|
|
char result;
|
2019-03-30 08:13:09 +01:00
|
|
|
|
|
|
|
tp = SearchSysCache2(ATTNUM,
|
|
|
|
ObjectIdGetDatum(relid),
|
|
|
|
Int16GetDatum(attnum));
|
2019-04-05 09:23:07 +02:00
|
|
|
if (!HeapTupleIsValid(tp))
|
2019-03-30 08:13:09 +01:00
|
|
|
elog(ERROR, "cache lookup failed for attribute %d of relation %u",
|
|
|
|
attnum, relid);
|
2019-04-05 09:23:07 +02:00
|
|
|
att_tup = (Form_pg_attribute) GETSTRUCT(tp);
|
|
|
|
result = att_tup->attgenerated;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
2019-03-30 08:13:09 +01:00
|
|
|
}
|
|
|
|
|
1996-07-09 08:22:35 +02:00
|
|
|
/*
|
2000-07-23 05:50:26 +02:00
|
|
|
* get_atttype
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
|
|
|
* Given the relation OID and the attribute number with the relation,
|
|
|
|
* return the attribute type OID.
|
|
|
|
*/
|
|
|
|
Oid
|
|
|
|
get_atttype(Oid relid, AttrNumber attnum)
|
|
|
|
{
|
1999-05-29 03:45:21 +02:00
|
|
|
HeapTuple tp;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache2(ATTNUM,
|
|
|
|
ObjectIdGetDatum(relid),
|
|
|
|
Int16GetDatum(attnum));
|
1999-05-29 03:45:21 +02:00
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
|
|
|
Form_pg_attribute att_tup = (Form_pg_attribute) GETSTRUCT(tp);
|
2000-11-16 23:30:52 +01:00
|
|
|
Oid result;
|
2000-04-12 19:17:23 +02:00
|
|
|
|
2000-11-16 23:30:52 +01:00
|
|
|
result = att_tup->atttypid;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
1999-05-29 03:45:21 +02:00
|
|
|
}
|
1996-07-09 08:22:35 +02:00
|
|
|
else
|
1999-05-29 03:45:21 +02:00
|
|
|
return InvalidOid;
|
1996-07-09 08:22:35 +02:00
|
|
|
}
|
|
|
|
|
2011-02-08 22:04:18 +01:00
|
|
|
/*
|
2011-03-26 19:25:48 +01:00
|
|
|
* get_atttypetypmodcoll
|
2011-02-08 22:04:18 +01:00
|
|
|
*
|
2011-03-26 19:25:48 +01:00
|
|
|
* A three-fer: given the relation id and the attribute number,
|
|
|
|
* fetch atttypid, atttypmod, and attcollation in a single cache lookup.
|
2001-05-10 01:13:37 +02:00
|
|
|
*
|
2018-10-18 19:28:28 +02:00
|
|
|
* Unlike the otherwise-similar get_atttype, this routine
|
2001-05-10 01:13:37 +02:00
|
|
|
* raises an error if it can't obtain the information.
|
|
|
|
*/
|
|
|
|
void
|
2011-03-26 19:25:48 +01:00
|
|
|
get_atttypetypmodcoll(Oid relid, AttrNumber attnum,
|
|
|
|
Oid *typid, int32 *typmod, Oid *collid)
|
2001-05-10 01:13:37 +02:00
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
Form_pg_attribute att_tup;
|
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache2(ATTNUM,
|
|
|
|
ObjectIdGetDatum(relid),
|
|
|
|
Int16GetDatum(attnum));
|
2001-05-10 01:13:37 +02:00
|
|
|
if (!HeapTupleIsValid(tp))
|
2003-07-28 02:09:16 +02:00
|
|
|
elog(ERROR, "cache lookup failed for attribute %d of relation %u",
|
|
|
|
attnum, relid);
|
2001-05-10 01:13:37 +02:00
|
|
|
att_tup = (Form_pg_attribute) GETSTRUCT(tp);
|
|
|
|
|
|
|
|
*typid = att_tup->atttypid;
|
|
|
|
*typmod = att_tup->atttypmod;
|
2011-03-26 19:25:48 +01:00
|
|
|
*collid = att_tup->attcollation;
|
2001-05-10 01:13:37 +02:00
|
|
|
ReleaseSysCache(tp);
|
|
|
|
}
|
|
|
|
|
Implement operator class parameters
PostgreSQL provides set of template index access methods, where opclasses have
much freedom in the semantics of indexing. These index AMs are GiST, GIN,
SP-GiST and BRIN. There opclasses define representation of keys, operations on
them and supported search strategies. So, it's natural that opclasses may be
faced some tradeoffs, which require user-side decision. This commit implements
opclass parameters allowing users to set some values, which tell opclass how to
index the particular dataset.
This commit doesn't introduce new storage in system catalog. Instead it uses
pg_attribute.attoptions, which is used for table column storage options but
unused for index attributes.
In order to evade changing signature of each opclass support function, we
implement unified way to pass options to opclass support functions. Options
are set to fn_expr as the constant bytea expression. It's possible due to the
fact that opclass support functions are executed outside of expressions, so
fn_expr is unused for them.
This commit comes with some examples of opclass options usage. We parametrize
signature length in GiST. That applies to multiple opclasses: tsvector_ops,
gist__intbig_ops, gist_ltree_ops, gist__ltree_ops, gist_trgm_ops and
gist_hstore_ops. Also we parametrize maximum number of integer ranges for
gist__int_ops. However, the main future usage of this feature is expected
to be json, where users would be able to specify which way to index particular
json parts.
Catversion is bumped.
Discussion: https://postgr.es/m/d22c3a18-31c7-1879-fc11-4c1ce2f5e5af%40postgrespro.ru
Author: Nikita Glukhov, revised by me
Reviewed-by: Nikolay Shaplov, Robert Haas, Tom Lane, Tomas Vondra, Alvaro Herrera
2020-03-30 18:17:11 +02:00
|
|
|
/*
|
|
|
|
* get_attoptions
|
|
|
|
*
|
|
|
|
* Given the relation id and the attribute number,
|
|
|
|
* return the attribute options text[] datum, if any.
|
|
|
|
*/
|
|
|
|
Datum
|
|
|
|
get_attoptions(Oid relid, int16 attnum)
|
|
|
|
{
|
|
|
|
HeapTuple tuple;
|
|
|
|
Datum attopts;
|
|
|
|
Datum result;
|
|
|
|
bool isnull;
|
|
|
|
|
|
|
|
tuple = SearchSysCache2(ATTNUM,
|
|
|
|
ObjectIdGetDatum(relid),
|
|
|
|
Int16GetDatum(attnum));
|
|
|
|
|
|
|
|
if (!HeapTupleIsValid(tuple))
|
|
|
|
elog(ERROR, "cache lookup failed for attribute %d of relation %u",
|
|
|
|
attnum, relid);
|
|
|
|
|
|
|
|
attopts = SysCacheGetAttr(ATTNAME, tuple, Anum_pg_attribute_attoptions,
|
|
|
|
&isnull);
|
|
|
|
|
|
|
|
if (isnull)
|
|
|
|
result = (Datum) 0;
|
|
|
|
else
|
|
|
|
result = datumCopy(attopts, false, -1); /* text[] */
|
|
|
|
|
|
|
|
ReleaseSysCache(tuple);
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2020-03-10 15:28:23 +01:00
|
|
|
/* ---------- PG_CAST CACHE ---------- */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* get_cast_oid - given two type OIDs, look up a cast OID
|
|
|
|
*
|
|
|
|
* If missing_ok is false, throw an error if the cast is not found. If
|
|
|
|
* true, just return InvalidOid.
|
|
|
|
*/
|
|
|
|
Oid
|
|
|
|
get_cast_oid(Oid sourcetypeid, Oid targettypeid, bool missing_ok)
|
|
|
|
{
|
|
|
|
Oid oid;
|
|
|
|
|
|
|
|
oid = GetSysCacheOid2(CASTSOURCETARGET, Anum_pg_cast_oid,
|
|
|
|
ObjectIdGetDatum(sourcetypeid),
|
|
|
|
ObjectIdGetDatum(targettypeid));
|
|
|
|
if (!OidIsValid(oid) && !missing_ok)
|
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_UNDEFINED_OBJECT),
|
|
|
|
errmsg("cast from type %s to type %s does not exist",
|
|
|
|
format_type_be(sourcetypeid),
|
|
|
|
format_type_be(targettypeid))));
|
|
|
|
return oid;
|
|
|
|
}
|
|
|
|
|
2011-02-08 22:04:18 +01:00
|
|
|
/* ---------- COLLATION CACHE ---------- */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* get_collation_name
|
|
|
|
* Returns the name of a given pg_collation entry.
|
|
|
|
*
|
2018-01-03 23:12:06 +01:00
|
|
|
* Returns a palloc'd copy of the string, or NULL if no such collation.
|
2011-02-08 22:04:18 +01:00
|
|
|
*
|
|
|
|
* NOTE: since collation name is not unique, be wary of code that uses this
|
|
|
|
* for anything except preparing error messages.
|
|
|
|
*/
|
|
|
|
char *
|
|
|
|
get_collation_name(Oid colloid)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
|
|
|
|
tp = SearchSysCache1(COLLOID, ObjectIdGetDatum(colloid));
|
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
|
|
|
Form_pg_collation colltup = (Form_pg_collation) GETSTRUCT(tp);
|
|
|
|
char *result;
|
|
|
|
|
|
|
|
result = pstrdup(NameStr(colltup->collname));
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2019-03-22 12:09:32 +01:00
|
|
|
bool
|
|
|
|
get_collation_isdeterministic(Oid colloid)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
Form_pg_collation colltup;
|
|
|
|
bool result;
|
|
|
|
|
|
|
|
tp = SearchSysCache1(COLLOID, ObjectIdGetDatum(colloid));
|
|
|
|
if (!HeapTupleIsValid(tp))
|
|
|
|
elog(ERROR, "cache lookup failed for collation %u", colloid);
|
|
|
|
colltup = (Form_pg_collation) GETSTRUCT(tp);
|
|
|
|
result = colltup->collisdeterministic;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2007-02-14 02:58:58 +01:00
|
|
|
/* ---------- CONSTRAINT CACHE ---------- */
|
1996-07-09 08:22:35 +02:00
|
|
|
|
2007-02-14 02:58:58 +01:00
|
|
|
/*
|
|
|
|
* get_constraint_name
|
|
|
|
* Returns the name of a given pg_constraint entry.
|
|
|
|
*
|
|
|
|
* Returns a palloc'd copy of the string, or NULL if no such constraint.
|
|
|
|
*
|
|
|
|
* NOTE: since constraint name is not unique, be wary of code that uses this
|
|
|
|
* for anything except preparing error messages.
|
1996-07-09 08:22:35 +02:00
|
|
|
*/
|
2007-02-14 02:58:58 +01:00
|
|
|
char *
|
|
|
|
get_constraint_name(Oid conoid)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache1(CONSTROID, ObjectIdGetDatum(conoid));
|
2007-02-14 02:58:58 +01:00
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
|
|
|
Form_pg_constraint contup = (Form_pg_constraint) GETSTRUCT(tp);
|
|
|
|
char *result;
|
|
|
|
|
|
|
|
result = pstrdup(NameStr(contup->conname));
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
return NULL;
|
|
|
|
}
|
1996-07-09 08:22:35 +02:00
|
|
|
|
2020-12-09 15:12:05 +01:00
|
|
|
/*
|
|
|
|
* get_constraint_index
|
|
|
|
* Given the OID of a unique, primary-key, or exclusion constraint,
|
|
|
|
* return the OID of the underlying index.
|
|
|
|
*
|
2022-03-11 19:47:26 +01:00
|
|
|
* Returns InvalidOid if the constraint could not be found or is of
|
|
|
|
* the wrong type.
|
|
|
|
*
|
|
|
|
* The intent of this function is to return the index "owned" by the
|
|
|
|
* specified constraint. Therefore we must check contype, since some
|
|
|
|
* pg_constraint entries (e.g. for foreign-key constraints) store the
|
|
|
|
* OID of an index that is referenced but not owned by the constraint.
|
2020-12-09 15:12:05 +01:00
|
|
|
*/
|
|
|
|
Oid
|
|
|
|
get_constraint_index(Oid conoid)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
|
|
|
|
tp = SearchSysCache1(CONSTROID, ObjectIdGetDatum(conoid));
|
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
|
|
|
Form_pg_constraint contup = (Form_pg_constraint) GETSTRUCT(tp);
|
|
|
|
Oid result;
|
|
|
|
|
2022-03-11 19:47:26 +01:00
|
|
|
if (contup->contype == CONSTRAINT_UNIQUE ||
|
|
|
|
contup->contype == CONSTRAINT_PRIMARY ||
|
|
|
|
contup->contype == CONSTRAINT_EXCLUSION)
|
|
|
|
result = contup->conindid;
|
|
|
|
else
|
|
|
|
result = InvalidOid;
|
2020-12-09 15:12:05 +01:00
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
return InvalidOid;
|
|
|
|
}
|
|
|
|
|
2015-04-26 16:33:14 +02:00
|
|
|
/* ---------- LANGUAGE CACHE ---------- */
|
|
|
|
|
|
|
|
char *
|
|
|
|
get_language_name(Oid langoid, bool missing_ok)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
|
|
|
|
tp = SearchSysCache1(LANGOID, ObjectIdGetDatum(langoid));
|
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
|
|
|
Form_pg_language lantup = (Form_pg_language) GETSTRUCT(tp);
|
|
|
|
char *result;
|
|
|
|
|
|
|
|
result = pstrdup(NameStr(lantup->lanname));
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!missing_ok)
|
|
|
|
elog(ERROR, "cache lookup failed for language %u",
|
|
|
|
langoid);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2001-08-21 18:36:06 +02:00
|
|
|
/* ---------- OPCLASS CACHE ---------- */
|
|
|
|
|
|
|
|
/*
|
2006-12-23 01:43:13 +01:00
|
|
|
* get_opclass_family
|
2001-08-21 18:36:06 +02:00
|
|
|
*
|
2006-12-23 01:43:13 +01:00
|
|
|
* Returns the OID of the operator family the opclass belongs to.
|
2001-08-21 18:36:06 +02:00
|
|
|
*/
|
2006-12-23 01:43:13 +01:00
|
|
|
Oid
|
|
|
|
get_opclass_family(Oid opclass)
|
2005-12-28 02:30:02 +01:00
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
Form_pg_opclass cla_tup;
|
2006-12-23 01:43:13 +01:00
|
|
|
Oid result;
|
2005-12-28 02:30:02 +01:00
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache1(CLAOID, ObjectIdGetDatum(opclass));
|
2005-12-28 02:30:02 +01:00
|
|
|
if (!HeapTupleIsValid(tp))
|
|
|
|
elog(ERROR, "cache lookup failed for opclass %u", opclass);
|
|
|
|
cla_tup = (Form_pg_opclass) GETSTRUCT(tp);
|
|
|
|
|
2006-12-23 01:43:13 +01:00
|
|
|
result = cla_tup->opcfamily;
|
2005-12-28 02:30:02 +01:00
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* get_opclass_input_type
|
|
|
|
*
|
|
|
|
* Returns the OID of the datatype the opclass indexes.
|
|
|
|
*/
|
|
|
|
Oid
|
|
|
|
get_opclass_input_type(Oid opclass)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
Form_pg_opclass cla_tup;
|
|
|
|
Oid result;
|
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache1(CLAOID, ObjectIdGetDatum(opclass));
|
2005-12-28 02:30:02 +01:00
|
|
|
if (!HeapTupleIsValid(tp))
|
|
|
|
elog(ERROR, "cache lookup failed for opclass %u", opclass);
|
|
|
|
cla_tup = (Form_pg_opclass) GETSTRUCT(tp);
|
|
|
|
|
|
|
|
result = cla_tup->opcintype;
|
|
|
|
ReleaseSysCache(tp);
|
2003-06-23 00:04:55 +02:00
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2018-09-19 00:54:10 +02:00
|
|
|
/*
|
2019-06-14 02:34:34 +02:00
|
|
|
* get_opclass_opfamily_and_input_type
|
2018-09-19 00:54:10 +02:00
|
|
|
*
|
|
|
|
* Returns the OID of the operator family the opclass belongs to,
|
|
|
|
* the OID of the datatype the opclass indexes
|
|
|
|
*/
|
|
|
|
bool
|
|
|
|
get_opclass_opfamily_and_input_type(Oid opclass, Oid *opfamily, Oid *opcintype)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
Form_pg_opclass cla_tup;
|
|
|
|
|
|
|
|
tp = SearchSysCache1(CLAOID, ObjectIdGetDatum(opclass));
|
|
|
|
if (!HeapTupleIsValid(tp))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
cla_tup = (Form_pg_opclass) GETSTRUCT(tp);
|
|
|
|
|
|
|
|
*opfamily = cla_tup->opcfamily;
|
|
|
|
*opcintype = cla_tup->opcintype;
|
|
|
|
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
1996-07-09 08:22:35 +02:00
|
|
|
/* ---------- OPERATOR CACHE ---------- */
|
|
|
|
|
|
|
|
/*
|
2000-07-23 05:50:26 +02:00
|
|
|
* get_opcode
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
|
|
|
* Returns the regproc id of the routine used to implement an
|
1999-05-29 03:45:21 +02:00
|
|
|
* operator given the operator oid.
|
1996-07-09 08:22:35 +02:00
|
|
|
*/
|
|
|
|
RegProcedure
|
|
|
|
get_opcode(Oid opno)
|
|
|
|
{
|
1999-05-29 03:45:21 +02:00
|
|
|
HeapTuple tp;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache1(OPEROID, ObjectIdGetDatum(opno));
|
1999-05-29 03:45:21 +02:00
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
|
|
|
Form_pg_operator optup = (Form_pg_operator) GETSTRUCT(tp);
|
2000-11-16 23:30:52 +01:00
|
|
|
RegProcedure result;
|
2000-04-12 19:17:23 +02:00
|
|
|
|
2000-11-16 23:30:52 +01:00
|
|
|
result = optup->oprcode;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
1999-05-29 03:45:21 +02:00
|
|
|
}
|
1996-07-09 08:22:35 +02:00
|
|
|
else
|
2000-08-13 04:50:35 +02:00
|
|
|
return (RegProcedure) InvalidOid;
|
1996-07-09 08:22:35 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2000-07-23 05:50:26 +02:00
|
|
|
* get_opname
|
1996-07-09 08:22:35 +02:00
|
|
|
* returns the name of the operator with the given opno
|
|
|
|
*
|
1999-05-29 03:45:21 +02:00
|
|
|
* Note: returns a palloc'd copy of the string, or NULL if no such operator.
|
1996-07-09 08:22:35 +02:00
|
|
|
*/
|
|
|
|
char *
|
|
|
|
get_opname(Oid opno)
|
|
|
|
{
|
1999-05-29 03:45:21 +02:00
|
|
|
HeapTuple tp;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache1(OPEROID, ObjectIdGetDatum(opno));
|
1999-05-29 03:45:21 +02:00
|
|
|
if (HeapTupleIsValid(tp))
|
1996-07-09 08:22:35 +02:00
|
|
|
{
|
1999-05-29 03:45:21 +02:00
|
|
|
Form_pg_operator optup = (Form_pg_operator) GETSTRUCT(tp);
|
2000-11-16 23:30:52 +01:00
|
|
|
char *result;
|
2000-04-12 19:17:23 +02:00
|
|
|
|
2000-11-16 23:30:52 +01:00
|
|
|
result = pstrdup(NameStr(optup->oprname));
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
1996-07-09 08:22:35 +02:00
|
|
|
}
|
1999-05-29 03:45:21 +02:00
|
|
|
else
|
|
|
|
return NULL;
|
1996-07-09 08:22:35 +02:00
|
|
|
}
|
|
|
|
|
2016-01-22 01:47:15 +01:00
|
|
|
/*
|
|
|
|
* get_op_rettype
|
|
|
|
* Given operator oid, return the operator's result type.
|
|
|
|
*/
|
|
|
|
Oid
|
|
|
|
get_op_rettype(Oid opno)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
|
|
|
|
tp = SearchSysCache1(OPEROID, ObjectIdGetDatum(opno));
|
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
|
|
|
Form_pg_operator optup = (Form_pg_operator) GETSTRUCT(tp);
|
|
|
|
Oid result;
|
|
|
|
|
|
|
|
result = optup->oprresult;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
return InvalidOid;
|
|
|
|
}
|
|
|
|
|
2003-12-03 18:45:10 +01:00
|
|
|
/*
|
|
|
|
* op_input_types
|
|
|
|
*
|
|
|
|
* Returns the left and right input datatypes for an operator
|
|
|
|
* (InvalidOid if not relevant).
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
op_input_types(Oid opno, Oid *lefttype, Oid *righttype)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
Form_pg_operator optup;
|
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache1(OPEROID, ObjectIdGetDatum(opno));
|
2003-12-03 18:45:10 +01:00
|
|
|
if (!HeapTupleIsValid(tp)) /* shouldn't happen */
|
|
|
|
elog(ERROR, "cache lookup failed for operator %u", opno);
|
|
|
|
optup = (Form_pg_operator) GETSTRUCT(tp);
|
|
|
|
*lefttype = optup->oprleft;
|
|
|
|
*righttype = optup->oprright;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
}
|
|
|
|
|
1996-07-09 08:22:35 +02:00
|
|
|
/*
|
2000-07-23 05:50:26 +02:00
|
|
|
* op_mergejoinable
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
2006-12-23 01:43:13 +01:00
|
|
|
* Returns true if the operator is potentially mergejoinable. (The planner
|
|
|
|
* will fail to find any mergejoin plans unless there are suitable btree
|
|
|
|
* opfamily entries for this operator and associated sortops. The pg_operator
|
|
|
|
* flag is just a hint to tell the planner whether to bother looking.)
|
2010-10-31 02:55:20 +01:00
|
|
|
*
|
2011-06-03 21:38:12 +02:00
|
|
|
* In some cases (currently only array_eq and record_eq), mergejoinability
|
|
|
|
* depends on the specific input data type the operator is invoked for, so
|
|
|
|
* that must be passed as well. We currently assume that only one input's type
|
|
|
|
* is needed to check this --- by convention, pass the left input's data type.
|
1996-07-09 08:22:35 +02:00
|
|
|
*/
|
|
|
|
bool
|
2010-10-31 02:55:20 +01:00
|
|
|
op_mergejoinable(Oid opno, Oid inputtype)
|
1996-07-09 08:22:35 +02:00
|
|
|
{
|
2000-11-16 23:30:52 +01:00
|
|
|
bool result = false;
|
2011-06-03 21:38:12 +02:00
|
|
|
HeapTuple tp;
|
|
|
|
TypeCacheEntry *typentry;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2011-06-03 21:38:12 +02:00
|
|
|
/*
|
|
|
|
* For array_eq or record_eq, we can sort if the element or field types
|
|
|
|
* are all sortable. We could implement all the checks for that here, but
|
|
|
|
* the typcache already does that and caches the results too, so let's
|
|
|
|
* rely on the typcache.
|
|
|
|
*/
|
2010-10-31 02:55:20 +01:00
|
|
|
if (opno == ARRAY_EQ_OP)
|
1996-07-09 08:22:35 +02:00
|
|
|
{
|
2011-06-03 21:38:12 +02:00
|
|
|
typentry = lookup_type_cache(inputtype, TYPECACHE_CMP_PROC);
|
|
|
|
if (typentry->cmp_proc == F_BTARRAYCMP)
|
|
|
|
result = true;
|
|
|
|
}
|
|
|
|
else if (opno == RECORD_EQ_OP)
|
|
|
|
{
|
|
|
|
typentry = lookup_type_cache(inputtype, TYPECACHE_CMP_PROC);
|
|
|
|
if (typentry->cmp_proc == F_BTRECORDCMP)
|
|
|
|
result = true;
|
2010-10-31 02:55:20 +01:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* For all other operators, rely on pg_operator.oprcanmerge */
|
|
|
|
tp = SearchSysCache1(OPEROID, ObjectIdGetDatum(opno));
|
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
|
|
|
Form_pg_operator optup = (Form_pg_operator) GETSTRUCT(tp);
|
|
|
|
|
|
|
|
result = optup->oprcanmerge;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
}
|
1996-07-09 08:22:35 +02:00
|
|
|
}
|
2000-11-16 23:30:52 +01:00
|
|
|
return result;
|
1996-07-09 08:22:35 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
1999-02-14 00:22:53 +01:00
|
|
|
* op_hashjoinable
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
2006-12-23 01:43:13 +01:00
|
|
|
* Returns true if the operator is hashjoinable. (There must be a suitable
|
|
|
|
* hash opfamily entry for this operator if it is so marked.)
|
2010-10-31 02:55:20 +01:00
|
|
|
*
|
|
|
|
* In some cases (currently only array_eq), hashjoinability depends on the
|
|
|
|
* specific input data type the operator is invoked for, so that must be
|
|
|
|
* passed as well. We currently assume that only one input's type is needed
|
|
|
|
* to check this --- by convention, pass the left input's data type.
|
1996-07-09 08:22:35 +02:00
|
|
|
*/
|
2003-01-15 20:35:48 +01:00
|
|
|
bool
|
2010-10-31 02:55:20 +01:00
|
|
|
op_hashjoinable(Oid opno, Oid inputtype)
|
1996-07-09 08:22:35 +02:00
|
|
|
{
|
2003-01-15 20:35:48 +01:00
|
|
|
bool result = false;
|
2011-06-03 21:38:12 +02:00
|
|
|
HeapTuple tp;
|
|
|
|
TypeCacheEntry *typentry;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2011-06-03 21:38:12 +02:00
|
|
|
/* As in op_mergejoinable, let the typcache handle the hard cases */
|
2010-10-31 02:55:20 +01:00
|
|
|
if (opno == ARRAY_EQ_OP)
|
1999-05-29 03:45:21 +02:00
|
|
|
{
|
2011-06-03 21:38:12 +02:00
|
|
|
typentry = lookup_type_cache(inputtype, TYPECACHE_HASH_PROC);
|
|
|
|
if (typentry->hash_proc == F_HASH_ARRAY)
|
|
|
|
result = true;
|
2010-10-31 02:55:20 +01:00
|
|
|
}
|
2020-11-19 09:24:37 +01:00
|
|
|
else if (opno == RECORD_EQ_OP)
|
|
|
|
{
|
|
|
|
typentry = lookup_type_cache(inputtype, TYPECACHE_HASH_PROC);
|
|
|
|
if (typentry->hash_proc == F_HASH_RECORD)
|
|
|
|
result = true;
|
|
|
|
}
|
2010-10-31 02:55:20 +01:00
|
|
|
else
|
|
|
|
{
|
|
|
|
/* For all other operators, rely on pg_operator.oprcanhash */
|
|
|
|
tp = SearchSysCache1(OPEROID, ObjectIdGetDatum(opno));
|
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
|
|
|
Form_pg_operator optup = (Form_pg_operator) GETSTRUCT(tp);
|
|
|
|
|
|
|
|
result = optup->oprcanhash;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
}
|
1999-05-29 03:45:21 +02:00
|
|
|
}
|
2000-11-16 23:30:52 +01:00
|
|
|
return result;
|
1999-05-29 03:45:21 +02:00
|
|
|
}
|
|
|
|
|
2002-12-01 22:05:14 +01:00
|
|
|
/*
|
|
|
|
* op_strict
|
|
|
|
*
|
|
|
|
* Get the proisstrict flag for the operator's underlying function.
|
|
|
|
*/
|
|
|
|
bool
|
|
|
|
op_strict(Oid opno)
|
|
|
|
{
|
|
|
|
RegProcedure funcid = get_opcode(opno);
|
|
|
|
|
|
|
|
if (funcid == (RegProcedure) InvalidOid)
|
2003-07-25 22:18:01 +02:00
|
|
|
elog(ERROR, "operator %u does not exist", opno);
|
2002-12-01 22:05:14 +01:00
|
|
|
|
|
|
|
return func_strict((Oid) funcid);
|
|
|
|
}
|
|
|
|
|
2000-08-13 04:50:35 +02:00
|
|
|
/*
|
2002-04-05 02:31:36 +02:00
|
|
|
* op_volatile
|
2000-08-13 04:50:35 +02:00
|
|
|
*
|
2002-04-05 02:31:36 +02:00
|
|
|
* Get the provolatile flag for the operator's underlying function.
|
2000-08-13 04:50:35 +02:00
|
|
|
*/
|
2002-04-05 02:31:36 +02:00
|
|
|
char
|
|
|
|
op_volatile(Oid opno)
|
2000-08-13 04:50:35 +02:00
|
|
|
{
|
|
|
|
RegProcedure funcid = get_opcode(opno);
|
|
|
|
|
|
|
|
if (funcid == (RegProcedure) InvalidOid)
|
2003-07-25 22:18:01 +02:00
|
|
|
elog(ERROR, "operator %u does not exist", opno);
|
2000-08-13 04:50:35 +02:00
|
|
|
|
2002-04-05 02:31:36 +02:00
|
|
|
return func_volatile((Oid) funcid);
|
2000-08-13 04:50:35 +02:00
|
|
|
}
|
|
|
|
|
1996-07-09 08:22:35 +02:00
|
|
|
/*
|
2000-07-23 05:50:26 +02:00
|
|
|
* get_commutator
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
|
|
|
* Returns the corresponding commutator of an operator.
|
|
|
|
*/
|
|
|
|
Oid
|
|
|
|
get_commutator(Oid opno)
|
|
|
|
{
|
1999-05-29 03:45:21 +02:00
|
|
|
HeapTuple tp;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache1(OPEROID, ObjectIdGetDatum(opno));
|
1999-05-29 03:45:21 +02:00
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
|
|
|
Form_pg_operator optup = (Form_pg_operator) GETSTRUCT(tp);
|
2000-11-16 23:30:52 +01:00
|
|
|
Oid result;
|
2000-04-12 19:17:23 +02:00
|
|
|
|
2000-11-16 23:30:52 +01:00
|
|
|
result = optup->oprcom;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
1999-05-29 03:45:21 +02:00
|
|
|
}
|
1996-07-09 08:22:35 +02:00
|
|
|
else
|
1999-05-29 03:45:21 +02:00
|
|
|
return InvalidOid;
|
1996-07-09 08:22:35 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2000-07-23 05:50:26 +02:00
|
|
|
* get_negator
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
|
|
|
* Returns the corresponding negator of an operator.
|
|
|
|
*/
|
|
|
|
Oid
|
|
|
|
get_negator(Oid opno)
|
|
|
|
{
|
1999-05-29 03:45:21 +02:00
|
|
|
HeapTuple tp;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache1(OPEROID, ObjectIdGetDatum(opno));
|
1999-05-29 03:45:21 +02:00
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
|
|
|
Form_pg_operator optup = (Form_pg_operator) GETSTRUCT(tp);
|
2000-11-16 23:30:52 +01:00
|
|
|
Oid result;
|
2000-04-12 19:17:23 +02:00
|
|
|
|
2000-11-16 23:30:52 +01:00
|
|
|
result = optup->oprnegate;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
1999-05-29 03:45:21 +02:00
|
|
|
}
|
1996-07-09 08:22:35 +02:00
|
|
|
else
|
1999-05-29 03:45:21 +02:00
|
|
|
return InvalidOid;
|
1996-07-09 08:22:35 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2000-07-23 05:50:26 +02:00
|
|
|
* get_oprrest
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
|
|
|
* Returns procedure id for computing selectivity of an operator.
|
|
|
|
*/
|
|
|
|
RegProcedure
|
|
|
|
get_oprrest(Oid opno)
|
|
|
|
{
|
1999-05-29 03:45:21 +02:00
|
|
|
HeapTuple tp;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache1(OPEROID, ObjectIdGetDatum(opno));
|
1999-05-29 03:45:21 +02:00
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
|
|
|
Form_pg_operator optup = (Form_pg_operator) GETSTRUCT(tp);
|
2000-11-16 23:30:52 +01:00
|
|
|
RegProcedure result;
|
2000-04-12 19:17:23 +02:00
|
|
|
|
2000-11-16 23:30:52 +01:00
|
|
|
result = optup->oprrest;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
1999-05-29 03:45:21 +02:00
|
|
|
}
|
1996-07-09 08:22:35 +02:00
|
|
|
else
|
2000-08-13 04:50:35 +02:00
|
|
|
return (RegProcedure) InvalidOid;
|
1996-07-09 08:22:35 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2000-07-23 05:50:26 +02:00
|
|
|
* get_oprjoin
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
|
|
|
* Returns procedure id for computing selectivity of a join.
|
|
|
|
*/
|
|
|
|
RegProcedure
|
|
|
|
get_oprjoin(Oid opno)
|
|
|
|
{
|
1999-05-29 03:45:21 +02:00
|
|
|
HeapTuple tp;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache1(OPEROID, ObjectIdGetDatum(opno));
|
1999-05-29 03:45:21 +02:00
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
|
|
|
Form_pg_operator optup = (Form_pg_operator) GETSTRUCT(tp);
|
2000-11-16 23:30:52 +01:00
|
|
|
RegProcedure result;
|
2000-04-12 19:17:23 +02:00
|
|
|
|
2000-11-16 23:30:52 +01:00
|
|
|
result = optup->oprjoin;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
1999-05-29 03:45:21 +02:00
|
|
|
}
|
1996-07-09 08:22:35 +02:00
|
|
|
else
|
2000-08-13 04:50:35 +02:00
|
|
|
return (RegProcedure) InvalidOid;
|
1996-07-09 08:22:35 +02:00
|
|
|
}
|
|
|
|
|
1999-08-16 04:06:25 +02:00
|
|
|
/* ---------- FUNCTION CACHE ---------- */
|
|
|
|
|
2002-04-27 05:45:03 +02:00
|
|
|
/*
|
|
|
|
* get_func_name
|
|
|
|
* returns the name of the function with the given funcid
|
|
|
|
*
|
|
|
|
* Note: returns a palloc'd copy of the string, or NULL if no such function.
|
|
|
|
*/
|
|
|
|
char *
|
|
|
|
get_func_name(Oid funcid)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
|
2002-04-27 05:45:03 +02:00
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
|
|
|
Form_pg_proc functup = (Form_pg_proc) GETSTRUCT(tp);
|
|
|
|
char *result;
|
|
|
|
|
|
|
|
result = pstrdup(NameStr(functup->proname));
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2009-08-10 07:46:50 +02:00
|
|
|
/*
|
|
|
|
* get_func_namespace
|
|
|
|
*
|
|
|
|
* Returns the pg_namespace OID associated with a given function.
|
|
|
|
*/
|
|
|
|
Oid
|
|
|
|
get_func_namespace(Oid funcid)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
|
2009-08-10 07:46:50 +02:00
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
|
|
|
Form_pg_proc functup = (Form_pg_proc) GETSTRUCT(tp);
|
|
|
|
Oid result;
|
|
|
|
|
|
|
|
result = functup->pronamespace;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
return InvalidOid;
|
|
|
|
}
|
|
|
|
|
1999-08-16 04:06:25 +02:00
|
|
|
/*
|
|
|
|
* get_func_rettype
|
|
|
|
* Given procedure id, return the function's result type.
|
|
|
|
*/
|
|
|
|
Oid
|
|
|
|
get_func_rettype(Oid funcid)
|
|
|
|
{
|
2000-11-16 23:30:52 +01:00
|
|
|
HeapTuple tp;
|
|
|
|
Oid result;
|
1999-08-16 04:06:25 +02:00
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
|
2000-11-16 23:30:52 +01:00
|
|
|
if (!HeapTupleIsValid(tp))
|
2003-07-25 22:18:01 +02:00
|
|
|
elog(ERROR, "cache lookup failed for function %u", funcid);
|
1999-08-16 04:06:25 +02:00
|
|
|
|
2000-11-16 23:30:52 +01:00
|
|
|
result = ((Form_pg_proc) GETSTRUCT(tp))->prorettype;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
1999-08-16 04:06:25 +02:00
|
|
|
}
|
|
|
|
|
2005-03-29 02:17:27 +02:00
|
|
|
/*
|
|
|
|
* get_func_nargs
|
|
|
|
* Given procedure id, return the number of arguments.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
get_func_nargs(Oid funcid)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
int result;
|
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
|
2005-03-29 02:17:27 +02:00
|
|
|
if (!HeapTupleIsValid(tp))
|
|
|
|
elog(ERROR, "cache lookup failed for function %u", funcid);
|
|
|
|
|
|
|
|
result = ((Form_pg_proc) GETSTRUCT(tp))->pronargs;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2003-07-01 21:10:53 +02:00
|
|
|
/*
|
|
|
|
* get_func_signature
|
|
|
|
* Given procedure id, return the function's argument and result types.
|
|
|
|
* (The return value is the result type.)
|
|
|
|
*
|
2005-03-29 02:17:27 +02:00
|
|
|
* The arguments are returned as a palloc'd array.
|
2003-07-01 21:10:53 +02:00
|
|
|
*/
|
|
|
|
Oid
|
2005-03-29 02:17:27 +02:00
|
|
|
get_func_signature(Oid funcid, Oid **argtypes, int *nargs)
|
2003-07-01 21:10:53 +02:00
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
Form_pg_proc procstruct;
|
|
|
|
Oid result;
|
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
|
2003-07-01 21:10:53 +02:00
|
|
|
if (!HeapTupleIsValid(tp))
|
2003-07-25 22:18:01 +02:00
|
|
|
elog(ERROR, "cache lookup failed for function %u", funcid);
|
2003-07-01 21:10:53 +02:00
|
|
|
|
|
|
|
procstruct = (Form_pg_proc) GETSTRUCT(tp);
|
|
|
|
|
|
|
|
result = procstruct->prorettype;
|
|
|
|
*nargs = (int) procstruct->pronargs;
|
2005-03-29 02:17:27 +02:00
|
|
|
Assert(*nargs == procstruct->proargtypes.dim1);
|
|
|
|
*argtypes = (Oid *) palloc(*nargs * sizeof(Oid));
|
|
|
|
memcpy(*argtypes, procstruct->proargtypes.values, *nargs * sizeof(Oid));
|
2003-07-01 21:10:53 +02:00
|
|
|
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
Support ordered-set (WITHIN GROUP) aggregates.
This patch introduces generic support for ordered-set and hypothetical-set
aggregate functions, as well as implementations of the instances defined in
SQL:2008 (percentile_cont(), percentile_disc(), rank(), dense_rank(),
percent_rank(), cume_dist()). We also added mode() though it is not in the
spec, as well as versions of percentile_cont() and percentile_disc() that
can compute multiple percentile values in one pass over the data.
Unlike the original submission, this patch puts full control of the sorting
process in the hands of the aggregate's support functions. To allow the
support functions to find out how they're supposed to sort, a new API
function AggGetAggref() is added to nodeAgg.c. This allows retrieval of
the aggregate call's Aggref node, which may have other uses beyond the
immediate need. There is also support for ordered-set aggregates to
install cleanup callback functions, so that they can be sure that
infrastructure such as tuplesort objects gets cleaned up.
In passing, make some fixes in the recently-added support for variadic
aggregates, and make some editorial adjustments in the recent FILTER
additions for aggregates. Also, simplify use of IsBinaryCoercible() by
allowing it to succeed whenever the target type is ANY or ANYELEMENT.
It was inconsistent that it dealt with other polymorphic target types
but not these.
Atri Sharma and Andrew Gierth; reviewed by Pavel Stehule and Vik Fearing,
and rather heavily editorialized upon by Tom Lane
2013-12-23 22:11:35 +01:00
|
|
|
/*
|
|
|
|
* get_func_variadictype
|
|
|
|
* Given procedure id, return the function's provariadic field.
|
|
|
|
*/
|
|
|
|
Oid
|
|
|
|
get_func_variadictype(Oid funcid)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
Oid result;
|
|
|
|
|
|
|
|
tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
|
|
|
|
if (!HeapTupleIsValid(tp))
|
|
|
|
elog(ERROR, "cache lookup failed for function %u", funcid);
|
|
|
|
|
|
|
|
result = ((Form_pg_proc) GETSTRUCT(tp))->provariadic;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2002-05-13 01:43:04 +02:00
|
|
|
/*
|
|
|
|
* get_func_retset
|
|
|
|
* Given procedure id, return the function's proretset flag.
|
|
|
|
*/
|
|
|
|
bool
|
|
|
|
get_func_retset(Oid funcid)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
bool result;
|
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
|
2002-05-13 01:43:04 +02:00
|
|
|
if (!HeapTupleIsValid(tp))
|
2003-07-25 22:18:01 +02:00
|
|
|
elog(ERROR, "cache lookup failed for function %u", funcid);
|
2002-05-13 01:43:04 +02:00
|
|
|
|
|
|
|
result = ((Form_pg_proc) GETSTRUCT(tp))->proretset;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2002-12-01 22:05:14 +01:00
|
|
|
/*
|
|
|
|
* func_strict
|
|
|
|
* Given procedure id, return the function's proisstrict flag.
|
|
|
|
*/
|
|
|
|
bool
|
|
|
|
func_strict(Oid funcid)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
bool result;
|
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
|
2002-12-01 22:05:14 +01:00
|
|
|
if (!HeapTupleIsValid(tp))
|
2003-07-25 22:18:01 +02:00
|
|
|
elog(ERROR, "cache lookup failed for function %u", funcid);
|
2002-12-01 22:05:14 +01:00
|
|
|
|
|
|
|
result = ((Form_pg_proc) GETSTRUCT(tp))->proisstrict;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2000-08-13 04:50:35 +02:00
|
|
|
/*
|
2002-04-05 02:31:36 +02:00
|
|
|
* func_volatile
|
|
|
|
* Given procedure id, return the function's provolatile flag.
|
2000-08-13 04:50:35 +02:00
|
|
|
*/
|
2002-04-05 02:31:36 +02:00
|
|
|
char
|
|
|
|
func_volatile(Oid funcid)
|
2000-08-13 04:50:35 +02:00
|
|
|
{
|
2000-11-16 23:30:52 +01:00
|
|
|
HeapTuple tp;
|
2002-04-05 02:31:36 +02:00
|
|
|
char result;
|
2000-08-13 04:50:35 +02:00
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
|
2000-11-16 23:30:52 +01:00
|
|
|
if (!HeapTupleIsValid(tp))
|
2003-07-25 22:18:01 +02:00
|
|
|
elog(ERROR, "cache lookup failed for function %u", funcid);
|
2000-08-13 04:50:35 +02:00
|
|
|
|
2002-04-05 02:31:36 +02:00
|
|
|
result = ((Form_pg_proc) GETSTRUCT(tp))->provolatile;
|
2000-11-16 23:30:52 +01:00
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
2000-08-13 04:50:35 +02:00
|
|
|
}
|
|
|
|
|
2015-09-16 21:38:47 +02:00
|
|
|
/*
|
|
|
|
* func_parallel
|
|
|
|
* Given procedure id, return the function's proparallel flag.
|
|
|
|
*/
|
|
|
|
char
|
|
|
|
func_parallel(Oid funcid)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
char result;
|
|
|
|
|
|
|
|
tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
|
|
|
|
if (!HeapTupleIsValid(tp))
|
|
|
|
elog(ERROR, "cache lookup failed for function %u", funcid);
|
|
|
|
|
|
|
|
result = ((Form_pg_proc) GETSTRUCT(tp))->proparallel;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2017-11-30 14:46:13 +01:00
|
|
|
/*
|
2018-03-02 14:57:38 +01:00
|
|
|
* get_func_prokind
|
|
|
|
* Given procedure id, return the routine kind.
|
2017-11-30 14:46:13 +01:00
|
|
|
*/
|
2018-03-02 14:57:38 +01:00
|
|
|
char
|
|
|
|
get_func_prokind(Oid funcid)
|
2017-11-30 14:46:13 +01:00
|
|
|
{
|
|
|
|
HeapTuple tp;
|
2018-03-02 14:57:38 +01:00
|
|
|
char result;
|
2017-11-30 14:46:13 +01:00
|
|
|
|
|
|
|
tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
|
|
|
|
if (!HeapTupleIsValid(tp))
|
|
|
|
elog(ERROR, "cache lookup failed for function %u", funcid);
|
|
|
|
|
2018-03-02 14:57:38 +01:00
|
|
|
result = ((Form_pg_proc) GETSTRUCT(tp))->prokind;
|
2017-11-30 14:46:13 +01:00
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2012-02-14 04:20:27 +01:00
|
|
|
/*
|
|
|
|
* get_func_leakproof
|
|
|
|
* Given procedure id, return the function's leakproof field.
|
|
|
|
*/
|
|
|
|
bool
|
|
|
|
get_func_leakproof(Oid funcid)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
bool result;
|
|
|
|
|
|
|
|
tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
|
|
|
|
if (!HeapTupleIsValid(tp))
|
|
|
|
elog(ERROR, "cache lookup failed for function %u", funcid);
|
|
|
|
|
|
|
|
result = ((Form_pg_proc) GETSTRUCT(tp))->proleakproof;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2007-01-22 02:35:23 +01:00
|
|
|
/*
|
2019-02-10 00:32:23 +01:00
|
|
|
* get_func_support
|
|
|
|
*
|
|
|
|
* Returns the support function OID associated with a given function,
|
|
|
|
* or InvalidOid if there is none.
|
2007-01-22 02:35:23 +01:00
|
|
|
*/
|
2019-02-10 00:32:23 +01:00
|
|
|
RegProcedure
|
|
|
|
get_func_support(Oid funcid)
|
2007-01-22 02:35:23 +01:00
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
|
2019-02-10 00:32:23 +01:00
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
|
|
|
Form_pg_proc functup = (Form_pg_proc) GETSTRUCT(tp);
|
|
|
|
RegProcedure result;
|
2007-01-22 02:35:23 +01:00
|
|
|
|
2019-02-10 00:32:23 +01:00
|
|
|
result = functup->prosupport;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
return (RegProcedure) InvalidOid;
|
2007-01-22 02:35:23 +01:00
|
|
|
}
|
|
|
|
|
1996-07-09 08:22:35 +02:00
|
|
|
/* ---------- RELATION CACHE ---------- */
|
|
|
|
|
2002-03-26 20:17:02 +01:00
|
|
|
/*
 * get_relname_relid
 *	  Given name and namespace of a relation, look up the OID.
 *
 * Returns InvalidOid if there is no such relation.
 */
Oid
get_relname_relid(const char *relname, Oid relnamespace)
{
	/* Single syscache probe on the (relname, relnamespace) unique index */
	return GetSysCacheOid2(RELNAMENSP, Anum_pg_class_oid,
						   PointerGetDatum(relname),
						   ObjectIdGetDatum(relnamespace));
}
|
|
|
|
|
2000-06-09 00:38:00 +02:00
|
|
|
#ifdef NOT_USED
/*
 * get_relnatts
 *
 * Returns the number of attributes for a given relation, or
 * InvalidAttrNumber if the relation does not exist.
 */
int
get_relnatts(Oid relid)
{
	int			relnatts = InvalidAttrNumber;
	HeapTuple	reltup;

	reltup = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
	if (HeapTupleIsValid(reltup))
	{
		relnatts = ((Form_pg_class) GETSTRUCT(reltup))->relnatts;
		ReleaseSysCache(reltup);
	}
	return relnatts;
}
#endif
|
1996-07-09 08:22:35 +02:00
|
|
|
|
|
|
|
/*
|
2000-07-23 05:50:26 +02:00
|
|
|
* get_rel_name
|
1996-07-09 08:22:35 +02:00
|
|
|
* Returns the name of a given relation.
|
2000-11-16 23:30:52 +01:00
|
|
|
*
|
2002-03-26 20:17:02 +01:00
|
|
|
* Returns a palloc'd copy of the string, or NULL if no such relation.
|
|
|
|
*
|
|
|
|
* NOTE: since relation name is not unique, be wary of code that uses this
|
|
|
|
* for anything except preparing error messages.
|
1996-07-09 08:22:35 +02:00
|
|
|
*/
|
|
|
|
char *
|
|
|
|
get_rel_name(Oid relid)
|
|
|
|
{
|
1999-05-29 03:45:21 +02:00
|
|
|
HeapTuple tp;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
|
1999-05-29 03:45:21 +02:00
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
|
|
|
Form_pg_class reltup = (Form_pg_class) GETSTRUCT(tp);
|
2000-11-16 23:30:52 +01:00
|
|
|
char *result;
|
2000-04-12 19:17:23 +02:00
|
|
|
|
2000-11-16 23:30:52 +01:00
|
|
|
result = pstrdup(NameStr(reltup->relname));
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
1999-05-29 03:45:21 +02:00
|
|
|
}
|
1996-07-09 08:22:35 +02:00
|
|
|
else
|
1998-01-20 06:05:08 +01:00
|
|
|
return NULL;
|
1996-07-09 08:22:35 +02:00
|
|
|
}
|
|
|
|
|
2002-04-30 03:26:26 +02:00
|
|
|
/*
|
|
|
|
* get_rel_namespace
|
|
|
|
*
|
|
|
|
* Returns the pg_namespace OID associated with a given relation.
|
|
|
|
*/
|
|
|
|
Oid
|
|
|
|
get_rel_namespace(Oid relid)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
|
2002-04-30 03:26:26 +02:00
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
|
|
|
Form_pg_class reltup = (Form_pg_class) GETSTRUCT(tp);
|
|
|
|
Oid result;
|
|
|
|
|
|
|
|
result = reltup->relnamespace;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
return InvalidOid;
|
|
|
|
}
|
|
|
|
|
2002-03-22 03:56:37 +01:00
|
|
|
/*
|
|
|
|
* get_rel_type_id
|
|
|
|
*
|
|
|
|
* Returns the pg_type OID associated with a given relation.
|
|
|
|
*
|
|
|
|
* Note: not all pg_class entries have associated pg_type OIDs; so be
|
|
|
|
* careful to check for InvalidOid result.
|
|
|
|
*/
|
|
|
|
Oid
|
|
|
|
get_rel_type_id(Oid relid)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
|
2002-03-22 03:56:37 +01:00
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
|
|
|
Form_pg_class reltup = (Form_pg_class) GETSTRUCT(tp);
|
|
|
|
Oid result;
|
|
|
|
|
|
|
|
result = reltup->reltype;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
return InvalidOid;
|
|
|
|
}
|
|
|
|
|
2002-09-20 01:40:56 +02:00
|
|
|
/*
|
|
|
|
* get_rel_relkind
|
|
|
|
*
|
|
|
|
* Returns the relkind associated with a given relation.
|
|
|
|
*/
|
|
|
|
char
|
|
|
|
get_rel_relkind(Oid relid)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
|
2002-09-20 01:40:56 +02:00
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
|
|
|
Form_pg_class reltup = (Form_pg_class) GETSTRUCT(tp);
|
|
|
|
char result;
|
|
|
|
|
|
|
|
result = reltup->relkind;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
return '\0';
|
|
|
|
}
|
|
|
|
|
Fix assorted bugs in pg_get_partition_constraintdef().
It failed if passed a nonexistent relation OID, or one that was a non-heap
relation, because of blindly applying heap_open to a user-supplied OID.
This is not OK behavior for a SQL-exposed function; we have a project
policy that we should return NULL in such cases. Moreover, since
pg_get_partition_constraintdef ought now to work on indexes, restricting
it to heaps is flat wrong anyway.
The underlying function generate_partition_qual() wasn't on board with
indexes having partition quals either, nor for that matter with rels
having relispartition set but yet null relpartbound. (One wonders
whether the person who wrote the function comment blocks claiming that
these functions allow a missing relpartbound had ever tested it.)
Fix by testing relispartition before opening the rel, and by using
relation_open not heap_open. (If any other relkinds ever grow the
ability to have relispartition set, the code will work with them
automatically.) Also, don't reject null relpartbound in
generate_partition_qual.
Back-patch to v11, and all but the null-relpartbound change to v10.
(It's not really necessary to change generate_partition_qual at all
in v10, but I thought s/heap_open/relation_open/ would be a good
idea anyway just to keep the code in sync with later branches.)
Per report from Justin Pryzby.
Discussion: https://postgr.es/m/20180927200020.GJ776@telsasoft.com
2018-09-28 00:15:06 +02:00
|
|
|
/*
|
|
|
|
* get_rel_relispartition
|
|
|
|
*
|
|
|
|
* Returns the relispartition flag associated with a given relation.
|
|
|
|
*/
|
|
|
|
bool
|
|
|
|
get_rel_relispartition(Oid relid)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
|
|
|
|
tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
|
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
|
|
|
Form_pg_class reltup = (Form_pg_class) GETSTRUCT(tp);
|
|
|
|
bool result;
|
|
|
|
|
|
|
|
result = reltup->relispartition;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2007-10-13 17:55:40 +02:00
|
|
|
/*
|
|
|
|
* get_rel_tablespace
|
|
|
|
*
|
|
|
|
* Returns the pg_tablespace OID associated with a given relation.
|
|
|
|
*
|
|
|
|
* Note: InvalidOid might mean either that we couldn't find the relation,
|
|
|
|
* or that it is in the database's default tablespace.
|
|
|
|
*/
|
|
|
|
Oid
|
|
|
|
get_rel_tablespace(Oid relid)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
|
2007-10-13 17:55:40 +02:00
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
|
|
|
Form_pg_class reltup = (Form_pg_class) GETSTRUCT(tp);
|
|
|
|
Oid result;
|
|
|
|
|
|
|
|
result = reltup->reltablespace;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
return InvalidOid;
|
|
|
|
}
|
|
|
|
|
Generate parallel sequential scan plans in simple cases.
Add a new flag, consider_parallel, to each RelOptInfo, indicating
whether a plan for that relation could conceivably be run inside of
a parallel worker. Right now, we're pretty conservative: for example,
it might be possible to defer applying a parallel-restricted qual
in a worker, and later do it in the leader, but right now we just
don't try to parallelize access to that relation. That's probably
the right decision in most cases, anyway.
Using the new flag, generate parallel sequential scan plans for plain
baserels, meaning that we now have parallel sequential scan in
PostgreSQL. The logic here is pretty unsophisticated right now: the
costing model probably isn't right in detail, and we can't push joins
beneath Gather nodes, so the number of plans that can actually benefit
from this is pretty limited right now. Lots more work is needed.
Nevertheless, it seems time to enable this functionality so that all
this code can actually be tested easily by users and developers.
Note that, if you wish to test this functionality, it will be
necessary to set max_parallel_degree to a value greater than the
default of 0. Once a few more loose ends have been tidied up here, we
might want to consider changing the default value of this GUC, but
I'm leaving it alone for now.
Along the way, fix a bug in cost_gather: the previous coding thought
that a Gather node's transfer overhead should be costed on the basis of
the relation size rather than the number of tuples that actually need
to be passed off to the leader.
Patch by me, reviewed in earlier versions by Amit Kapila.
2015-11-11 15:02:52 +01:00
|
|
|
/*
|
|
|
|
* get_rel_persistence
|
|
|
|
*
|
|
|
|
* Returns the relpersistence associated with a given relation.
|
|
|
|
*/
|
|
|
|
char
|
|
|
|
get_rel_persistence(Oid relid)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
Form_pg_class reltup;
|
|
|
|
char result;
|
|
|
|
|
|
|
|
tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
|
|
|
|
if (!HeapTupleIsValid(tp))
|
|
|
|
elog(ERROR, "cache lookup failed for relation %u", relid);
|
|
|
|
reltup = (Form_pg_class) GETSTRUCT(tp);
|
|
|
|
result = reltup->relpersistence;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2002-09-20 01:40:56 +02:00
|
|
|
|
2015-04-26 16:33:14 +02:00
|
|
|
/* ---------- TRANSFORM CACHE ---------- */
|
|
|
|
|
|
|
|
Oid
|
|
|
|
get_transform_fromsql(Oid typid, Oid langid, List *trftypes)
|
|
|
|
{
|
|
|
|
HeapTuple tup;
|
|
|
|
|
|
|
|
if (!list_member_oid(trftypes, typid))
|
|
|
|
return InvalidOid;
|
|
|
|
|
|
|
|
tup = SearchSysCache2(TRFTYPELANG, typid, langid);
|
|
|
|
if (HeapTupleIsValid(tup))
|
|
|
|
{
|
|
|
|
Oid funcid;
|
|
|
|
|
|
|
|
funcid = ((Form_pg_transform) GETSTRUCT(tup))->trffromsql;
|
|
|
|
ReleaseSysCache(tup);
|
|
|
|
return funcid;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
return InvalidOid;
|
|
|
|
}
|
|
|
|
|
|
|
|
Oid
|
|
|
|
get_transform_tosql(Oid typid, Oid langid, List *trftypes)
|
|
|
|
{
|
|
|
|
HeapTuple tup;
|
|
|
|
|
|
|
|
if (!list_member_oid(trftypes, typid))
|
|
|
|
return InvalidOid;
|
|
|
|
|
|
|
|
tup = SearchSysCache2(TRFTYPELANG, typid, langid);
|
|
|
|
if (HeapTupleIsValid(tup))
|
|
|
|
{
|
|
|
|
Oid funcid;
|
|
|
|
|
|
|
|
funcid = ((Form_pg_transform) GETSTRUCT(tup))->trftosql;
|
|
|
|
ReleaseSysCache(tup);
|
|
|
|
return funcid;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
return InvalidOid;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
1996-07-09 08:22:35 +02:00
|
|
|
/* ---------- TYPE CACHE ---------- */
|
|
|
|
|
2002-03-29 20:06:29 +01:00
|
|
|
/*
|
|
|
|
* get_typisdefined
|
|
|
|
*
|
|
|
|
* Given the type OID, determine whether the type is defined
|
|
|
|
* (if not, it's only a shell).
|
|
|
|
*/
|
|
|
|
bool
|
|
|
|
get_typisdefined(Oid typid)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid));
|
2002-03-29 20:06:29 +01:00
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
|
|
|
Form_pg_type typtup = (Form_pg_type) GETSTRUCT(tp);
|
|
|
|
bool result;
|
|
|
|
|
|
|
|
result = typtup->typisdefined;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
1996-07-09 08:22:35 +02:00
|
|
|
/*
|
2000-07-23 05:50:26 +02:00
|
|
|
* get_typlen
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
|
|
|
* Given the type OID, return the length of the type.
|
|
|
|
*/
|
|
|
|
int16
|
|
|
|
get_typlen(Oid typid)
|
|
|
|
{
|
1999-05-29 03:45:21 +02:00
|
|
|
HeapTuple tp;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid));
|
1999-05-29 03:45:21 +02:00
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
|
|
|
Form_pg_type typtup = (Form_pg_type) GETSTRUCT(tp);
|
2000-11-16 23:30:52 +01:00
|
|
|
int16 result;
|
2000-04-12 19:17:23 +02:00
|
|
|
|
2000-11-16 23:30:52 +01:00
|
|
|
result = typtup->typlen;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
1999-05-29 03:45:21 +02:00
|
|
|
}
|
1996-07-09 08:22:35 +02:00
|
|
|
else
|
1999-05-29 03:45:21 +02:00
|
|
|
return 0;
|
1996-07-09 08:22:35 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2000-07-23 05:50:26 +02:00
|
|
|
* get_typbyval
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
|
|
|
* Given the type OID, determine whether the type is returned by value or
|
2000-07-23 05:50:26 +02:00
|
|
|
* not. Returns true if by value, false if by reference.
|
1996-07-09 08:22:35 +02:00
|
|
|
*/
|
|
|
|
bool
|
|
|
|
get_typbyval(Oid typid)
|
|
|
|
{
|
1999-05-29 03:45:21 +02:00
|
|
|
HeapTuple tp;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid));
|
1999-05-29 03:45:21 +02:00
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
|
|
|
Form_pg_type typtup = (Form_pg_type) GETSTRUCT(tp);
|
2000-11-16 23:30:52 +01:00
|
|
|
bool result;
|
2000-04-12 19:17:23 +02:00
|
|
|
|
2000-11-16 23:30:52 +01:00
|
|
|
result = typtup->typbyval;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
1999-05-29 03:45:21 +02:00
|
|
|
}
|
1996-07-09 08:22:35 +02:00
|
|
|
else
|
1998-01-20 06:05:08 +01:00
|
|
|
return false;
|
1996-07-09 08:22:35 +02:00
|
|
|
}
|
|
|
|
|
2000-11-16 23:30:52 +01:00
|
|
|
/*
|
|
|
|
* get_typlenbyval
|
|
|
|
*
|
|
|
|
* A two-fer: given the type OID, return both typlen and typbyval.
|
|
|
|
*
|
|
|
|
* Since both pieces of info are needed to know how to copy a Datum,
|
|
|
|
* many places need both. Might as well get them with one cache lookup
|
|
|
|
* instead of two. Also, this routine raises an error instead of
|
|
|
|
* returning a bogus value when given a bad type OID.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
get_typlenbyval(Oid typid, int16 *typlen, bool *typbyval)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
Form_pg_type typtup;
|
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid));
|
2000-11-16 23:30:52 +01:00
|
|
|
if (!HeapTupleIsValid(tp))
|
|
|
|
elog(ERROR, "cache lookup failed for type %u", typid);
|
|
|
|
typtup = (Form_pg_type) GETSTRUCT(tp);
|
|
|
|
*typlen = typtup->typlen;
|
|
|
|
*typbyval = typtup->typbyval;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
}
|
|
|
|
|
2002-08-26 19:54:02 +02:00
|
|
|
/*
|
|
|
|
* get_typlenbyvalalign
|
|
|
|
*
|
|
|
|
* A three-fer: given the type OID, return typlen, typbyval, typalign.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
get_typlenbyvalalign(Oid typid, int16 *typlen, bool *typbyval,
|
|
|
|
char *typalign)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
Form_pg_type typtup;
|
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid));
|
2002-08-26 19:54:02 +02:00
|
|
|
if (!HeapTupleIsValid(tp))
|
|
|
|
elog(ERROR, "cache lookup failed for type %u", typid);
|
|
|
|
typtup = (Form_pg_type) GETSTRUCT(tp);
|
|
|
|
*typlen = typtup->typlen;
|
|
|
|
*typbyval = typtup->typbyval;
|
|
|
|
*typalign = typtup->typalign;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
}
|
|
|
|
|
2004-06-06 02:41:28 +02:00
|
|
|
/*
 * getTypeIOParam
 *		Given a pg_type row, select the type OID to pass to I/O functions
 *
 * Formerly, all I/O functions were passed pg_type.typelem as their second
 * parameter, but we now have a more complex rule about what to pass.
 * This knowledge is intended to be centralized here --- direct references
 * to typelem elsewhere in the code are wrong, if they are associated with
 * I/O calls and not with actual subscripting operations!  (But see
 * bootstrap.c's boot_get_type_io_data() if you need to change this.)
 *
 * As of PostgreSQL 8.1, output functions receive only the value itself
 * and not any auxiliary parameters, so the name of this routine is now
 * a bit of a misnomer ... it should be getTypeInputParam.
 */
Oid
getTypeIOParam(HeapTuple typeTuple)
{
	Form_pg_type typeStruct = (Form_pg_type) GETSTRUCT(typeTuple);

	/*
	 * Array types get their typelem as parameter; everybody else gets their
	 * own type OID as parameter.  (A valid typelem is how we detect an array
	 * type here.)
	 */
	if (OidIsValid(typeStruct->typelem))
		return typeStruct->typelem;
	else
		return typeStruct->oid;
}
|
|
|
|
|
2003-06-27 02:33:26 +02:00
|
|
|
/*
 * get_type_io_data
 *
 *		A six-fer:	given the type OID, return typlen, typbyval, typalign,
 *					typdelim, typioparam, and IO function OID. The IO function
 *					returned is controlled by IOFuncSelector
 *
 * Raises an error if the type OID has no pg_type row (outside bootstrap),
 * or if a binary I/O function is requested during bootstrap.
 */
void
get_type_io_data(Oid typid,
				 IOFuncSelector which_func,
				 int16 *typlen,
				 bool *typbyval,
				 char *typalign,
				 char *typdelim,
				 Oid *typioparam,
				 Oid *func)
{
	HeapTuple	typeTuple;
	Form_pg_type typeStruct;

	/*
	 * In bootstrap mode, pass it off to bootstrap.c.  This hack allows us to
	 * use array_in and array_out during bootstrap.
	 */
	if (IsBootstrapProcessingMode())
	{
		Oid			typinput;
		Oid			typoutput;

		boot_get_type_io_data(typid,
							  typlen,
							  typbyval,
							  typalign,
							  typdelim,
							  typioparam,
							  &typinput,
							  &typoutput);
		/* Bootstrap only supports text I/O; binary selectors are an error */
		switch (which_func)
		{
			case IOFunc_input:
				*func = typinput;
				break;
			case IOFunc_output:
				*func = typoutput;
				break;
			default:
				elog(ERROR, "binary I/O not supported during bootstrap");
				break;
		}
		return;
	}

	typeTuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid));
	if (!HeapTupleIsValid(typeTuple))
		elog(ERROR, "cache lookup failed for type %u", typid);
	typeStruct = (Form_pg_type) GETSTRUCT(typeTuple);

	*typlen = typeStruct->typlen;
	*typbyval = typeStruct->typbyval;
	*typalign = typeStruct->typalign;
	*typdelim = typeStruct->typdelim;
	*typioparam = getTypeIOParam(typeTuple);
	switch (which_func)
	{
		case IOFunc_input:
			*func = typeStruct->typinput;
			break;
		case IOFunc_output:
			*func = typeStruct->typoutput;
			break;
		case IOFunc_receive:
			*func = typeStruct->typreceive;
			break;
		case IOFunc_send:
			*func = typeStruct->typsend;
			break;
	}
	ReleaseSysCache(typeTuple);
}
|
|
|
|
|
1997-08-19 23:40:56 +02:00
|
|
|
#ifdef NOT_USED
|
1996-07-09 08:22:35 +02:00
|
|
|
char
|
|
|
|
get_typalign(Oid typid)
|
|
|
|
{
|
1999-05-29 03:45:21 +02:00
|
|
|
HeapTuple tp;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid));
|
1999-05-29 03:45:21 +02:00
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
|
|
|
Form_pg_type typtup = (Form_pg_type) GETSTRUCT(tp);
|
2000-11-16 23:30:52 +01:00
|
|
|
char result;
|
2000-04-12 19:17:23 +02:00
|
|
|
|
2000-11-16 23:30:52 +01:00
|
|
|
result = typtup->typalign;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
1999-05-29 03:45:21 +02:00
|
|
|
}
|
1996-07-09 08:22:35 +02:00
|
|
|
else
|
2020-03-04 16:34:25 +01:00
|
|
|
return TYPALIGN_INT;
|
1996-07-09 08:22:35 +02:00
|
|
|
}
|
1997-08-19 23:40:56 +02:00
|
|
|
#endif
|
1996-07-09 08:22:35 +02:00
|
|
|
|
2000-11-20 21:36:57 +01:00
|
|
|
char
|
|
|
|
get_typstorage(Oid typid)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid));
|
2000-11-20 21:36:57 +01:00
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
|
|
|
Form_pg_type typtup = (Form_pg_type) GETSTRUCT(tp);
|
|
|
|
char result;
|
|
|
|
|
|
|
|
result = typtup->typstorage;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
else
|
2020-03-04 16:34:25 +01:00
|
|
|
return TYPSTORAGE_PLAIN;
|
2000-11-20 21:36:57 +01:00
|
|
|
}
|
|
|
|
|
1996-07-09 08:22:35 +02:00
|
|
|
/*
 * get_typdefault
 *	  Given a type OID, return the type's default value, if any.
 *
 *	  The result is a palloc'd expression node tree, or NULL if there
 *	  is no defined default for the datatype.
 *
 *	  NB: caller should be prepared to coerce result to correct datatype;
 *	  the returned expression tree might produce something of the wrong type.
 */
Node *
get_typdefault(Oid typid)
{
	HeapTuple	typeTuple;
	Form_pg_type type;
	Datum		datum;
	bool		isNull;
	Node	   *expr;

	typeTuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid));
	if (!HeapTupleIsValid(typeTuple))
		elog(ERROR, "cache lookup failed for type %u", typid);
	type = (Form_pg_type) GETSTRUCT(typeTuple);

	/*
	 * typdefault and typdefaultbin are potentially null, so don't try to
	 * access 'em as struct fields. Must do it the hard way with
	 * SysCacheGetAttr.
	 */
	datum = SysCacheGetAttr(TYPEOID,
							typeTuple,
							Anum_pg_type_typdefaultbin,
							&isNull);

	if (!isNull)
	{
		/* We have an expression default (stored as a serialized node tree) */
		expr = stringToNode(TextDatumGetCString(datum));
	}
	else
	{
		/* Perhaps we have a plain literal default */
		datum = SysCacheGetAttr(TYPEOID,
								typeTuple,
								Anum_pg_type_typdefault,
								&isNull);

		if (!isNull)
		{
			char	   *strDefaultVal;

			/* Convert text datum to C string */
			strDefaultVal = TextDatumGetCString(datum);
			/* Convert C string to a value of the given type */
			datum = OidInputFunctionCall(type->typinput, strDefaultVal,
										 getTypeIOParam(typeTuple), -1);
			/* Build a Const node containing the value */
			expr = (Node *) makeConst(typid,
									  -1,
									  type->typcollation,
									  type->typlen,
									  datum,
									  false,
									  type->typbyval);
			pfree(strDefaultVal);
		}
		else
		{
			/* No default at all */
			expr = NULL;
		}
	}

	ReleaseSysCache(typeTuple);

	return expr;
}
|
2002-03-06 21:35:02 +01:00
|
|
|
|
2002-03-20 20:45:13 +01:00
|
|
|
/*
|
|
|
|
* getBaseType
|
|
|
|
* If the given type is a domain, return its base type;
|
|
|
|
* otherwise return the type's own OID.
|
|
|
|
*/
|
|
|
|
Oid
|
|
|
|
getBaseType(Oid typid)
|
2006-04-06 00:11:58 +02:00
|
|
|
{
|
|
|
|
int32 typmod = -1;
|
|
|
|
|
|
|
|
return getBaseTypeAndTypmod(typid, &typmod);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * getBaseTypeAndTypmod
 *		If the given type is a domain, return its base type and typmod;
 *		otherwise return the type's own OID, and leave *typmod unchanged.
 *
 * Note that the "applied typmod" should be -1 for every domain level
 * above the bottommost; therefore, if the passed-in typid is indeed
 * a domain, *typmod should be -1.
 *
 * Raises an error if any type OID in the domain chain has no pg_type row.
 */
Oid
getBaseTypeAndTypmod(Oid typid, int32 *typmod)
{
	/*
	 * We loop to find the bottom base type in a stack of domains.
	 */
	for (;;)
	{
		HeapTuple	tup;
		Form_pg_type typTup;

		tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid));
		if (!HeapTupleIsValid(tup))
			elog(ERROR, "cache lookup failed for type %u", typid);
		typTup = (Form_pg_type) GETSTRUCT(tup);
		if (typTup->typtype != TYPTYPE_DOMAIN)
		{
			/* Not a domain, so done */
			ReleaseSysCache(tup);
			break;
		}

		/* See header comment: only the bottommost level carries a typmod */
		Assert(*typmod == -1);
		typid = typTup->typbasetype;
		*typmod = typTup->typtypmod;

		ReleaseSysCache(tup);
	}

	return typid;
}
|
|
|
|
|
2001-05-09 02:35:09 +02:00
|
|
|
/*
|
|
|
|
* get_typavgwidth
|
|
|
|
*
|
|
|
|
* Given a type OID and a typmod value (pass -1 if typmod is unknown),
|
|
|
|
* estimate the average width of values of the type. This is used by
|
|
|
|
* the planner, which doesn't require absolutely correct results;
|
|
|
|
* it's OK (and expected) to guess if we don't know for sure.
|
|
|
|
*/
|
|
|
|
int32
|
|
|
|
get_typavgwidth(Oid typid, int32 typmod)
|
|
|
|
{
|
|
|
|
int typlen = get_typlen(typid);
|
|
|
|
int32 maxwidth;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Easy if it's a fixed-width type
|
|
|
|
*/
|
|
|
|
if (typlen > 0)
|
|
|
|
return typlen;
|
2001-10-25 07:50:21 +02:00
|
|
|
|
2001-05-09 02:35:09 +02:00
|
|
|
/*
|
|
|
|
* type_maximum_size knows the encoding of typmod for some datatypes;
|
|
|
|
* don't duplicate that knowledge here.
|
|
|
|
*/
|
|
|
|
maxwidth = type_maximum_size(typid, typmod);
|
|
|
|
if (maxwidth > 0)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* For BPCHAR, the max width is also the only width. Otherwise we
|
|
|
|
* need to guess about the typical data width given the max. A sliding
|
|
|
|
* scale for percentage of max width seems reasonable.
|
|
|
|
*/
|
|
|
|
if (typid == BPCHAROID)
|
|
|
|
return maxwidth;
|
|
|
|
if (maxwidth <= 32)
|
|
|
|
return maxwidth; /* assume full width */
|
|
|
|
if (maxwidth < 1000)
|
|
|
|
return 32 + (maxwidth - 32) / 2; /* assume 50% */
|
2001-10-25 07:50:21 +02:00
|
|
|
|
2001-05-09 02:35:09 +02:00
|
|
|
/*
|
|
|
|
* Beyond 1000, assume we're looking at something like
|
|
|
|
* "varchar(10000)" where the limit isn't actually reached often, and
|
|
|
|
* use a fixed estimate.
|
|
|
|
*/
|
|
|
|
return 32 + (1000 - 32) / 2;
|
|
|
|
}
|
2001-10-25 07:50:21 +02:00
|
|
|
|
2001-05-09 02:35:09 +02:00
|
|
|
/*
|
2017-03-14 16:38:30 +01:00
|
|
|
* Oops, we have no idea ... wild guess time.
|
2001-05-09 02:35:09 +02:00
|
|
|
*/
|
|
|
|
return 32;
|
|
|
|
}
|
|
|
|
|
1996-07-09 08:22:35 +02:00
|
|
|
/*
|
2000-07-23 05:50:26 +02:00
|
|
|
* get_typtype
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
2002-08-05 04:30:50 +02:00
|
|
|
* Given the type OID, find if it is a basic type, a complex type, etc.
|
1996-07-09 08:22:35 +02:00
|
|
|
* It returns the null char if the cache lookup fails...
|
|
|
|
*/
|
|
|
|
char
|
|
|
|
get_typtype(Oid typid)
|
|
|
|
{
|
1999-05-29 03:45:21 +02:00
|
|
|
HeapTuple tp;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid));
|
1999-05-29 03:45:21 +02:00
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
|
|
|
Form_pg_type typtup = (Form_pg_type) GETSTRUCT(tp);
|
2000-11-16 23:30:52 +01:00
|
|
|
char result;
|
2000-04-12 19:17:23 +02:00
|
|
|
|
2000-11-16 23:30:52 +01:00
|
|
|
result = typtup->typtype;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
1999-05-29 03:45:21 +02:00
|
|
|
}
|
1996-07-09 08:22:35 +02:00
|
|
|
else
|
1998-01-20 06:05:08 +01:00
|
|
|
return '\0';
|
1996-07-09 08:22:35 +02:00
|
|
|
}
|
2002-08-05 04:30:50 +02:00
|
|
|
|
2006-09-28 22:51:43 +02:00
|
|
|
/*
|
|
|
|
* type_is_rowtype
|
|
|
|
*
|
|
|
|
* Convenience function to determine whether a type OID represents
|
2017-10-26 19:47:45 +02:00
|
|
|
* a "rowtype" type --- either RECORD or a named composite type
|
|
|
|
* (including a domain over a named composite type).
|
2006-09-28 22:51:43 +02:00
|
|
|
*/
|
|
|
|
bool
|
|
|
|
type_is_rowtype(Oid typid)
|
|
|
|
{
|
2017-10-26 19:47:45 +02:00
|
|
|
if (typid == RECORDOID)
|
|
|
|
return true; /* easy case */
|
|
|
|
switch (get_typtype(typid))
|
|
|
|
{
|
|
|
|
case TYPTYPE_COMPOSITE:
|
|
|
|
return true;
|
|
|
|
case TYPTYPE_DOMAIN:
|
|
|
|
if (get_typtype(getBaseType(typid)) == TYPTYPE_COMPOSITE)
|
|
|
|
return true;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return false;
|
2007-04-02 05:49:42 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * type_is_enum
 *	  Returns true if the given type is an enum type.
 *
 * Note: returns false (not an error) if the type OID does not exist,
 * since get_typtype yields '\0' on lookup failure.
 */
bool
type_is_enum(Oid typid)
{
	return (get_typtype(typid) == TYPTYPE_ENUM);
}
|
|
|
|
|
2011-11-03 12:16:28 +01:00
|
|
|
/*
 * type_is_range
 *	  Returns true if the given type is a range type.
 *
 * Note: returns false (not an error) if the type OID does not exist,
 * since get_typtype yields '\0' on lookup failure.
 */
bool
type_is_range(Oid typid)
{
	return (get_typtype(typid) == TYPTYPE_RANGE);
}
|
|
|
|
|
Multirange datatypes
Multiranges are basically sorted arrays of non-overlapping ranges with
set-theoretic operations defined over them.
Since v14, each range type automatically gets a corresponding multirange
datatype. There are both manual and automatic mechanisms for naming multirange
types. One can specify a multirange type name using the multirange_type_name
attribute in CREATE TYPE. Otherwise, a multirange type name is generated
automatically. If the range type name contains "range" then we change that to
"multirange". Otherwise, we add "_multirange" to the end.
Implementation of multiranges comes with a space-efficient internal
representation format, which evades extra paddings and duplicated storage of
oids. Altogether this format allows fetching a particular range by its index
in O(n).
Statistic gathering and selectivity estimation are implemented for multiranges.
For this purpose, stored multirange is approximated as union range without gaps.
This field will likely need improvements in the future.
Catversion is bumped.
Discussion: https://postgr.es/m/CALNJ-vSUpQ_Y%3DjXvTxt1VYFztaBSsWVXeF1y6gTYQ4bOiWDLgQ%40mail.gmail.com
Discussion: https://postgr.es/m/a0b8026459d1e6167933be2104a6174e7d40d0ab.camel%40j-davis.com#fe7218c83b08068bfffb0c5293eceda0
Author: Paul Jungwirth, revised by me
Reviewed-by: David Fetter, Corey Huinker, Jeff Davis, Pavel Stehule
Reviewed-by: Alvaro Herrera, Tom Lane, Isaac Morland, David G. Johnston
Reviewed-by: Zhihong Yu, Alexander Korotkov
2020-12-20 05:20:33 +01:00
|
|
|
/*
 * type_is_multirange
 *	  Returns true if the given type is a multirange type.
 *
 * Note: returns false (not an error) if the type OID does not exist,
 * since get_typtype yields '\0' on lookup failure.
 */
bool
type_is_multirange(Oid typid)
{
	return (get_typtype(typid) == TYPTYPE_MULTIRANGE);
}
|
|
|
|
|
Replace the hard-wired type knowledge in TypeCategory() and IsPreferredType()
with system catalog lookups, as was foreseen to be necessary almost since
their creation. Instead put the information into two new pg_type columns,
typcategory and typispreferred. Add support for setting these when
creating a user-defined base type.
The category column is just a "char" (i.e. a poor man's enum), allowing
a crude form of user extensibility of the category list: just use an
otherwise-unused character. This seems sufficient for foreseen uses,
but we could upgrade to having an actual category catalog someday, if
there proves to be a huge demand for custom type categories.
In this patch I have attempted to hew exactly to the behavior of the
previous hardwired logic, except for introducing new type categories for
arrays, composites, and enums. In particular the default preferred state
for user-defined types remains TRUE. That seems worth revisiting, but it
should be done as a separate patch from introducing the infrastructure.
Likewise, any adjustment of the standard set of categories should be done
separately.
2008-07-30 19:05:05 +02:00
|
|
|
/*
|
|
|
|
* get_type_category_preferred
|
|
|
|
*
|
|
|
|
* Given the type OID, fetch its category and preferred-type status.
|
|
|
|
* Throws error on failure.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
get_type_category_preferred(Oid typid, char *typcategory, bool *typispreferred)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
Form_pg_type typtup;
|
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid));
|
Replace the hard-wired type knowledge in TypeCategory() and IsPreferredType()
with system catalog lookups, as was foreseen to be necessary almost since
their creation. Instead put the information into two new pg_type columns,
typcategory and typispreferred. Add support for setting these when
creating a user-defined base type.
The category column is just a "char" (i.e. a poor man's enum), allowing
a crude form of user extensibility of the category list: just use an
otherwise-unused character. This seems sufficient for foreseen uses,
but we could upgrade to having an actual category catalog someday, if
there proves to be a huge demand for custom type categories.
In this patch I have attempted to hew exactly to the behavior of the
previous hardwired logic, except for introducing new type categories for
arrays, composites, and enums. In particular the default preferred state
for user-defined types remains TRUE. That seems worth revisiting, but it
should be done as a separate patch from introducing the infrastructure.
Likewise, any adjustment of the standard set of categories should be done
separately.
2008-07-30 19:05:05 +02:00
|
|
|
if (!HeapTupleIsValid(tp))
|
|
|
|
elog(ERROR, "cache lookup failed for type %u", typid);
|
|
|
|
typtup = (Form_pg_type) GETSTRUCT(tp);
|
|
|
|
*typcategory = typtup->typcategory;
|
|
|
|
*typispreferred = typtup->typispreferred;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
}
|
|
|
|
|
2002-09-20 01:40:56 +02:00
|
|
|
/*
|
|
|
|
* get_typ_typrelid
|
|
|
|
*
|
|
|
|
* Given the type OID, get the typrelid (InvalidOid if not a complex
|
|
|
|
* type).
|
|
|
|
*/
|
|
|
|
Oid
|
|
|
|
get_typ_typrelid(Oid typid)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid));
|
2002-09-20 01:40:56 +02:00
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
|
|
|
Form_pg_type typtup = (Form_pg_type) GETSTRUCT(tp);
|
|
|
|
Oid result;
|
|
|
|
|
|
|
|
result = typtup->typrelid;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
return InvalidOid;
|
|
|
|
}
|
|
|
|
|
2003-04-09 01:20:04 +02:00
|
|
|
/*
|
|
|
|
* get_element_type
|
|
|
|
*
|
|
|
|
* Given the type OID, get the typelem (InvalidOid if not an array type).
|
|
|
|
*
|
Support subscripting of arbitrary types, not only arrays.
This patch generalizes the subscripting infrastructure so that any
data type can be subscripted, if it provides a handler function to
define what that means. Traditional variable-length (varlena) arrays
all use array_subscript_handler(), while the existing fixed-length
types that support subscripting use raw_array_subscript_handler().
It's expected that other types that want to use subscripting notation
will define their own handlers. (This patch provides no such new
features, though; it only lays the foundation for them.)
To do this, move the parser's semantic processing of subscripts
(including coercion to whatever data type is required) into a
method callback supplied by the handler. On the execution side,
replace the ExecEvalSubscriptingRef* layer of functions with direct
calls to callback-supplied execution routines. (Thus, essentially
no new run-time overhead should be caused by this patch. Indeed,
there is room to remove some overhead by supplying specialized
execution routines. This patch does a little bit in that line,
but more could be done.)
Additional work is required here and there to remove formerly
hard-wired assumptions about the result type, collation, etc
of a SubscriptingRef expression node; and to remove assumptions
that the subscript values must be integers.
One useful side-effect of this is that we now have a less squishy
mechanism for identifying whether a data type is a "true" array:
instead of wiring in weird rules about typlen, we can look to see
if pg_type.typsubscript == F_ARRAY_SUBSCRIPT_HANDLER. For this
to be bulletproof, we have to forbid user-defined types from using
that handler directly; but there seems no good reason for them to
do so.
This patch also removes assumptions that the number of subscripts
is limited to MAXDIM (6), or indeed has any hard-wired limit.
That limit still applies to types handled by array_subscript_handler
or raw_array_subscript_handler, but to discourage other dependencies
on this constant, I've moved it from c.h to utils/array.h.
Dmitry Dolgov, reviewed at various times by Tom Lane, Arthur Zakirov,
Peter Eisentraut, Pavel Stehule
Discussion: https://postgr.es/m/CA+q6zcVDuGBv=M0FqBYX8DPebS3F_0KQ6OVFobGJPM507_SZ_w@mail.gmail.com
Discussion: https://postgr.es/m/CA+q6zcVovR+XY4mfk-7oNk-rF91gH0PebnNfuUjuuDsyHjOcVA@mail.gmail.com
2020-12-09 18:40:37 +01:00
|
|
|
* NB: this only succeeds for "true" arrays having array_subscript_handler
|
|
|
|
* as typsubscript. For other types, InvalidOid is returned independently
|
|
|
|
* of whether they have typelem or typsubscript set.
|
2003-04-09 01:20:04 +02:00
|
|
|
*/
|
|
|
|
Oid
|
|
|
|
get_element_type(Oid typid)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid));
|
2003-04-09 01:20:04 +02:00
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
|
|
|
Form_pg_type typtup = (Form_pg_type) GETSTRUCT(tp);
|
|
|
|
Oid result;
|
|
|
|
|
Support subscripting of arbitrary types, not only arrays.
This patch generalizes the subscripting infrastructure so that any
data type can be subscripted, if it provides a handler function to
define what that means. Traditional variable-length (varlena) arrays
all use array_subscript_handler(), while the existing fixed-length
types that support subscripting use raw_array_subscript_handler().
It's expected that other types that want to use subscripting notation
will define their own handlers. (This patch provides no such new
features, though; it only lays the foundation for them.)
To do this, move the parser's semantic processing of subscripts
(including coercion to whatever data type is required) into a
method callback supplied by the handler. On the execution side,
replace the ExecEvalSubscriptingRef* layer of functions with direct
calls to callback-supplied execution routines. (Thus, essentially
no new run-time overhead should be caused by this patch. Indeed,
there is room to remove some overhead by supplying specialized
execution routines. This patch does a little bit in that line,
but more could be done.)
Additional work is required here and there to remove formerly
hard-wired assumptions about the result type, collation, etc
of a SubscriptingRef expression node; and to remove assumptions
that the subscript values must be integers.
One useful side-effect of this is that we now have a less squishy
mechanism for identifying whether a data type is a "true" array:
instead of wiring in weird rules about typlen, we can look to see
if pg_type.typsubscript == F_ARRAY_SUBSCRIPT_HANDLER. For this
to be bulletproof, we have to forbid user-defined types from using
that handler directly; but there seems no good reason for them to
do so.
This patch also removes assumptions that the number of subscripts
is limited to MAXDIM (6), or indeed has any hard-wired limit.
That limit still applies to types handled by array_subscript_handler
or raw_array_subscript_handler, but to discourage other dependencies
on this constant, I've moved it from c.h to utils/array.h.
Dmitry Dolgov, reviewed at various times by Tom Lane, Arthur Zakirov,
Peter Eisentraut, Pavel Stehule
Discussion: https://postgr.es/m/CA+q6zcVDuGBv=M0FqBYX8DPebS3F_0KQ6OVFobGJPM507_SZ_w@mail.gmail.com
Discussion: https://postgr.es/m/CA+q6zcVovR+XY4mfk-7oNk-rF91gH0PebnNfuUjuuDsyHjOcVA@mail.gmail.com
2020-12-09 18:40:37 +01:00
|
|
|
if (IsTrueArrayType(typtup))
|
2003-04-09 01:20:04 +02:00
|
|
|
result = typtup->typelem;
|
|
|
|
else
|
|
|
|
result = InvalidOid;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
return InvalidOid;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* get_array_type
|
|
|
|
*
|
Support arrays of composite types, including the rowtypes of regular tables
and views (but not system catalogs, nor sequences or toast tables). Get rid
of the hardwired convention that a type's array type is named exactly "_type",
instead using a new column pg_type.typarray to provide the linkage. (It still
will be named "_type", though, except in odd corner cases such as
maximum-length type names.)
Along the way, make tracking of owner and schema dependencies for types more
uniform: a type directly created by the user has these dependencies, while a
table rowtype or auto-generated array type does not have them, but depends on
its parent object instead.
David Fetter, Andrew Dunstan, Tom Lane
2007-05-11 19:57:14 +02:00
|
|
|
* Given the type OID, get the corresponding "true" array type.
|
2003-04-09 01:20:04 +02:00
|
|
|
* Returns InvalidOid if no array type can be found.
|
|
|
|
*/
|
|
|
|
Oid
|
|
|
|
get_array_type(Oid typid)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
Support arrays of composite types, including the rowtypes of regular tables
and views (but not system catalogs, nor sequences or toast tables). Get rid
of the hardwired convention that a type's array type is named exactly "_type",
instead using a new column pg_type.typarray to provide the linkage. (It still
will be named "_type", though, except in odd corner cases such as
maximum-length type names.)
Along the way, make tracking of owner and schema dependencies for types more
uniform: a type directly created by the user has these dependencies, while a
table rowtype or auto-generated array type does not have them, but depends on
its parent object instead.
David Fetter, Andrew Dunstan, Tom Lane
2007-05-11 19:57:14 +02:00
|
|
|
Oid result = InvalidOid;
|
2003-04-09 01:20:04 +02:00
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid));
|
2003-04-09 01:20:04 +02:00
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
Support arrays of composite types, including the rowtypes of regular tables
and views (but not system catalogs, nor sequences or toast tables). Get rid
of the hardwired convention that a type's array type is named exactly "_type",
instead using a new column pg_type.typarray to provide the linkage. (It still
will be named "_type", though, except in odd corner cases such as
maximum-length type names.)
Along the way, make tracking of owner and schema dependencies for types more
uniform: a type directly created by the user has these dependencies, while a
table rowtype or auto-generated array type does not have them, but depends on
its parent object instead.
David Fetter, Andrew Dunstan, Tom Lane
2007-05-11 19:57:14 +02:00
|
|
|
result = ((Form_pg_type) GETSTRUCT(tp))->typarray;
|
2003-04-09 01:20:04 +02:00
|
|
|
ReleaseSysCache(tp);
|
|
|
|
}
|
Support arrays of composite types, including the rowtypes of regular tables
and views (but not system catalogs, nor sequences or toast tables). Get rid
of the hardwired convention that a type's array type is named exactly "_type",
instead using a new column pg_type.typarray to provide the linkage. (It still
will be named "_type", though, except in odd corner cases such as
maximum-length type names.)
Along the way, make tracking of owner and schema dependencies for types more
uniform: a type directly created by the user has these dependencies, while a
table rowtype or auto-generated array type does not have them, but depends on
its parent object instead.
David Fetter, Andrew Dunstan, Tom Lane
2007-05-11 19:57:14 +02:00
|
|
|
return result;
|
2003-04-09 01:20:04 +02:00
|
|
|
}
|
|
|
|
|
2014-11-25 18:21:22 +01:00
|
|
|
/*
|
|
|
|
* get_promoted_array_type
|
|
|
|
*
|
|
|
|
* The "promoted" type is what you'd get from an ARRAY(SELECT ...)
|
|
|
|
* construct, that is, either the corresponding "true" array type
|
|
|
|
* if the input is a scalar type that has such an array type,
|
|
|
|
* or the same type if the input is already a "true" array type.
|
|
|
|
* Returns InvalidOid if neither rule is satisfied.
|
|
|
|
*/
|
|
|
|
Oid
|
|
|
|
get_promoted_array_type(Oid typid)
|
|
|
|
{
|
|
|
|
Oid array_type = get_array_type(typid);
|
|
|
|
|
|
|
|
if (OidIsValid(array_type))
|
|
|
|
return array_type;
|
|
|
|
if (OidIsValid(get_element_type(typid)))
|
|
|
|
return typid;
|
|
|
|
return InvalidOid;
|
|
|
|
}
|
|
|
|
|
Improve handling of domains over arrays.
This patch eliminates various bizarre behaviors caused by sloppy thinking
about the difference between a domain type and its underlying array type.
In particular, the operation of updating one element of such an array
has to be considered as yielding a value of the underlying array type,
*not* a value of the domain, because there's no assurance that the
domain's CHECK constraints are still satisfied. If we're intending to
store the result back into a domain column, we have to re-cast to the
domain type so that constraints are re-checked.
For similar reasons, such a domain can't be blindly matched to an ANYARRAY
polymorphic parameter, because the polymorphic function is likely to apply
array-ish operations that could invalidate the domain constraints. For the
moment, we just forbid such matching. We might later wish to insert an
automatic downcast to the underlying array type, but such a change should
also change matching of domains to ANYELEMENT for consistency.
To ensure that all such logic is rechecked, this patch removes the original
hack of setting a domain's pg_type.typelem field to match its base type;
the typelem will always be zero instead. In those places where it's really
okay to look through the domain type with no other logic changes, use the
newly added get_base_element_type function in place of get_element_type.
catversion bumped due to change in pg_type contents.
Per bug #5717 from Richard Huxton and subsequent discussion.
2010-10-21 22:07:17 +02:00
|
|
|
/*
|
|
|
|
* get_base_element_type
|
|
|
|
* Given the type OID, get the typelem, looking "through" any domain
|
|
|
|
* to its underlying array type.
|
|
|
|
*
|
|
|
|
* This is equivalent to get_element_type(getBaseType(typid)), but avoids
|
|
|
|
* an extra cache lookup. Note that it fails to provide any information
|
|
|
|
* about the typmod of the array.
|
|
|
|
*/
|
|
|
|
Oid
|
|
|
|
get_base_element_type(Oid typid)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* We loop to find the bottom base type in a stack of domains.
|
|
|
|
*/
|
|
|
|
for (;;)
|
|
|
|
{
|
|
|
|
HeapTuple tup;
|
|
|
|
Form_pg_type typTup;
|
|
|
|
|
|
|
|
tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid));
|
|
|
|
if (!HeapTupleIsValid(tup))
|
|
|
|
break;
|
|
|
|
typTup = (Form_pg_type) GETSTRUCT(tup);
|
|
|
|
if (typTup->typtype != TYPTYPE_DOMAIN)
|
|
|
|
{
|
|
|
|
/* Not a domain, so stop descending */
|
|
|
|
Oid result;
|
|
|
|
|
|
|
|
/* This test must match get_element_type */
|
Support subscripting of arbitrary types, not only arrays.
This patch generalizes the subscripting infrastructure so that any
data type can be subscripted, if it provides a handler function to
define what that means. Traditional variable-length (varlena) arrays
all use array_subscript_handler(), while the existing fixed-length
types that support subscripting use raw_array_subscript_handler().
It's expected that other types that want to use subscripting notation
will define their own handlers. (This patch provides no such new
features, though; it only lays the foundation for them.)
To do this, move the parser's semantic processing of subscripts
(including coercion to whatever data type is required) into a
method callback supplied by the handler. On the execution side,
replace the ExecEvalSubscriptingRef* layer of functions with direct
calls to callback-supplied execution routines. (Thus, essentially
no new run-time overhead should be caused by this patch. Indeed,
there is room to remove some overhead by supplying specialized
execution routines. This patch does a little bit in that line,
but more could be done.)
Additional work is required here and there to remove formerly
hard-wired assumptions about the result type, collation, etc
of a SubscriptingRef expression node; and to remove assumptions
that the subscript values must be integers.
One useful side-effect of this is that we now have a less squishy
mechanism for identifying whether a data type is a "true" array:
instead of wiring in weird rules about typlen, we can look to see
if pg_type.typsubscript == F_ARRAY_SUBSCRIPT_HANDLER. For this
to be bulletproof, we have to forbid user-defined types from using
that handler directly; but there seems no good reason for them to
do so.
This patch also removes assumptions that the number of subscripts
is limited to MAXDIM (6), or indeed has any hard-wired limit.
That limit still applies to types handled by array_subscript_handler
or raw_array_subscript_handler, but to discourage other dependencies
on this constant, I've moved it from c.h to utils/array.h.
Dmitry Dolgov, reviewed at various times by Tom Lane, Arthur Zakirov,
Peter Eisentraut, Pavel Stehule
Discussion: https://postgr.es/m/CA+q6zcVDuGBv=M0FqBYX8DPebS3F_0KQ6OVFobGJPM507_SZ_w@mail.gmail.com
Discussion: https://postgr.es/m/CA+q6zcVovR+XY4mfk-7oNk-rF91gH0PebnNfuUjuuDsyHjOcVA@mail.gmail.com
2020-12-09 18:40:37 +01:00
|
|
|
if (IsTrueArrayType(typTup))
|
Improve handling of domains over arrays.
This patch eliminates various bizarre behaviors caused by sloppy thinking
about the difference between a domain type and its underlying array type.
In particular, the operation of updating one element of such an array
has to be considered as yielding a value of the underlying array type,
*not* a value of the domain, because there's no assurance that the
domain's CHECK constraints are still satisfied. If we're intending to
store the result back into a domain column, we have to re-cast to the
domain type so that constraints are re-checked.
For similar reasons, such a domain can't be blindly matched to an ANYARRAY
polymorphic parameter, because the polymorphic function is likely to apply
array-ish operations that could invalidate the domain constraints. For the
moment, we just forbid such matching. We might later wish to insert an
automatic downcast to the underlying array type, but such a change should
also change matching of domains to ANYELEMENT for consistency.
To ensure that all such logic is rechecked, this patch removes the original
hack of setting a domain's pg_type.typelem field to match its base type;
the typelem will always be zero instead. In those places where it's really
okay to look through the domain type with no other logic changes, use the
newly added get_base_element_type function in place of get_element_type.
catversion bumped due to change in pg_type contents.
Per bug #5717 from Richard Huxton and subsequent discussion.
2010-10-21 22:07:17 +02:00
|
|
|
result = typTup->typelem;
|
|
|
|
else
|
|
|
|
result = InvalidOid;
|
|
|
|
ReleaseSysCache(tup);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
typid = typTup->typbasetype;
|
|
|
|
ReleaseSysCache(tup);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Like get_element_type, silently return InvalidOid for bogus input */
|
|
|
|
return InvalidOid;
|
|
|
|
}
|
|
|
|
|
2002-08-29 02:17:06 +02:00
|
|
|
/*
|
|
|
|
* getTypeInputInfo
|
|
|
|
*
|
|
|
|
* Get info needed for converting values of a type to internal form
|
|
|
|
*/
|
|
|
|
void
|
2004-06-06 02:41:28 +02:00
|
|
|
getTypeInputInfo(Oid type, Oid *typInput, Oid *typIOParam)
|
2002-08-29 02:17:06 +02:00
|
|
|
{
|
|
|
|
HeapTuple typeTuple;
|
|
|
|
Form_pg_type pt;
|
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
typeTuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type));
|
2002-08-29 02:17:06 +02:00
|
|
|
if (!HeapTupleIsValid(typeTuple))
|
2003-07-25 22:18:01 +02:00
|
|
|
elog(ERROR, "cache lookup failed for type %u", type);
|
2002-08-29 02:17:06 +02:00
|
|
|
pt = (Form_pg_type) GETSTRUCT(typeTuple);
|
|
|
|
|
|
|
|
if (!pt->typisdefined)
|
2003-07-25 22:18:01 +02:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_UNDEFINED_OBJECT),
|
|
|
|
errmsg("type %s is only a shell",
|
|
|
|
format_type_be(type))));
|
2003-05-09 20:08:48 +02:00
|
|
|
if (!OidIsValid(pt->typinput))
|
2003-07-25 22:18:01 +02:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_UNDEFINED_FUNCTION),
|
|
|
|
errmsg("no input function available for type %s",
|
|
|
|
format_type_be(type))));
|
2002-08-29 02:17:06 +02:00
|
|
|
|
|
|
|
*typInput = pt->typinput;
|
2004-06-06 02:41:28 +02:00
|
|
|
*typIOParam = getTypeIOParam(typeTuple);
|
2002-08-29 02:17:06 +02:00
|
|
|
|
|
|
|
ReleaseSysCache(typeTuple);
|
|
|
|
}
|
|
|
|
|
2002-08-22 02:01:51 +02:00
|
|
|
/*
|
|
|
|
* getTypeOutputInfo
|
|
|
|
*
|
|
|
|
* Get info needed for printing values of a type
|
|
|
|
*/
|
2003-05-09 20:08:48 +02:00
|
|
|
void
|
2005-05-01 20:56:19 +02:00
|
|
|
getTypeOutputInfo(Oid type, Oid *typOutput, bool *typIsVarlena)
|
2002-08-22 02:01:51 +02:00
|
|
|
{
|
|
|
|
HeapTuple typeTuple;
|
|
|
|
Form_pg_type pt;
|
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
typeTuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type));
|
2002-08-22 02:01:51 +02:00
|
|
|
if (!HeapTupleIsValid(typeTuple))
|
2003-07-25 22:18:01 +02:00
|
|
|
elog(ERROR, "cache lookup failed for type %u", type);
|
2002-08-22 02:01:51 +02:00
|
|
|
pt = (Form_pg_type) GETSTRUCT(typeTuple);
|
|
|
|
|
2003-05-09 20:08:48 +02:00
|
|
|
if (!pt->typisdefined)
|
2003-07-25 22:18:01 +02:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_UNDEFINED_OBJECT),
|
|
|
|
errmsg("type %s is only a shell",
|
|
|
|
format_type_be(type))));
|
2003-05-09 20:08:48 +02:00
|
|
|
if (!OidIsValid(pt->typoutput))
|
2003-07-25 22:18:01 +02:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_UNDEFINED_FUNCTION),
|
|
|
|
errmsg("no output function available for type %s",
|
|
|
|
format_type_be(type))));
|
2003-05-09 20:08:48 +02:00
|
|
|
|
2002-08-22 02:01:51 +02:00
|
|
|
*typOutput = pt->typoutput;
|
|
|
|
*typIsVarlena = (!pt->typbyval) && (pt->typlen == -1);
|
2003-05-09 20:08:48 +02:00
|
|
|
|
|
|
|
ReleaseSysCache(typeTuple);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* getTypeBinaryInputInfo
|
|
|
|
*
|
|
|
|
* Get info needed for binary input of values of a type
|
|
|
|
*/
|
|
|
|
void
|
2004-06-06 02:41:28 +02:00
|
|
|
getTypeBinaryInputInfo(Oid type, Oid *typReceive, Oid *typIOParam)
|
2003-05-09 20:08:48 +02:00
|
|
|
{
|
|
|
|
HeapTuple typeTuple;
|
|
|
|
Form_pg_type pt;
|
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
typeTuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type));
|
2003-05-09 20:08:48 +02:00
|
|
|
if (!HeapTupleIsValid(typeTuple))
|
2003-07-25 22:18:01 +02:00
|
|
|
elog(ERROR, "cache lookup failed for type %u", type);
|
2003-05-09 20:08:48 +02:00
|
|
|
pt = (Form_pg_type) GETSTRUCT(typeTuple);
|
|
|
|
|
|
|
|
if (!pt->typisdefined)
|
2003-07-25 22:18:01 +02:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_UNDEFINED_OBJECT),
|
|
|
|
errmsg("type %s is only a shell",
|
|
|
|
format_type_be(type))));
|
2003-05-09 20:08:48 +02:00
|
|
|
if (!OidIsValid(pt->typreceive))
|
2003-07-25 22:18:01 +02:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_UNDEFINED_FUNCTION),
|
|
|
|
errmsg("no binary input function available for type %s",
|
|
|
|
format_type_be(type))));
|
2003-05-09 20:08:48 +02:00
|
|
|
|
|
|
|
*typReceive = pt->typreceive;
|
2004-06-06 02:41:28 +02:00
|
|
|
*typIOParam = getTypeIOParam(typeTuple);
|
2003-05-09 20:08:48 +02:00
|
|
|
|
|
|
|
ReleaseSysCache(typeTuple);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* getTypeBinaryOutputInfo
|
|
|
|
*
|
|
|
|
* Get info needed for binary output of values of a type
|
|
|
|
*/
|
|
|
|
void
|
2005-05-01 20:56:19 +02:00
|
|
|
getTypeBinaryOutputInfo(Oid type, Oid *typSend, bool *typIsVarlena)
|
2003-05-09 20:08:48 +02:00
|
|
|
{
|
|
|
|
HeapTuple typeTuple;
|
|
|
|
Form_pg_type pt;
|
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
typeTuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type));
|
2003-05-09 20:08:48 +02:00
|
|
|
if (!HeapTupleIsValid(typeTuple))
|
2003-07-25 22:18:01 +02:00
|
|
|
elog(ERROR, "cache lookup failed for type %u", type);
|
2003-05-09 20:08:48 +02:00
|
|
|
pt = (Form_pg_type) GETSTRUCT(typeTuple);
|
|
|
|
|
|
|
|
if (!pt->typisdefined)
|
2003-07-25 22:18:01 +02:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_UNDEFINED_OBJECT),
|
|
|
|
errmsg("type %s is only a shell",
|
|
|
|
format_type_be(type))));
|
2003-05-09 20:08:48 +02:00
|
|
|
if (!OidIsValid(pt->typsend))
|
2003-07-25 22:18:01 +02:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_UNDEFINED_FUNCTION),
|
|
|
|
errmsg("no binary output function available for type %s",
|
|
|
|
format_type_be(type))));
|
2003-05-09 20:08:48 +02:00
|
|
|
|
|
|
|
*typSend = pt->typsend;
|
|
|
|
*typIsVarlena = (!pt->typbyval) && (pt->typlen == -1);
|
|
|
|
|
2002-08-22 02:01:51 +02:00
|
|
|
ReleaseSysCache(typeTuple);
|
|
|
|
}
|
|
|
|
|
2006-12-30 22:21:56 +01:00
|
|
|
/*
|
|
|
|
* get_typmodin
|
|
|
|
*
|
|
|
|
* Given the type OID, return the type's typmodin procedure, if any.
|
|
|
|
*/
|
|
|
|
Oid
|
|
|
|
get_typmodin(Oid typid)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid));
|
2006-12-30 22:21:56 +01:00
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
|
|
|
Form_pg_type typtup = (Form_pg_type) GETSTRUCT(tp);
|
|
|
|
Oid result;
|
|
|
|
|
|
|
|
result = typtup->typmodin;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
return InvalidOid;
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef NOT_USED
/*
 * get_typmodout
 *
 *		Given the type OID, return the type's typmodout procedure, if any.
 */
Oid
get_typmodout(Oid typid)
{
	HeapTuple	typetup;
	Oid			modout = InvalidOid;

	typetup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid));
	if (HeapTupleIsValid(typetup))
	{
		modout = ((Form_pg_type) GETSTRUCT(typetup))->typmodout;
		ReleaseSysCache(typetup);
	}
	return modout;
}
#endif							/* NOT_USED */
|
|
|
|
|
2011-02-08 22:04:18 +01:00
|
|
|
/*
|
|
|
|
* get_typcollation
|
|
|
|
*
|
|
|
|
* Given the type OID, return the type's typcollation attribute.
|
|
|
|
*/
|
|
|
|
Oid
|
|
|
|
get_typcollation(Oid typid)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
|
|
|
|
tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid));
|
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
|
|
|
Form_pg_type typtup = (Form_pg_type) GETSTRUCT(tp);
|
|
|
|
Oid result;
|
|
|
|
|
|
|
|
result = typtup->typcollation;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
return InvalidOid;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* type_is_collatable
|
|
|
|
*
|
|
|
|
* Return whether the type cares about collations
|
|
|
|
*/
|
|
|
|
bool
|
|
|
|
type_is_collatable(Oid typid)
|
|
|
|
{
|
|
|
|
return OidIsValid(get_typcollation(typid));
|
|
|
|
}
|
|
|
|
|
2001-05-07 02:43:27 +02:00
|
|
|
|
Support subscripting of arbitrary types, not only arrays.
This patch generalizes the subscripting infrastructure so that any
data type can be subscripted, if it provides a handler function to
define what that means. Traditional variable-length (varlena) arrays
all use array_subscript_handler(), while the existing fixed-length
types that support subscripting use raw_array_subscript_handler().
It's expected that other types that want to use subscripting notation
will define their own handlers. (This patch provides no such new
features, though; it only lays the foundation for them.)
To do this, move the parser's semantic processing of subscripts
(including coercion to whatever data type is required) into a
method callback supplied by the handler. On the execution side,
replace the ExecEvalSubscriptingRef* layer of functions with direct
calls to callback-supplied execution routines. (Thus, essentially
no new run-time overhead should be caused by this patch. Indeed,
there is room to remove some overhead by supplying specialized
execution routines. This patch does a little bit in that line,
but more could be done.)
Additional work is required here and there to remove formerly
hard-wired assumptions about the result type, collation, etc
of a SubscriptingRef expression node; and to remove assumptions
that the subscript values must be integers.
One useful side-effect of this is that we now have a less squishy
mechanism for identifying whether a data type is a "true" array:
instead of wiring in weird rules about typlen, we can look to see
if pg_type.typsubscript == F_ARRAY_SUBSCRIPT_HANDLER. For this
to be bulletproof, we have to forbid user-defined types from using
that handler directly; but there seems no good reason for them to
do so.
This patch also removes assumptions that the number of subscripts
is limited to MAXDIM (6), or indeed has any hard-wired limit.
That limit still applies to types handled by array_subscript_handler
or raw_array_subscript_handler, but to discourage other dependencies
on this constant, I've moved it from c.h to utils/array.h.
Dmitry Dolgov, reviewed at various times by Tom Lane, Arthur Zakirov,
Peter Eisentraut, Pavel Stehule
Discussion: https://postgr.es/m/CA+q6zcVDuGBv=M0FqBYX8DPebS3F_0KQ6OVFobGJPM507_SZ_w@mail.gmail.com
Discussion: https://postgr.es/m/CA+q6zcVovR+XY4mfk-7oNk-rF91gH0PebnNfuUjuuDsyHjOcVA@mail.gmail.com
2020-12-09 18:40:37 +01:00
|
|
|
/*
|
|
|
|
* get_typsubscript
|
|
|
|
*
|
|
|
|
* Given the type OID, return the type's subscripting handler's OID,
|
|
|
|
* if it has one.
|
|
|
|
*
|
|
|
|
* If typelemp isn't NULL, we also store the type's typelem value there.
|
|
|
|
* This saves some callers an extra catalog lookup.
|
|
|
|
*/
|
|
|
|
RegProcedure
|
|
|
|
get_typsubscript(Oid typid, Oid *typelemp)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
|
|
|
|
tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid));
|
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
|
|
|
Form_pg_type typform = (Form_pg_type) GETSTRUCT(tp);
|
|
|
|
RegProcedure handler = typform->typsubscript;
|
|
|
|
|
|
|
|
if (typelemp)
|
|
|
|
*typelemp = typform->typelem;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return handler;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
if (typelemp)
|
|
|
|
*typelemp = InvalidOid;
|
|
|
|
return InvalidOid;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* getSubscriptingRoutines
|
|
|
|
*
|
|
|
|
* Given the type OID, fetch the type's subscripting methods struct.
|
2020-12-11 23:54:10 +01:00
|
|
|
* Return NULL if type is not subscriptable.
|
Support subscripting of arbitrary types, not only arrays.
This patch generalizes the subscripting infrastructure so that any
data type can be subscripted, if it provides a handler function to
define what that means. Traditional variable-length (varlena) arrays
all use array_subscript_handler(), while the existing fixed-length
types that support subscripting use raw_array_subscript_handler().
It's expected that other types that want to use subscripting notation
will define their own handlers. (This patch provides no such new
features, though; it only lays the foundation for them.)
To do this, move the parser's semantic processing of subscripts
(including coercion to whatever data type is required) into a
method callback supplied by the handler. On the execution side,
replace the ExecEvalSubscriptingRef* layer of functions with direct
calls to callback-supplied execution routines. (Thus, essentially
no new run-time overhead should be caused by this patch. Indeed,
there is room to remove some overhead by supplying specialized
execution routines. This patch does a little bit in that line,
but more could be done.)
Additional work is required here and there to remove formerly
hard-wired assumptions about the result type, collation, etc
of a SubscriptingRef expression node; and to remove assumptions
that the subscript values must be integers.
One useful side-effect of this is that we now have a less squishy
mechanism for identifying whether a data type is a "true" array:
instead of wiring in weird rules about typlen, we can look to see
if pg_type.typsubscript == F_ARRAY_SUBSCRIPT_HANDLER. For this
to be bulletproof, we have to forbid user-defined types from using
that handler directly; but there seems no good reason for them to
do so.
This patch also removes assumptions that the number of subscripts
is limited to MAXDIM (6), or indeed has any hard-wired limit.
That limit still applies to types handled by array_subscript_handler
or raw_array_subscript_handler, but to discourage other dependencies
on this constant, I've moved it from c.h to utils/array.h.
Dmitry Dolgov, reviewed at various times by Tom Lane, Arthur Zakirov,
Peter Eisentraut, Pavel Stehule
Discussion: https://postgr.es/m/CA+q6zcVDuGBv=M0FqBYX8DPebS3F_0KQ6OVFobGJPM507_SZ_w@mail.gmail.com
Discussion: https://postgr.es/m/CA+q6zcVovR+XY4mfk-7oNk-rF91gH0PebnNfuUjuuDsyHjOcVA@mail.gmail.com
2020-12-09 18:40:37 +01:00
|
|
|
*
|
|
|
|
* If typelemp isn't NULL, we also store the type's typelem value there.
|
|
|
|
* This saves some callers an extra catalog lookup.
|
|
|
|
*/
|
|
|
|
const struct SubscriptRoutines *
|
|
|
|
getSubscriptingRoutines(Oid typid, Oid *typelemp)
|
|
|
|
{
|
|
|
|
RegProcedure typsubscript = get_typsubscript(typid, typelemp);
|
|
|
|
|
|
|
|
if (!OidIsValid(typsubscript))
|
2020-12-11 23:54:10 +01:00
|
|
|
return NULL;
|
Support subscripting of arbitrary types, not only arrays.
This patch generalizes the subscripting infrastructure so that any
data type can be subscripted, if it provides a handler function to
define what that means. Traditional variable-length (varlena) arrays
all use array_subscript_handler(), while the existing fixed-length
types that support subscripting use raw_array_subscript_handler().
It's expected that other types that want to use subscripting notation
will define their own handlers. (This patch provides no such new
features, though; it only lays the foundation for them.)
To do this, move the parser's semantic processing of subscripts
(including coercion to whatever data type is required) into a
method callback supplied by the handler. On the execution side,
replace the ExecEvalSubscriptingRef* layer of functions with direct
calls to callback-supplied execution routines. (Thus, essentially
no new run-time overhead should be caused by this patch. Indeed,
there is room to remove some overhead by supplying specialized
execution routines. This patch does a little bit in that line,
but more could be done.)
Additional work is required here and there to remove formerly
hard-wired assumptions about the result type, collation, etc
of a SubscriptingRef expression node; and to remove assumptions
that the subscript values must be integers.
One useful side-effect of this is that we now have a less squishy
mechanism for identifying whether a data type is a "true" array:
instead of wiring in weird rules about typlen, we can look to see
if pg_type.typsubscript == F_ARRAY_SUBSCRIPT_HANDLER. For this
to be bulletproof, we have to forbid user-defined types from using
that handler directly; but there seems no good reason for them to
do so.
This patch also removes assumptions that the number of subscripts
is limited to MAXDIM (6), or indeed has any hard-wired limit.
That limit still applies to types handled by array_subscript_handler
or raw_array_subscript_handler, but to discourage other dependencies
on this constant, I've moved it from c.h to utils/array.h.
Dmitry Dolgov, reviewed at various times by Tom Lane, Arthur Zakirov,
Peter Eisentraut, Pavel Stehule
Discussion: https://postgr.es/m/CA+q6zcVDuGBv=M0FqBYX8DPebS3F_0KQ6OVFobGJPM507_SZ_w@mail.gmail.com
Discussion: https://postgr.es/m/CA+q6zcVovR+XY4mfk-7oNk-rF91gH0PebnNfuUjuuDsyHjOcVA@mail.gmail.com
2020-12-09 18:40:37 +01:00
|
|
|
|
|
|
|
return (const struct SubscriptRoutines *)
|
|
|
|
DatumGetPointer(OidFunctionCall0(typsubscript));
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2001-05-07 02:43:27 +02:00
|
|
|
/* ---------- STATISTICS CACHE ---------- */
|
|
|
|
|
2001-05-09 02:35:09 +02:00
|
|
|
/*
|
|
|
|
* get_attavgwidth
|
|
|
|
*
|
|
|
|
* Given the table and attribute number of a column, get the average
|
|
|
|
* width of entries in the column. Return zero if no data available.
|
2008-09-28 21:51:40 +02:00
|
|
|
*
|
2009-12-29 21:11:45 +01:00
|
|
|
* Currently this is only consulted for individual tables, not for inheritance
|
|
|
|
* trees, so we don't need an "inh" parameter.
|
|
|
|
*
|
2008-09-28 21:51:40 +02:00
|
|
|
* Calling a hook at this point looks somewhat strange, but is required
|
|
|
|
* because the optimizer calls this function without any other way for
|
|
|
|
* plug-ins to control the result.
|
2001-05-09 02:35:09 +02:00
|
|
|
*/
|
|
|
|
int32
|
|
|
|
get_attavgwidth(Oid relid, AttrNumber attnum)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
2008-09-28 21:51:40 +02:00
|
|
|
int32 stawidth;
|
2001-05-09 02:35:09 +02:00
|
|
|
|
2008-09-28 21:51:40 +02:00
|
|
|
if (get_attavgwidth_hook)
|
|
|
|
{
|
|
|
|
stawidth = (*get_attavgwidth_hook) (relid, attnum);
|
|
|
|
if (stawidth > 0)
|
|
|
|
return stawidth;
|
|
|
|
}
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache3(STATRELATTINH,
|
|
|
|
ObjectIdGetDatum(relid),
|
|
|
|
Int16GetDatum(attnum),
|
|
|
|
BoolGetDatum(false));
|
2001-05-09 02:35:09 +02:00
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
2008-09-28 21:51:40 +02:00
|
|
|
stawidth = ((Form_pg_statistic) GETSTRUCT(tp))->stawidth;
|
2001-05-09 02:35:09 +02:00
|
|
|
ReleaseSysCache(tp);
|
|
|
|
if (stawidth > 0)
|
|
|
|
return stawidth;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2001-05-07 02:43:27 +02:00
|
|
|
/*
|
|
|
|
* get_attstatsslot
|
|
|
|
*
|
|
|
|
* Extract the contents of a "slot" of a pg_statistic tuple.
|
2017-08-16 06:22:32 +02:00
|
|
|
* Returns true if requested slot type was found, else false.
|
2001-05-07 02:43:27 +02:00
|
|
|
*
|
|
|
|
* Unlike other routines in this file, this takes a pointer to an
|
|
|
|
* already-looked-up tuple in the pg_statistic cache. We do this since
|
|
|
|
* most callers will want to extract more than one value from the cache
|
|
|
|
* entry, and we don't want to repeat the cache lookup unnecessarily.
|
2008-09-28 21:51:40 +02:00
|
|
|
* Also, this API allows this routine to be used with statistics tuples
|
|
|
|
* that have been provided by a stats hook and didn't really come from
|
|
|
|
* pg_statistic.
|
2001-05-07 02:43:27 +02:00
|
|
|
*
|
Redesign get_attstatsslot()/free_attstatsslot() for more safety and speed.
The mess cleaned up in commit da0759600 is clear evidence that it's a
bug hazard to expect the caller of get_attstatsslot()/free_attstatsslot()
to provide the correct type OID for the array elements in the slot.
Moreover, we weren't even getting any performance benefit from that,
since get_attstatsslot() was extracting the real type OID from the array
anyway. So we ought to get rid of that requirement; indeed, it would
make more sense for get_attstatsslot() to pass back the type OID it found,
in case the caller isn't sure what to expect, which is likely in binary-
compatible-operator cases.
Another problem with the current implementation is that if the stats array
element type is pass-by-reference, we incur a palloc/memcpy/pfree cycle
for each element. That seemed acceptable when the code was written because
we were targeting O(10) array sizes --- but these days, stats arrays are
almost always bigger than that, sometimes much bigger. We can save a
significant number of cycles by doing one palloc/memcpy/pfree of the whole
array. Indeed, in the now-probably-common case where the array is toasted,
that happens anyway so this method is basically free. (Note: although the
catcache code will inline any out-of-line toasted values, it doesn't
decompress them. At the other end of the size range, it doesn't expand
short-header datums either. In either case, DatumGetArrayTypeP would have
to make a copy. We do end up using an extra array copy step if the element
type is pass-by-value and the array length is neither small enough for a
short header nor large enough to have suffered compression. But that
seems like a very acceptable price for winning in pass-by-ref cases.)
Hence, redesign to take these insights into account. While at it,
convert to an API in which we fill a struct rather than passing a bunch
of pointers to individual output arguments. That will make it less
painful if we ever want further expansion of what get_attstatsslot can
pass back.
It's certainly arguable that this is new development and not something to
push post-feature-freeze. However, I view it as primarily bug-proofing
and therefore something that's better to have sooner not later. Since
we aren't quite at beta phase yet, let's put it in.
Discussion: https://postgr.es/m/16364.1494520862@sss.pgh.pa.us
2017-05-13 21:14:39 +02:00
|
|
|
* sslot: pointer to output area (typically, a local variable in the caller).
|
2017-01-25 20:35:31 +01:00
|
|
|
* statstuple: pg_statistic tuple to be examined.
|
2001-05-07 02:43:27 +02:00
|
|
|
* reqkind: STAKIND code for desired statistics slot kind.
|
|
|
|
* reqop: STAOP value wanted, or InvalidOid if don't care.
|
Redesign get_attstatsslot()/free_attstatsslot() for more safety and speed.
The mess cleaned up in commit da0759600 is clear evidence that it's a
bug hazard to expect the caller of get_attstatsslot()/free_attstatsslot()
to provide the correct type OID for the array elements in the slot.
Moreover, we weren't even getting any performance benefit from that,
since get_attstatsslot() was extracting the real type OID from the array
anyway. So we ought to get rid of that requirement; indeed, it would
make more sense for get_attstatsslot() to pass back the type OID it found,
in case the caller isn't sure what to expect, which is likely in binary-
compatible-operator cases.
Another problem with the current implementation is that if the stats array
element type is pass-by-reference, we incur a palloc/memcpy/pfree cycle
for each element. That seemed acceptable when the code was written because
we were targeting O(10) array sizes --- but these days, stats arrays are
almost always bigger than that, sometimes much bigger. We can save a
significant number of cycles by doing one palloc/memcpy/pfree of the whole
array. Indeed, in the now-probably-common case where the array is toasted,
that happens anyway so this method is basically free. (Note: although the
catcache code will inline any out-of-line toasted values, it doesn't
decompress them. At the other end of the size range, it doesn't expand
short-header datums either. In either case, DatumGetArrayTypeP would have
to make a copy. We do end up using an extra array copy step if the element
type is pass-by-value and the array length is neither small enough for a
short header nor large enough to have suffered compression. But that
seems like a very acceptable price for winning in pass-by-ref cases.)
Hence, redesign to take these insights into account. While at it,
convert to an API in which we fill a struct rather than passing a bunch
of pointers to individual output arguments. That will make it less
painful if we ever want further expansion of what get_attstatsslot can
pass back.
It's certainly arguable that this is new development and not something to
push post-feature-freeze. However, I view it as primarily bug-proofing
and therefore something that's better to have sooner not later. Since
we aren't quite at beta phase yet, let's put it in.
Discussion: https://postgr.es/m/16364.1494520862@sss.pgh.pa.us
2017-05-13 21:14:39 +02:00
|
|
|
* flags: bitmask of ATTSTATSSLOT_VALUES and/or ATTSTATSSLOT_NUMBERS.
|
2001-05-07 02:43:27 +02:00
|
|
|
*
|
2017-08-16 06:22:32 +02:00
|
|
|
* If a matching slot is found, true is returned, and *sslot is filled thus:
|
Redesign get_attstatsslot()/free_attstatsslot() for more safety and speed.
The mess cleaned up in commit da0759600 is clear evidence that it's a
bug hazard to expect the caller of get_attstatsslot()/free_attstatsslot()
to provide the correct type OID for the array elements in the slot.
Moreover, we weren't even getting any performance benefit from that,
since get_attstatsslot() was extracting the real type OID from the array
anyway. So we ought to get rid of that requirement; indeed, it would
make more sense for get_attstatsslot() to pass back the type OID it found,
in case the caller isn't sure what to expect, which is likely in binary-
compatible-operator cases.
Another problem with the current implementation is that if the stats array
element type is pass-by-reference, we incur a palloc/memcpy/pfree cycle
for each element. That seemed acceptable when the code was written because
we were targeting O(10) array sizes --- but these days, stats arrays are
almost always bigger than that, sometimes much bigger. We can save a
significant number of cycles by doing one palloc/memcpy/pfree of the whole
array. Indeed, in the now-probably-common case where the array is toasted,
that happens anyway so this method is basically free. (Note: although the
catcache code will inline any out-of-line toasted values, it doesn't
decompress them. At the other end of the size range, it doesn't expand
short-header datums either. In either case, DatumGetArrayTypeP would have
to make a copy. We do end up using an extra array copy step if the element
type is pass-by-value and the array length is neither small enough for a
short header nor large enough to have suffered compression. But that
seems like a very acceptable price for winning in pass-by-ref cases.)
Hence, redesign to take these insights into account. While at it,
convert to an API in which we fill a struct rather than passing a bunch
of pointers to individual output arguments. That will make it less
painful if we ever want further expansion of what get_attstatsslot can
pass back.
It's certainly arguable that this is new development and not something to
push post-feature-freeze. However, I view it as primarily bug-proofing
and therefore something that's better to have sooner not later. Since
we aren't quite at beta phase yet, let's put it in.
Discussion: https://postgr.es/m/16364.1494520862@sss.pgh.pa.us
2017-05-13 21:14:39 +02:00
|
|
|
* staop: receives the actual STAOP value.
|
Make pg_statistic and related code account more honestly for collations.
When we first put in collations support, we basically punted on teaching
pg_statistic, ANALYZE, and the planner selectivity functions about that.
They've just used DEFAULT_COLLATION_OID independently of the actual
collation of the data. It's time to improve that, so:
* Add columns to pg_statistic that record the specific collation associated
with each statistics slot.
* Teach ANALYZE to use the column's actual collation when comparing values
for statistical purposes, and record this in the appropriate slot. (Note
that type-specific typanalyze functions are now expected to fill
stats->stacoll with the appropriate collation, too.)
* Teach assorted selectivity functions to use the actual collation of
the stats they are looking at, instead of just assuming it's
DEFAULT_COLLATION_OID.
This should give noticeably better results in selectivity estimates for
columns with nondefault collations, at least for query clauses that use
that same collation (which would be the default behavior in most cases).
It's still true that comparisons with explicit COLLATE clauses different
from the stored data's collation won't be well-estimated, but that's no
worse than before. Also, this patch does make the first step towards
doing better with that, which is that it's now theoretically possible to
collect stats for a collation other than the column's own collation.
Patch by me; thanks to Peter Eisentraut for review.
Discussion: https://postgr.es/m/14706.1544630227@sss.pgh.pa.us
2018-12-14 18:52:49 +01:00
|
|
|
* stacoll: receives the actual STACOLL value.
|
Redesign get_attstatsslot()/free_attstatsslot() for more safety and speed.
The mess cleaned up in commit da0759600 is clear evidence that it's a
bug hazard to expect the caller of get_attstatsslot()/free_attstatsslot()
to provide the correct type OID for the array elements in the slot.
Moreover, we weren't even getting any performance benefit from that,
since get_attstatsslot() was extracting the real type OID from the array
anyway. So we ought to get rid of that requirement; indeed, it would
make more sense for get_attstatsslot() to pass back the type OID it found,
in case the caller isn't sure what to expect, which is likely in binary-
compatible-operator cases.
Another problem with the current implementation is that if the stats array
element type is pass-by-reference, we incur a palloc/memcpy/pfree cycle
for each element. That seemed acceptable when the code was written because
we were targeting O(10) array sizes --- but these days, stats arrays are
almost always bigger than that, sometimes much bigger. We can save a
significant number of cycles by doing one palloc/memcpy/pfree of the whole
array. Indeed, in the now-probably-common case where the array is toasted,
that happens anyway so this method is basically free. (Note: although the
catcache code will inline any out-of-line toasted values, it doesn't
decompress them. At the other end of the size range, it doesn't expand
short-header datums either. In either case, DatumGetArrayTypeP would have
to make a copy. We do end up using an extra array copy step if the element
type is pass-by-value and the array length is neither small enough for a
short header nor large enough to have suffered compression. But that
seems like a very acceptable price for winning in pass-by-ref cases.)
Hence, redesign to take these insights into account. While at it,
convert to an API in which we fill a struct rather than passing a bunch
of pointers to individual output arguments. That will make it less
painful if we ever want further expansion of what get_attstatsslot can
pass back.
It's certainly arguable that this is new development and not something to
push post-feature-freeze. However, I view it as primarily bug-proofing
and therefore something that's better to have sooner not later. Since
we aren't quite at beta phase yet, let's put it in.
Discussion: https://postgr.es/m/16364.1494520862@sss.pgh.pa.us
2017-05-13 21:14:39 +02:00
|
|
|
* valuetype: receives actual datatype of the elements of stavalues.
|
|
|
|
* values: receives pointer to an array of the slot's stavalues.
|
|
|
|
* nvalues: receives number of stavalues.
|
|
|
|
* numbers: receives pointer to an array of the slot's stanumbers (as float4).
|
|
|
|
* nnumbers: receives number of stanumbers.
|
2010-07-10 00:57:39 +02:00
|
|
|
*
|
Redesign get_attstatsslot()/free_attstatsslot() for more safety and speed.
The mess cleaned up in commit da0759600 is clear evidence that it's a
bug hazard to expect the caller of get_attstatsslot()/free_attstatsslot()
to provide the correct type OID for the array elements in the slot.
Moreover, we weren't even getting any performance benefit from that,
since get_attstatsslot() was extracting the real type OID from the array
anyway. So we ought to get rid of that requirement; indeed, it would
make more sense for get_attstatsslot() to pass back the type OID it found,
in case the caller isn't sure what to expect, which is likely in binary-
compatible-operator cases.
Another problem with the current implementation is that if the stats array
element type is pass-by-reference, we incur a palloc/memcpy/pfree cycle
for each element. That seemed acceptable when the code was written because
we were targeting O(10) array sizes --- but these days, stats arrays are
almost always bigger than that, sometimes much bigger. We can save a
significant number of cycles by doing one palloc/memcpy/pfree of the whole
array. Indeed, in the now-probably-common case where the array is toasted,
that happens anyway so this method is basically free. (Note: although the
catcache code will inline any out-of-line toasted values, it doesn't
decompress them. At the other end of the size range, it doesn't expand
short-header datums either. In either case, DatumGetArrayTypeP would have
to make a copy. We do end up using an extra array copy step if the element
type is pass-by-value and the array length is neither small enough for a
short header nor large enough to have suffered compression. But that
seems like a very acceptable price for winning in pass-by-ref cases.)
Hence, redesign to take these insights into account. While at it,
convert to an API in which we fill a struct rather than passing a bunch
of pointers to individual output arguments. That will make it less
painful if we ever want further expansion of what get_attstatsslot can
pass back.
It's certainly arguable that this is new development and not something to
push post-feature-freeze. However, I view it as primarily bug-proofing
and therefore something that's better to have sooner not later. Since
we aren't quite at beta phase yet, let's put it in.
Discussion: https://postgr.es/m/16364.1494520862@sss.pgh.pa.us
2017-05-13 21:14:39 +02:00
|
|
|
* valuetype/values/nvalues are InvalidOid/NULL/0 if ATTSTATSSLOT_VALUES
|
|
|
|
* wasn't specified. Likewise, numbers/nnumbers are NULL/0 if
|
|
|
|
* ATTSTATSSLOT_NUMBERS wasn't specified.
|
|
|
|
*
|
2017-08-16 06:22:32 +02:00
|
|
|
* If no matching slot is found, false is returned, and *sslot is zeroed.
|
Redesign get_attstatsslot()/free_attstatsslot() for more safety and speed.
The mess cleaned up in commit da0759600 is clear evidence that it's a
bug hazard to expect the caller of get_attstatsslot()/free_attstatsslot()
to provide the correct type OID for the array elements in the slot.
Moreover, we weren't even getting any performance benefit from that,
since get_attstatsslot() was extracting the real type OID from the array
anyway. So we ought to get rid of that requirement; indeed, it would
make more sense for get_attstatsslot() to pass back the type OID it found,
in case the caller isn't sure what to expect, which is likely in binary-
compatible-operator cases.
Another problem with the current implementation is that if the stats array
element type is pass-by-reference, we incur a palloc/memcpy/pfree cycle
for each element. That seemed acceptable when the code was written because
we were targeting O(10) array sizes --- but these days, stats arrays are
almost always bigger than that, sometimes much bigger. We can save a
significant number of cycles by doing one palloc/memcpy/pfree of the whole
array. Indeed, in the now-probably-common case where the array is toasted,
that happens anyway so this method is basically free. (Note: although the
catcache code will inline any out-of-line toasted values, it doesn't
decompress them. At the other end of the size range, it doesn't expand
short-header datums either. In either case, DatumGetArrayTypeP would have
to make a copy. We do end up using an extra array copy step if the element
type is pass-by-value and the array length is neither small enough for a
short header nor large enough to have suffered compression. But that
seems like a very acceptable price for winning in pass-by-ref cases.)
Hence, redesign to take these insights into account. While at it,
convert to an API in which we fill a struct rather than passing a bunch
of pointers to individual output arguments. That will make it less
painful if we ever want further expansion of what get_attstatsslot can
pass back.
It's certainly arguable that this is new development and not something to
push post-feature-freeze. However, I view it as primarily bug-proofing
and therefore something that's better to have sooner not later. Since
we aren't quite at beta phase yet, let's put it in.
Discussion: https://postgr.es/m/16364.1494520862@sss.pgh.pa.us
2017-05-13 21:14:39 +02:00
|
|
|
*
|
Make pg_statistic and related code account more honestly for collations.
When we first put in collations support, we basically punted on teaching
pg_statistic, ANALYZE, and the planner selectivity functions about that.
They've just used DEFAULT_COLLATION_OID independently of the actual
collation of the data. It's time to improve that, so:
* Add columns to pg_statistic that record the specific collation associated
with each statistics slot.
* Teach ANALYZE to use the column's actual collation when comparing values
for statistical purposes, and record this in the appropriate slot. (Note
that type-specific typanalyze functions are now expected to fill
stats->stacoll with the appropriate collation, too.)
* Teach assorted selectivity functions to use the actual collation of
the stats they are looking at, instead of just assuming it's
DEFAULT_COLLATION_OID.
This should give noticeably better results in selectivity estimates for
columns with nondefault collations, at least for query clauses that use
that same collation (which would be the default behavior in most cases).
It's still true that comparisons with explicit COLLATE clauses different
from the stored data's collation won't be well-estimated, but that's no
worse than before. Also, this patch does make the first step towards
doing better with that, which is that it's now theoretically possible to
collect stats for a collation other than the column's own collation.
Patch by me; thanks to Peter Eisentraut for review.
Discussion: https://postgr.es/m/14706.1544630227@sss.pgh.pa.us
2018-12-14 18:52:49 +01:00
|
|
|
* Note that the current API doesn't allow for searching for a slot with
|
|
|
|
* a particular collation. If we ever actually support recording more than
|
|
|
|
* one collation, we'll have to extend the API, but for now simple is good.
|
|
|
|
*
|
Redesign get_attstatsslot()/free_attstatsslot() for more safety and speed.
The mess cleaned up in commit da0759600 is clear evidence that it's a
bug hazard to expect the caller of get_attstatsslot()/free_attstatsslot()
to provide the correct type OID for the array elements in the slot.
Moreover, we weren't even getting any performance benefit from that,
since get_attstatsslot() was extracting the real type OID from the array
anyway. So we ought to get rid of that requirement; indeed, it would
make more sense for get_attstatsslot() to pass back the type OID it found,
in case the caller isn't sure what to expect, which is likely in binary-
compatible-operator cases.
Another problem with the current implementation is that if the stats array
element type is pass-by-reference, we incur a palloc/memcpy/pfree cycle
for each element. That seemed acceptable when the code was written because
we were targeting O(10) array sizes --- but these days, stats arrays are
almost always bigger than that, sometimes much bigger. We can save a
significant number of cycles by doing one palloc/memcpy/pfree of the whole
array. Indeed, in the now-probably-common case where the array is toasted,
that happens anyway so this method is basically free. (Note: although the
catcache code will inline any out-of-line toasted values, it doesn't
decompress them. At the other end of the size range, it doesn't expand
short-header datums either. In either case, DatumGetArrayTypeP would have
to make a copy. We do end up using an extra array copy step if the element
type is pass-by-value and the array length is neither small enough for a
short header nor large enough to have suffered compression. But that
seems like a very acceptable price for winning in pass-by-ref cases.)
Hence, redesign to take these insights into account. While at it,
convert to an API in which we fill a struct rather than passing a bunch
of pointers to individual output arguments. That will make it less
painful if we ever want further expansion of what get_attstatsslot can
pass back.
It's certainly arguable that this is new development and not something to
push post-feature-freeze. However, I view it as primarily bug-proofing
and therefore something that's better to have sooner not later. Since
we aren't quite at beta phase yet, let's put it in.
Discussion: https://postgr.es/m/16364.1494520862@sss.pgh.pa.us
2017-05-13 21:14:39 +02:00
|
|
|
* The data referred to by the fields of sslot is locally palloc'd and
|
|
|
|
* is independent of the original pg_statistic tuple. When the caller
|
|
|
|
* is done with it, call free_attstatsslot to release the palloc'd data.
|
|
|
|
*
|
|
|
|
* If it's desirable to call free_attstatsslot when get_attstatsslot might
|
|
|
|
* not have been called, memset'ing sslot to zeroes will allow that.
|
2022-11-18 17:01:03 +01:00
|
|
|
*
|
|
|
|
* Passing flags=0 can be useful to quickly check if the requested slot type
|
|
|
|
* exists. In this case no arrays are extracted, so free_attstatsslot need
|
|
|
|
* not be called.
|
2001-05-07 02:43:27 +02:00
|
|
|
*/
|
|
|
|
bool
|
Redesign get_attstatsslot()/free_attstatsslot() for more safety and speed.
The mess cleaned up in commit da0759600 is clear evidence that it's a
bug hazard to expect the caller of get_attstatsslot()/free_attstatsslot()
to provide the correct type OID for the array elements in the slot.
Moreover, we weren't even getting any performance benefit from that,
since get_attstatsslot() was extracting the real type OID from the array
anyway. So we ought to get rid of that requirement; indeed, it would
make more sense for get_attstatsslot() to pass back the type OID it found,
in case the caller isn't sure what to expect, which is likely in binary-
compatible-operator cases.
Another problem with the current implementation is that if the stats array
element type is pass-by-reference, we incur a palloc/memcpy/pfree cycle
for each element. That seemed acceptable when the code was written because
we were targeting O(10) array sizes --- but these days, stats arrays are
almost always bigger than that, sometimes much bigger. We can save a
significant number of cycles by doing one palloc/memcpy/pfree of the whole
array. Indeed, in the now-probably-common case where the array is toasted,
that happens anyway so this method is basically free. (Note: although the
catcache code will inline any out-of-line toasted values, it doesn't
decompress them. At the other end of the size range, it doesn't expand
short-header datums either. In either case, DatumGetArrayTypeP would have
to make a copy. We do end up using an extra array copy step if the element
type is pass-by-value and the array length is neither small enough for a
short header nor large enough to have suffered compression. But that
seems like a very acceptable price for winning in pass-by-ref cases.)
Hence, redesign to take these insights into account. While at it,
convert to an API in which we fill a struct rather than passing a bunch
of pointers to individual output arguments. That will make it less
painful if we ever want further expansion of what get_attstatsslot can
pass back.
It's certainly arguable that this is new development and not something to
push post-feature-freeze. However, I view it as primarily bug-proofing
and therefore something that's better to have sooner not later. Since
we aren't quite at beta phase yet, let's put it in.
Discussion: https://postgr.es/m/16364.1494520862@sss.pgh.pa.us
2017-05-13 21:14:39 +02:00
|
|
|
get_attstatsslot(AttStatsSlot *sslot, HeapTuple statstuple,
|
|
|
|
int reqkind, Oid reqop, int flags)
|
2001-05-07 02:43:27 +02:00
|
|
|
{
|
|
|
|
Form_pg_statistic stats = (Form_pg_statistic) GETSTRUCT(statstuple);
|
Redesign get_attstatsslot()/free_attstatsslot() for more safety and speed.
The mess cleaned up in commit da0759600 is clear evidence that it's a
bug hazard to expect the caller of get_attstatsslot()/free_attstatsslot()
to provide the correct type OID for the array elements in the slot.
Moreover, we weren't even getting any performance benefit from that,
since get_attstatsslot() was extracting the real type OID from the array
anyway. So we ought to get rid of that requirement; indeed, it would
make more sense for get_attstatsslot() to pass back the type OID it found,
in case the caller isn't sure what to expect, which is likely in binary-
compatible-operator cases.
Another problem with the current implementation is that if the stats array
element type is pass-by-reference, we incur a palloc/memcpy/pfree cycle
for each element. That seemed acceptable when the code was written because
we were targeting O(10) array sizes --- but these days, stats arrays are
almost always bigger than that, sometimes much bigger. We can save a
significant number of cycles by doing one palloc/memcpy/pfree of the whole
array. Indeed, in the now-probably-common case where the array is toasted,
that happens anyway so this method is basically free. (Note: although the
catcache code will inline any out-of-line toasted values, it doesn't
decompress them. At the other end of the size range, it doesn't expand
short-header datums either. In either case, DatumGetArrayTypeP would have
to make a copy. We do end up using an extra array copy step if the element
type is pass-by-value and the array length is neither small enough for a
short header nor large enough to have suffered compression. But that
seems like a very acceptable price for winning in pass-by-ref cases.)
Hence, redesign to take these insights into account. While at it,
convert to an API in which we fill a struct rather than passing a bunch
of pointers to individual output arguments. That will make it less
painful if we ever want further expansion of what get_attstatsslot can
pass back.
It's certainly arguable that this is new development and not something to
push post-feature-freeze. However, I view it as primarily bug-proofing
and therefore something that's better to have sooner not later. Since
we aren't quite at beta phase yet, let's put it in.
Discussion: https://postgr.es/m/16364.1494520862@sss.pgh.pa.us
2017-05-13 21:14:39 +02:00
|
|
|
int i;
|
2001-05-07 02:43:27 +02:00
|
|
|
Datum val;
|
|
|
|
ArrayType *statarray;
|
2010-07-10 00:57:39 +02:00
|
|
|
Oid arrayelemtype;
|
2001-05-07 02:43:27 +02:00
|
|
|
int narrayelem;
|
|
|
|
HeapTuple typeTuple;
|
2003-03-23 06:14:37 +01:00
|
|
|
Form_pg_type typeForm;
|
2001-05-07 02:43:27 +02:00
|
|
|
|
Redesign get_attstatsslot()/free_attstatsslot() for more safety and speed.
The mess cleaned up in commit da0759600 is clear evidence that it's a
bug hazard to expect the caller of get_attstatsslot()/free_attstatsslot()
to provide the correct type OID for the array elements in the slot.
Moreover, we weren't even getting any performance benefit from that,
since get_attstatsslot() was extracting the real type OID from the array
anyway. So we ought to get rid of that requirement; indeed, it would
make more sense for get_attstatsslot() to pass back the type OID it found,
in case the caller isn't sure what to expect, which is likely in binary-
compatible-operator cases.
Another problem with the current implementation is that if the stats array
element type is pass-by-reference, we incur a palloc/memcpy/pfree cycle
for each element. That seemed acceptable when the code was written because
we were targeting O(10) array sizes --- but these days, stats arrays are
almost always bigger than that, sometimes much bigger. We can save a
significant number of cycles by doing one palloc/memcpy/pfree of the whole
array. Indeed, in the now-probably-common case where the array is toasted,
that happens anyway so this method is basically free. (Note: although the
catcache code will inline any out-of-line toasted values, it doesn't
decompress them. At the other end of the size range, it doesn't expand
short-header datums either. In either case, DatumGetArrayTypeP would have
to make a copy. We do end up using an extra array copy step if the element
type is pass-by-value and the array length is neither small enough for a
short header nor large enough to have suffered compression. But that
seems like a very acceptable price for winning in pass-by-ref cases.)
Hence, redesign to take these insights into account. While at it,
convert to an API in which we fill a struct rather than passing a bunch
of pointers to individual output arguments. That will make it less
painful if we ever want further expansion of what get_attstatsslot can
pass back.
It's certainly arguable that this is new development and not something to
push post-feature-freeze. However, I view it as primarily bug-proofing
and therefore something that's better to have sooner not later. Since
we aren't quite at beta phase yet, let's put it in.
Discussion: https://postgr.es/m/16364.1494520862@sss.pgh.pa.us
2017-05-13 21:14:39 +02:00
|
|
|
/* initialize *sslot properly */
|
|
|
|
memset(sslot, 0, sizeof(AttStatsSlot));
|
|
|
|
|
2001-05-07 02:43:27 +02:00
|
|
|
for (i = 0; i < STATISTIC_NUM_SLOTS; i++)
|
|
|
|
{
|
|
|
|
if ((&stats->stakind1)[i] == reqkind &&
|
|
|
|
(reqop == InvalidOid || (&stats->staop1)[i] == reqop))
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (i >= STATISTIC_NUM_SLOTS)
|
|
|
|
return false; /* not there */
|
|
|
|
|
Redesign get_attstatsslot()/free_attstatsslot() for more safety and speed.
The mess cleaned up in commit da0759600 is clear evidence that it's a
bug hazard to expect the caller of get_attstatsslot()/free_attstatsslot()
to provide the correct type OID for the array elements in the slot.
Moreover, we weren't even getting any performance benefit from that,
since get_attstatsslot() was extracting the real type OID from the array
anyway. So we ought to get rid of that requirement; indeed, it would
make more sense for get_attstatsslot() to pass back the type OID it found,
in case the caller isn't sure what to expect, which is likely in binary-
compatible-operator cases.
Another problem with the current implementation is that if the stats array
element type is pass-by-reference, we incur a palloc/memcpy/pfree cycle
for each element. That seemed acceptable when the code was written because
we were targeting O(10) array sizes --- but these days, stats arrays are
almost always bigger than that, sometimes much bigger. We can save a
significant number of cycles by doing one palloc/memcpy/pfree of the whole
array. Indeed, in the now-probably-common case where the array is toasted,
that happens anyway so this method is basically free. (Note: although the
catcache code will inline any out-of-line toasted values, it doesn't
decompress them. At the other end of the size range, it doesn't expand
short-header datums either. In either case, DatumGetArrayTypeP would have
to make a copy. We do end up using an extra array copy step if the element
type is pass-by-value and the array length is neither small enough for a
short header nor large enough to have suffered compression. But that
seems like a very acceptable price for winning in pass-by-ref cases.)
Hence, redesign to take these insights into account. While at it,
convert to an API in which we fill a struct rather than passing a bunch
of pointers to individual output arguments. That will make it less
painful if we ever want further expansion of what get_attstatsslot can
pass back.
It's certainly arguable that this is new development and not something to
push post-feature-freeze. However, I view it as primarily bug-proofing
and therefore something that's better to have sooner not later. Since
we aren't quite at beta phase yet, let's put it in.
Discussion: https://postgr.es/m/16364.1494520862@sss.pgh.pa.us
2017-05-13 21:14:39 +02:00
|
|
|
sslot->staop = (&stats->staop1)[i];
|
Make pg_statistic and related code account more honestly for collations.
When we first put in collations support, we basically punted on teaching
pg_statistic, ANALYZE, and the planner selectivity functions about that.
They've just used DEFAULT_COLLATION_OID independently of the actual
collation of the data. It's time to improve that, so:
* Add columns to pg_statistic that record the specific collation associated
with each statistics slot.
* Teach ANALYZE to use the column's actual collation when comparing values
for statistical purposes, and record this in the appropriate slot. (Note
that type-specific typanalyze functions are now expected to fill
stats->stacoll with the appropriate collation, too.)
* Teach assorted selectivity functions to use the actual collation of
the stats they are looking at, instead of just assuming it's
DEFAULT_COLLATION_OID.
This should give noticeably better results in selectivity estimates for
columns with nondefault collations, at least for query clauses that use
that same collation (which would be the default behavior in most cases).
It's still true that comparisons with explicit COLLATE clauses different
from the stored data's collation won't be well-estimated, but that's no
worse than before. Also, this patch does make the first step towards
doing better with that, which is that it's now theoretically possible to
collect stats for a collation other than the column's own collation.
Patch by me; thanks to Peter Eisentraut for review.
Discussion: https://postgr.es/m/14706.1544630227@sss.pgh.pa.us
2018-12-14 18:52:49 +01:00
|
|
|
sslot->stacoll = (&stats->stacoll1)[i];
|
|
|
|
|
Redesign get_attstatsslot()/free_attstatsslot() for more safety and speed.
The mess cleaned up in commit da0759600 is clear evidence that it's a
bug hazard to expect the caller of get_attstatsslot()/free_attstatsslot()
to provide the correct type OID for the array elements in the slot.
Moreover, we weren't even getting any performance benefit from that,
since get_attstatsslot() was extracting the real type OID from the array
anyway. So we ought to get rid of that requirement; indeed, it would
make more sense for get_attstatsslot() to pass back the type OID it found,
in case the caller isn't sure what to expect, which is likely in binary-
compatible-operator cases.
Another problem with the current implementation is that if the stats array
element type is pass-by-reference, we incur a palloc/memcpy/pfree cycle
for each element. That seemed acceptable when the code was written because
we were targeting O(10) array sizes --- but these days, stats arrays are
almost always bigger than that, sometimes much bigger. We can save a
significant number of cycles by doing one palloc/memcpy/pfree of the whole
array. Indeed, in the now-probably-common case where the array is toasted,
that happens anyway so this method is basically free. (Note: although the
catcache code will inline any out-of-line toasted values, it doesn't
decompress them. At the other end of the size range, it doesn't expand
short-header datums either. In either case, DatumGetArrayTypeP would have
to make a copy. We do end up using an extra array copy step if the element
type is pass-by-value and the array length is neither small enough for a
short header nor large enough to have suffered compression. But that
seems like a very acceptable price for winning in pass-by-ref cases.)
Hence, redesign to take these insights into account. While at it,
convert to an API in which we fill a struct rather than passing a bunch
of pointers to individual output arguments. That will make it less
painful if we ever want further expansion of what get_attstatsslot can
pass back.
It's certainly arguable that this is new development and not something to
push post-feature-freeze. However, I view it as primarily bug-proofing
and therefore something that's better to have sooner not later. Since
we aren't quite at beta phase yet, let's put it in.
Discussion: https://postgr.es/m/16364.1494520862@sss.pgh.pa.us
2017-05-13 21:14:39 +02:00
|
|
|
if (flags & ATTSTATSSLOT_VALUES)
|
2001-05-07 02:43:27 +02:00
|
|
|
{
|
2023-03-25 22:49:33 +01:00
|
|
|
val = SysCacheGetAttrNotNull(STATRELATTINH, statstuple,
|
|
|
|
Anum_pg_statistic_stavalues1 + i);
|
2001-10-25 07:50:21 +02:00
|
|
|
|
2010-07-10 00:57:39 +02:00
|
|
|
/*
|
Redesign get_attstatsslot()/free_attstatsslot() for more safety and speed.
The mess cleaned up in commit da0759600 is clear evidence that it's a
bug hazard to expect the caller of get_attstatsslot()/free_attstatsslot()
to provide the correct type OID for the array elements in the slot.
Moreover, we weren't even getting any performance benefit from that,
since get_attstatsslot() was extracting the real type OID from the array
anyway. So we ought to get rid of that requirement; indeed, it would
make more sense for get_attstatsslot() to pass back the type OID it found,
in case the caller isn't sure what to expect, which is likely in binary-
compatible-operator cases.
Another problem with the current implementation is that if the stats array
element type is pass-by-reference, we incur a palloc/memcpy/pfree cycle
for each element. That seemed acceptable when the code was written because
we were targeting O(10) array sizes --- but these days, stats arrays are
almost always bigger than that, sometimes much bigger. We can save a
significant number of cycles by doing one palloc/memcpy/pfree of the whole
array. Indeed, in the now-probably-common case where the array is toasted,
that happens anyway so this method is basically free. (Note: although the
catcache code will inline any out-of-line toasted values, it doesn't
decompress them. At the other end of the size range, it doesn't expand
short-header datums either. In either case, DatumGetArrayTypeP would have
to make a copy. We do end up using an extra array copy step if the element
type is pass-by-value and the array length is neither small enough for a
short header nor large enough to have suffered compression. But that
seems like a very acceptable price for winning in pass-by-ref cases.)
Hence, redesign to take these insights into account. While at it,
convert to an API in which we fill a struct rather than passing a bunch
of pointers to individual output arguments. That will make it less
painful if we ever want further expansion of what get_attstatsslot can
pass back.
It's certainly arguable that this is new development and not something to
push post-feature-freeze. However, I view it as primarily bug-proofing
and therefore something that's better to have sooner not later. Since
we aren't quite at beta phase yet, let's put it in.
Discussion: https://postgr.es/m/16364.1494520862@sss.pgh.pa.us
2017-05-13 21:14:39 +02:00
|
|
|
* Detoast the array if needed, and in any case make a copy that's
|
|
|
|
* under control of this AttStatsSlot.
|
2010-07-10 00:57:39 +02:00
|
|
|
*/
|
Redesign get_attstatsslot()/free_attstatsslot() for more safety and speed.
The mess cleaned up in commit da0759600 is clear evidence that it's a
bug hazard to expect the caller of get_attstatsslot()/free_attstatsslot()
to provide the correct type OID for the array elements in the slot.
Moreover, we weren't even getting any performance benefit from that,
since get_attstatsslot() was extracting the real type OID from the array
anyway. So we ought to get rid of that requirement; indeed, it would
make more sense for get_attstatsslot() to pass back the type OID it found,
in case the caller isn't sure what to expect, which is likely in binary-
compatible-operator cases.
Another problem with the current implementation is that if the stats array
element type is pass-by-reference, we incur a palloc/memcpy/pfree cycle
for each element. That seemed acceptable when the code was written because
we were targeting O(10) array sizes --- but these days, stats arrays are
almost always bigger than that, sometimes much bigger. We can save a
significant number of cycles by doing one palloc/memcpy/pfree of the whole
array. Indeed, in the now-probably-common case where the array is toasted,
that happens anyway so this method is basically free. (Note: although the
catcache code will inline any out-of-line toasted values, it doesn't
decompress them. At the other end of the size range, it doesn't expand
short-header datums either. In either case, DatumGetArrayTypeP would have
to make a copy. We do end up using an extra array copy step if the element
type is pass-by-value and the array length is neither small enough for a
short header nor large enough to have suffered compression. But that
seems like a very acceptable price for winning in pass-by-ref cases.)
Hence, redesign to take these insights into account. While at it,
convert to an API in which we fill a struct rather than passing a bunch
of pointers to individual output arguments. That will make it less
painful if we ever want further expansion of what get_attstatsslot can
pass back.
It's certainly arguable that this is new development and not something to
push post-feature-freeze. However, I view it as primarily bug-proofing
and therefore something that's better to have sooner not later. Since
we aren't quite at beta phase yet, let's put it in.
Discussion: https://postgr.es/m/16364.1494520862@sss.pgh.pa.us
2017-05-13 21:14:39 +02:00
|
|
|
statarray = DatumGetArrayTypePCopy(val);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Extract the actual array element type, and pass it back in case the
|
|
|
|
* caller needs it.
|
|
|
|
*/
|
|
|
|
sslot->valuetype = arrayelemtype = ARR_ELEMTYPE(statarray);
|
|
|
|
|
|
|
|
/* Need info about element type */
|
2010-07-10 00:57:39 +02:00
|
|
|
typeTuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(arrayelemtype));
|
2001-05-07 02:43:27 +02:00
|
|
|
if (!HeapTupleIsValid(typeTuple))
|
2010-07-10 00:57:39 +02:00
|
|
|
elog(ERROR, "cache lookup failed for type %u", arrayelemtype);
|
2003-03-23 06:14:37 +01:00
|
|
|
typeForm = (Form_pg_type) GETSTRUCT(typeTuple);
|
|
|
|
|
2005-11-17 23:14:56 +01:00
|
|
|
/* Deconstruct array into Datum elements; NULLs not expected */
|
2003-03-23 06:14:37 +01:00
|
|
|
deconstruct_array(statarray,
|
2010-07-10 00:57:39 +02:00
|
|
|
arrayelemtype,
|
2003-03-23 06:14:37 +01:00
|
|
|
typeForm->typlen,
|
|
|
|
typeForm->typbyval,
|
|
|
|
typeForm->typalign,
|
Redesign get_attstatsslot()/free_attstatsslot() for more safety and speed.
The mess cleaned up in commit da0759600 is clear evidence that it's a
bug hazard to expect the caller of get_attstatsslot()/free_attstatsslot()
to provide the correct type OID for the array elements in the slot.
Moreover, we weren't even getting any performance benefit from that,
since get_attstatsslot() was extracting the real type OID from the array
anyway. So we ought to get rid of that requirement; indeed, it would
make more sense for get_attstatsslot() to pass back the type OID it found,
in case the caller isn't sure what to expect, which is likely in binary-
compatible-operator cases.
Another problem with the current implementation is that if the stats array
element type is pass-by-reference, we incur a palloc/memcpy/pfree cycle
for each element. That seemed acceptable when the code was written because
we were targeting O(10) array sizes --- but these days, stats arrays are
almost always bigger than that, sometimes much bigger. We can save a
significant number of cycles by doing one palloc/memcpy/pfree of the whole
array. Indeed, in the now-probably-common case where the array is toasted,
that happens anyway so this method is basically free. (Note: although the
catcache code will inline any out-of-line toasted values, it doesn't
decompress them. At the other end of the size range, it doesn't expand
short-header datums either. In either case, DatumGetArrayTypeP would have
to make a copy. We do end up using an extra array copy step if the element
type is pass-by-value and the array length is neither small enough for a
short header nor large enough to have suffered compression. But that
seems like a very acceptable price for winning in pass-by-ref cases.)
Hence, redesign to take these insights into account. While at it,
convert to an API in which we fill a struct rather than passing a bunch
of pointers to individual output arguments. That will make it less
painful if we ever want further expansion of what get_attstatsslot can
pass back.
It's certainly arguable that this is new development and not something to
push post-feature-freeze. However, I view it as primarily bug-proofing
and therefore something that's better to have sooner not later. Since
we aren't quite at beta phase yet, let's put it in.
Discussion: https://postgr.es/m/16364.1494520862@sss.pgh.pa.us
2017-05-13 21:14:39 +02:00
|
|
|
&sslot->values, NULL, &sslot->nvalues);
|
2001-10-25 07:50:21 +02:00
|
|
|
|
2001-05-07 02:43:27 +02:00
|
|
|
/*
|
2003-03-23 06:14:37 +01:00
|
|
|
* If the element type is pass-by-reference, we now have a bunch of
|
Redesign get_attstatsslot()/free_attstatsslot() for more safety and speed.
The mess cleaned up in commit da0759600 is clear evidence that it's a
bug hazard to expect the caller of get_attstatsslot()/free_attstatsslot()
to provide the correct type OID for the array elements in the slot.
Moreover, we weren't even getting any performance benefit from that,
since get_attstatsslot() was extracting the real type OID from the array
anyway. So we ought to get rid of that requirement; indeed, it would
make more sense for get_attstatsslot() to pass back the type OID it found,
in case the caller isn't sure what to expect, which is likely in binary-
compatible-operator cases.
Another problem with the current implementation is that if the stats array
element type is pass-by-reference, we incur a palloc/memcpy/pfree cycle
for each element. That seemed acceptable when the code was written because
we were targeting O(10) array sizes --- but these days, stats arrays are
almost always bigger than that, sometimes much bigger. We can save a
significant number of cycles by doing one palloc/memcpy/pfree of the whole
array. Indeed, in the now-probably-common case where the array is toasted,
that happens anyway so this method is basically free. (Note: although the
catcache code will inline any out-of-line toasted values, it doesn't
decompress them. At the other end of the size range, it doesn't expand
short-header datums either. In either case, DatumGetArrayTypeP would have
to make a copy. We do end up using an extra array copy step if the element
type is pass-by-value and the array length is neither small enough for a
short header nor large enough to have suffered compression. But that
seems like a very acceptable price for winning in pass-by-ref cases.)
Hence, redesign to take these insights into account. While at it,
convert to an API in which we fill a struct rather than passing a bunch
of pointers to individual output arguments. That will make it less
painful if we ever want further expansion of what get_attstatsslot can
pass back.
It's certainly arguable that this is new development and not something to
push post-feature-freeze. However, I view it as primarily bug-proofing
and therefore something that's better to have sooner not later. Since
we aren't quite at beta phase yet, let's put it in.
Discussion: https://postgr.es/m/16364.1494520862@sss.pgh.pa.us
2017-05-13 21:14:39 +02:00
|
|
|
* Datums that are pointers into the statarray, so we need to keep
|
|
|
|
* that until free_attstatsslot. Otherwise, all the useful info is in
|
|
|
|
* sslot->values[], so we can free the array object immediately.
|
2001-05-07 02:43:27 +02:00
|
|
|
*/
|
2003-03-23 06:14:37 +01:00
|
|
|
if (!typeForm->typbyval)
|
Redesign get_attstatsslot()/free_attstatsslot() for more safety and speed.
The mess cleaned up in commit da0759600 is clear evidence that it's a
bug hazard to expect the caller of get_attstatsslot()/free_attstatsslot()
to provide the correct type OID for the array elements in the slot.
Moreover, we weren't even getting any performance benefit from that,
since get_attstatsslot() was extracting the real type OID from the array
anyway. So we ought to get rid of that requirement; indeed, it would
make more sense for get_attstatsslot() to pass back the type OID it found,
in case the caller isn't sure what to expect, which is likely in binary-
compatible-operator cases.
Another problem with the current implementation is that if the stats array
element type is pass-by-reference, we incur a palloc/memcpy/pfree cycle
for each element. That seemed acceptable when the code was written because
we were targeting O(10) array sizes --- but these days, stats arrays are
almost always bigger than that, sometimes much bigger. We can save a
significant number of cycles by doing one palloc/memcpy/pfree of the whole
array. Indeed, in the now-probably-common case where the array is toasted,
that happens anyway so this method is basically free. (Note: although the
catcache code will inline any out-of-line toasted values, it doesn't
decompress them. At the other end of the size range, it doesn't expand
short-header datums either. In either case, DatumGetArrayTypeP would have
to make a copy. We do end up using an extra array copy step if the element
type is pass-by-value and the array length is neither small enough for a
short header nor large enough to have suffered compression. But that
seems like a very acceptable price for winning in pass-by-ref cases.)
Hence, redesign to take these insights into account. While at it,
convert to an API in which we fill a struct rather than passing a bunch
of pointers to individual output arguments. That will make it less
painful if we ever want further expansion of what get_attstatsslot can
pass back.
It's certainly arguable that this is new development and not something to
push post-feature-freeze. However, I view it as primarily bug-proofing
and therefore something that's better to have sooner not later. Since
we aren't quite at beta phase yet, let's put it in.
Discussion: https://postgr.es/m/16364.1494520862@sss.pgh.pa.us
2017-05-13 21:14:39 +02:00
|
|
|
sslot->values_arr = statarray;
|
|
|
|
else
|
|
|
|
pfree(statarray);
|
2001-10-25 07:50:21 +02:00
|
|
|
|
2003-03-23 06:14:37 +01:00
|
|
|
ReleaseSysCache(typeTuple);
|
2001-05-07 02:43:27 +02:00
|
|
|
}
|
|
|
|
|
Redesign get_attstatsslot()/free_attstatsslot() for more safety and speed.
The mess cleaned up in commit da0759600 is clear evidence that it's a
bug hazard to expect the caller of get_attstatsslot()/free_attstatsslot()
to provide the correct type OID for the array elements in the slot.
Moreover, we weren't even getting any performance benefit from that,
since get_attstatsslot() was extracting the real type OID from the array
anyway. So we ought to get rid of that requirement; indeed, it would
make more sense for get_attstatsslot() to pass back the type OID it found,
in case the caller isn't sure what to expect, which is likely in binary-
compatible-operator cases.
Another problem with the current implementation is that if the stats array
element type is pass-by-reference, we incur a palloc/memcpy/pfree cycle
for each element. That seemed acceptable when the code was written because
we were targeting O(10) array sizes --- but these days, stats arrays are
almost always bigger than that, sometimes much bigger. We can save a
significant number of cycles by doing one palloc/memcpy/pfree of the whole
array. Indeed, in the now-probably-common case where the array is toasted,
that happens anyway so this method is basically free. (Note: although the
catcache code will inline any out-of-line toasted values, it doesn't
decompress them. At the other end of the size range, it doesn't expand
short-header datums either. In either case, DatumGetArrayTypeP would have
to make a copy. We do end up using an extra array copy step if the element
type is pass-by-value and the array length is neither small enough for a
short header nor large enough to have suffered compression. But that
seems like a very acceptable price for winning in pass-by-ref cases.)
Hence, redesign to take these insights into account. While at it,
convert to an API in which we fill a struct rather than passing a bunch
of pointers to individual output arguments. That will make it less
painful if we ever want further expansion of what get_attstatsslot can
pass back.
It's certainly arguable that this is new development and not something to
push post-feature-freeze. However, I view it as primarily bug-proofing
and therefore something that's better to have sooner not later. Since
we aren't quite at beta phase yet, let's put it in.
Discussion: https://postgr.es/m/16364.1494520862@sss.pgh.pa.us
2017-05-13 21:14:39 +02:00
|
|
|
if (flags & ATTSTATSSLOT_NUMBERS)
|
2001-05-07 02:43:27 +02:00
|
|
|
{
|
2023-03-25 22:49:33 +01:00
|
|
|
val = SysCacheGetAttrNotNull(STATRELATTINH, statstuple,
|
|
|
|
Anum_pg_statistic_stanumbers1 + i);
|
Redesign get_attstatsslot()/free_attstatsslot() for more safety and speed.
The mess cleaned up in commit da0759600 is clear evidence that it's a
bug hazard to expect the caller of get_attstatsslot()/free_attstatsslot()
to provide the correct type OID for the array elements in the slot.
Moreover, we weren't even getting any performance benefit from that,
since get_attstatsslot() was extracting the real type OID from the array
anyway. So we ought to get rid of that requirement; indeed, it would
make more sense for get_attstatsslot() to pass back the type OID it found,
in case the caller isn't sure what to expect, which is likely in binary-
compatible-operator cases.
Another problem with the current implementation is that if the stats array
element type is pass-by-reference, we incur a palloc/memcpy/pfree cycle
for each element. That seemed acceptable when the code was written because
we were targeting O(10) array sizes --- but these days, stats arrays are
almost always bigger than that, sometimes much bigger. We can save a
significant number of cycles by doing one palloc/memcpy/pfree of the whole
array. Indeed, in the now-probably-common case where the array is toasted,
that happens anyway so this method is basically free. (Note: although the
catcache code will inline any out-of-line toasted values, it doesn't
decompress them. At the other end of the size range, it doesn't expand
short-header datums either. In either case, DatumGetArrayTypeP would have
to make a copy. We do end up using an extra array copy step if the element
type is pass-by-value and the array length is neither small enough for a
short header nor large enough to have suffered compression. But that
seems like a very acceptable price for winning in pass-by-ref cases.)
Hence, redesign to take these insights into account. While at it,
convert to an API in which we fill a struct rather than passing a bunch
of pointers to individual output arguments. That will make it less
painful if we ever want further expansion of what get_attstatsslot can
pass back.
It's certainly arguable that this is new development and not something to
push post-feature-freeze. However, I view it as primarily bug-proofing
and therefore something that's better to have sooner not later. Since
we aren't quite at beta phase yet, let's put it in.
Discussion: https://postgr.es/m/16364.1494520862@sss.pgh.pa.us
2017-05-13 21:14:39 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Detoast the array if needed, and in any case make a copy that's
|
|
|
|
* under control of this AttStatsSlot.
|
|
|
|
*/
|
|
|
|
statarray = DatumGetArrayTypePCopy(val);
|
2001-10-25 07:50:21 +02:00
|
|
|
|
2001-05-07 02:43:27 +02:00
|
|
|
/*
|
|
|
|
* We expect the array to be a 1-D float4 array; verify that. We don't
|
|
|
|
* need to use deconstruct_array() since the array data is just going
|
|
|
|
* to look like a C array of float4 values.
|
|
|
|
*/
|
|
|
|
narrayelem = ARR_DIMS(statarray)[0];
|
|
|
|
if (ARR_NDIM(statarray) != 1 || narrayelem <= 0 ||
|
2005-11-17 23:14:56 +01:00
|
|
|
ARR_HASNULL(statarray) ||
|
2002-08-26 19:54:02 +02:00
|
|
|
ARR_ELEMTYPE(statarray) != FLOAT4OID)
|
2003-07-25 22:18:01 +02:00
|
|
|
elog(ERROR, "stanumbers is not a 1-D float4 array");
|
2001-10-25 07:50:21 +02:00
|
|
|
|
Redesign get_attstatsslot()/free_attstatsslot() for more safety and speed.
The mess cleaned up in commit da0759600 is clear evidence that it's a
bug hazard to expect the caller of get_attstatsslot()/free_attstatsslot()
to provide the correct type OID for the array elements in the slot.
Moreover, we weren't even getting any performance benefit from that,
since get_attstatsslot() was extracting the real type OID from the array
anyway. So we ought to get rid of that requirement; indeed, it would
make more sense for get_attstatsslot() to pass back the type OID it found,
in case the caller isn't sure what to expect, which is likely in binary-
compatible-operator cases.
Another problem with the current implementation is that if the stats array
element type is pass-by-reference, we incur a palloc/memcpy/pfree cycle
for each element. That seemed acceptable when the code was written because
we were targeting O(10) array sizes --- but these days, stats arrays are
almost always bigger than that, sometimes much bigger. We can save a
significant number of cycles by doing one palloc/memcpy/pfree of the whole
array. Indeed, in the now-probably-common case where the array is toasted,
that happens anyway so this method is basically free. (Note: although the
catcache code will inline any out-of-line toasted values, it doesn't
decompress them. At the other end of the size range, it doesn't expand
short-header datums either. In either case, DatumGetArrayTypeP would have
to make a copy. We do end up using an extra array copy step if the element
type is pass-by-value and the array length is neither small enough for a
short header nor large enough to have suffered compression. But that
seems like a very acceptable price for winning in pass-by-ref cases.)
Hence, redesign to take these insights into account. While at it,
convert to an API in which we fill a struct rather than passing a bunch
of pointers to individual output arguments. That will make it less
painful if we ever want further expansion of what get_attstatsslot can
pass back.
It's certainly arguable that this is new development and not something to
push post-feature-freeze. However, I view it as primarily bug-proofing
and therefore something that's better to have sooner not later. Since
we aren't quite at beta phase yet, let's put it in.
Discussion: https://postgr.es/m/16364.1494520862@sss.pgh.pa.us
2017-05-13 21:14:39 +02:00
|
|
|
/* Give caller a pointer directly into the statarray */
|
|
|
|
sslot->numbers = (float4 *) ARR_DATA_PTR(statarray);
|
|
|
|
sslot->nnumbers = narrayelem;
|
|
|
|
|
|
|
|
/* We'll free the statarray in free_attstatsslot */
|
|
|
|
sslot->numbers_arr = statarray;
|
2001-05-07 02:43:27 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2005-10-11 19:27:14 +02:00
|
|
|
/*
|
|
|
|
* free_attstatsslot
|
|
|
|
* Free data allocated by get_attstatsslot
|
|
|
|
*/
|
2001-05-07 02:43:27 +02:00
|
|
|
void
|
Redesign get_attstatsslot()/free_attstatsslot() for more safety and speed.
The mess cleaned up in commit da0759600 is clear evidence that it's a
bug hazard to expect the caller of get_attstatsslot()/free_attstatsslot()
to provide the correct type OID for the array elements in the slot.
Moreover, we weren't even getting any performance benefit from that,
since get_attstatsslot() was extracting the real type OID from the array
anyway. So we ought to get rid of that requirement; indeed, it would
make more sense for get_attstatsslot() to pass back the type OID it found,
in case the caller isn't sure what to expect, which is likely in binary-
compatible-operator cases.
Another problem with the current implementation is that if the stats array
element type is pass-by-reference, we incur a palloc/memcpy/pfree cycle
for each element. That seemed acceptable when the code was written because
we were targeting O(10) array sizes --- but these days, stats arrays are
almost always bigger than that, sometimes much bigger. We can save a
significant number of cycles by doing one palloc/memcpy/pfree of the whole
array. Indeed, in the now-probably-common case where the array is toasted,
that happens anyway so this method is basically free. (Note: although the
catcache code will inline any out-of-line toasted values, it doesn't
decompress them. At the other end of the size range, it doesn't expand
short-header datums either. In either case, DatumGetArrayTypeP would have
to make a copy. We do end up using an extra array copy step if the element
type is pass-by-value and the array length is neither small enough for a
short header nor large enough to have suffered compression. But that
seems like a very acceptable price for winning in pass-by-ref cases.)
Hence, redesign to take these insights into account. While at it,
convert to an API in which we fill a struct rather than passing a bunch
of pointers to individual output arguments. That will make it less
painful if we ever want further expansion of what get_attstatsslot can
pass back.
It's certainly arguable that this is new development and not something to
push post-feature-freeze. However, I view it as primarily bug-proofing
and therefore something that's better to have sooner not later. Since
we aren't quite at beta phase yet, let's put it in.
Discussion: https://postgr.es/m/16364.1494520862@sss.pgh.pa.us
2017-05-13 21:14:39 +02:00
|
|
|
free_attstatsslot(AttStatsSlot *sslot)
|
|
|
|
{
|
|
|
|
/* The values[] array was separately palloc'd by deconstruct_array */
|
|
|
|
if (sslot->values)
|
|
|
|
pfree(sslot->values);
|
|
|
|
/* The numbers[] array points into numbers_arr, do not pfree it */
|
|
|
|
/* Free the detoasted array objects, if any */
|
|
|
|
if (sslot->values_arr)
|
|
|
|
pfree(sslot->values_arr);
|
|
|
|
if (sslot->numbers_arr)
|
|
|
|
pfree(sslot->numbers_arr);
|
2001-05-07 02:43:27 +02:00
|
|
|
}
|
2001-06-14 03:09:22 +02:00
|
|
|
|
2002-04-02 03:03:07 +02:00
|
|
|
/* ---------- PG_NAMESPACE CACHE ---------- */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* get_namespace_name
|
|
|
|
* Returns the name of a given namespace
|
|
|
|
*
|
|
|
|
* Returns a palloc'd copy of the string, or NULL if no such namespace.
|
|
|
|
*/
|
|
|
|
char *
|
|
|
|
get_namespace_name(Oid nspid)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tp = SearchSysCache1(NAMESPACEOID, ObjectIdGetDatum(nspid));
|
2002-04-02 03:03:07 +02:00
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
|
|
|
Form_pg_namespace nsptup = (Form_pg_namespace) GETSTRUCT(tp);
|
|
|
|
char *result;
|
|
|
|
|
|
|
|
result = pstrdup(NameStr(nsptup->nspname));
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
return NULL;
|
|
|
|
}
|
2011-11-03 12:16:28 +01:00
|
|
|
|
2015-04-06 16:40:55 +02:00
|
|
|
/*
|
|
|
|
* get_namespace_name_or_temp
|
|
|
|
* As above, but if it is this backend's temporary namespace, return
|
|
|
|
* "pg_temp" instead.
|
|
|
|
*/
|
|
|
|
char *
|
|
|
|
get_namespace_name_or_temp(Oid nspid)
|
|
|
|
{
|
|
|
|
if (isTempNamespace(nspid))
|
2021-07-27 18:03:16 +02:00
|
|
|
return pstrdup("pg_temp");
|
2015-04-06 16:40:55 +02:00
|
|
|
else
|
|
|
|
return get_namespace_name(nspid);
|
|
|
|
}
|
|
|
|
|
Multirange datatypes
Multiranges are basically sorted arrays of non-overlapping ranges with
set-theoretic operations defined over them.
Since v14, each range type automatically gets a corresponding multirange
datatype. There are both manual and automatic mechanisms for naming multirange
types. Once can specify multirange type name using multirange_type_name
attribute in CREATE TYPE. Otherwise, a multirange type name is generated
automatically. If the range type name contains "range" then we change that to
"multirange". Otherwise, we add "_multirange" to the end.
Implementation of multiranges comes with a space-efficient internal
representation format, which evades extra paddings and duplicated storage of
oids. Altogether this format allows fetching a particular range by its index
in O(n).
Statistic gathering and selectivity estimation are implemented for multiranges.
For this purpose, stored multirange is approximated as union range without gaps.
This field will likely need improvements in the future.
Catversion is bumped.
Discussion: https://postgr.es/m/CALNJ-vSUpQ_Y%3DjXvTxt1VYFztaBSsWVXeF1y6gTYQ4bOiWDLgQ%40mail.gmail.com
Discussion: https://postgr.es/m/a0b8026459d1e6167933be2104a6174e7d40d0ab.camel%40j-davis.com#fe7218c83b08068bfffb0c5293eceda0
Author: Paul Jungwirth, revised by me
Reviewed-by: David Fetter, Corey Huinker, Jeff Davis, Pavel Stehule
Reviewed-by: Alvaro Herrera, Tom Lane, Isaac Morland, David G. Johnston
Reviewed-by: Zhihong Yu, Alexander Korotkov
2020-12-20 05:20:33 +01:00
|
|
|
/* ---------- PG_RANGE CACHES ---------- */
|
2011-11-21 05:50:27 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* get_range_subtype
|
|
|
|
* Returns the subtype of a given range type
|
|
|
|
*
|
|
|
|
* Returns InvalidOid if the type is not a range type.
|
|
|
|
*/
|
2011-11-03 12:16:28 +01:00
|
|
|
Oid
|
|
|
|
get_range_subtype(Oid rangeOid)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
|
|
|
|
tp = SearchSysCache1(RANGETYPE, ObjectIdGetDatum(rangeOid));
|
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
|
|
|
Form_pg_range rngtup = (Form_pg_range) GETSTRUCT(tp);
|
|
|
|
Oid result;
|
|
|
|
|
|
|
|
result = rngtup->rngsubtype;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
return InvalidOid;
|
|
|
|
}
|
2018-09-19 00:54:10 +02:00
|
|
|
|
2020-01-31 23:03:55 +01:00
|
|
|
/*
|
|
|
|
* get_range_collation
|
|
|
|
* Returns the collation of a given range type
|
|
|
|
*
|
|
|
|
* Returns InvalidOid if the type is not a range type,
|
|
|
|
* or if its subtype is not collatable.
|
|
|
|
*/
|
|
|
|
Oid
|
|
|
|
get_range_collation(Oid rangeOid)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
|
|
|
|
tp = SearchSysCache1(RANGETYPE, ObjectIdGetDatum(rangeOid));
|
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
|
|
|
Form_pg_range rngtup = (Form_pg_range) GETSTRUCT(tp);
|
|
|
|
Oid result;
|
|
|
|
|
|
|
|
result = rngtup->rngcollation;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
return InvalidOid;
|
|
|
|
}
|
|
|
|
|
Multirange datatypes
Multiranges are basically sorted arrays of non-overlapping ranges with
set-theoretic operations defined over them.
Since v14, each range type automatically gets a corresponding multirange
datatype. There are both manual and automatic mechanisms for naming multirange
types. Once can specify multirange type name using multirange_type_name
attribute in CREATE TYPE. Otherwise, a multirange type name is generated
automatically. If the range type name contains "range" then we change that to
"multirange". Otherwise, we add "_multirange" to the end.
Implementation of multiranges comes with a space-efficient internal
representation format, which evades extra paddings and duplicated storage of
oids. Altogether this format allows fetching a particular range by its index
in O(n).
Statistic gathering and selectivity estimation are implemented for multiranges.
For this purpose, stored multirange is approximated as union range without gaps.
This field will likely need improvements in the future.
Catversion is bumped.
Discussion: https://postgr.es/m/CALNJ-vSUpQ_Y%3DjXvTxt1VYFztaBSsWVXeF1y6gTYQ4bOiWDLgQ%40mail.gmail.com
Discussion: https://postgr.es/m/a0b8026459d1e6167933be2104a6174e7d40d0ab.camel%40j-davis.com#fe7218c83b08068bfffb0c5293eceda0
Author: Paul Jungwirth, revised by me
Reviewed-by: David Fetter, Corey Huinker, Jeff Davis, Pavel Stehule
Reviewed-by: Alvaro Herrera, Tom Lane, Isaac Morland, David G. Johnston
Reviewed-by: Zhihong Yu, Alexander Korotkov
2020-12-20 05:20:33 +01:00
|
|
|
/*
|
|
|
|
* get_range_multirange
|
|
|
|
* Returns the multirange type of a given range type
|
|
|
|
*
|
|
|
|
* Returns InvalidOid if the type is not a range type.
|
|
|
|
*/
|
|
|
|
Oid
|
|
|
|
get_range_multirange(Oid rangeOid)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
|
|
|
|
tp = SearchSysCache1(RANGETYPE, ObjectIdGetDatum(rangeOid));
|
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
|
|
|
Form_pg_range rngtup = (Form_pg_range) GETSTRUCT(tp);
|
|
|
|
Oid result;
|
|
|
|
|
|
|
|
result = rngtup->rngmultitypid;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
return InvalidOid;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* get_multirange_range
|
|
|
|
* Returns the range type of a given multirange
|
|
|
|
*
|
|
|
|
* Returns InvalidOid if the type is not a multirange.
|
|
|
|
*/
|
|
|
|
Oid
|
|
|
|
get_multirange_range(Oid multirangeOid)
|
|
|
|
{
|
|
|
|
HeapTuple tp;
|
|
|
|
|
|
|
|
tp = SearchSysCache1(RANGEMULTIRANGE, ObjectIdGetDatum(multirangeOid));
|
|
|
|
if (HeapTupleIsValid(tp))
|
|
|
|
{
|
|
|
|
Form_pg_range rngtup = (Form_pg_range) GETSTRUCT(tp);
|
|
|
|
Oid result;
|
|
|
|
|
|
|
|
result = rngtup->rngtypid;
|
|
|
|
ReleaseSysCache(tp);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
return InvalidOid;
|
|
|
|
}
|
|
|
|
|
2018-09-19 00:54:10 +02:00
|
|
|
/* ---------- PG_INDEX CACHE ---------- */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* get_index_column_opclass
|
|
|
|
*
|
|
|
|
* Given the index OID and column number,
|
|
|
|
* return opclass of the index column
|
2019-09-09 12:50:12 +02:00
|
|
|
* or InvalidOid if the index was not found
|
|
|
|
* or column is non-key one.
|
2018-09-19 00:54:10 +02:00
|
|
|
*/
|
|
|
|
Oid
|
|
|
|
get_index_column_opclass(Oid index_oid, int attno)
|
|
|
|
{
|
|
|
|
HeapTuple tuple;
|
2021-11-30 14:02:14 +01:00
|
|
|
Form_pg_index rd_index;
|
2018-09-19 00:54:10 +02:00
|
|
|
Datum datum;
|
|
|
|
oidvector *indclass;
|
|
|
|
Oid opclass;
|
|
|
|
|
|
|
|
/* First we need to know the column's opclass. */
|
|
|
|
|
|
|
|
tuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(index_oid));
|
|
|
|
if (!HeapTupleIsValid(tuple))
|
|
|
|
return InvalidOid;
|
|
|
|
|
|
|
|
rd_index = (Form_pg_index) GETSTRUCT(tuple);
|
|
|
|
|
|
|
|
/* caller is supposed to guarantee this */
|
|
|
|
Assert(attno > 0 && attno <= rd_index->indnatts);
|
|
|
|
|
2019-09-09 12:50:12 +02:00
|
|
|
/* Non-key attributes don't have an opclass */
|
|
|
|
if (attno > rd_index->indnkeyatts)
|
|
|
|
{
|
|
|
|
ReleaseSysCache(tuple);
|
|
|
|
return InvalidOid;
|
|
|
|
}
|
|
|
|
|
2023-03-25 22:49:33 +01:00
|
|
|
datum = SysCacheGetAttrNotNull(INDEXRELID, tuple, Anum_pg_index_indclass);
|
2018-09-19 00:54:10 +02:00
|
|
|
indclass = ((oidvector *) DatumGetPointer(datum));
|
2019-09-09 12:50:12 +02:00
|
|
|
|
|
|
|
Assert(attno <= indclass->dim1);
|
2018-09-19 00:54:10 +02:00
|
|
|
opclass = indclass->values[attno - 1];
|
|
|
|
|
|
|
|
ReleaseSysCache(tuple);
|
|
|
|
|
|
|
|
return opclass;
|
|
|
|
}
|
2020-03-10 07:38:17 +01:00
|
|
|
|
2020-03-13 11:28:11 +01:00
|
|
|
/*
|
|
|
|
* get_index_isreplident
|
|
|
|
*
|
|
|
|
* Given the index OID, return pg_index.indisreplident.
|
|
|
|
*/
|
|
|
|
bool
|
|
|
|
get_index_isreplident(Oid index_oid)
|
|
|
|
{
|
|
|
|
HeapTuple tuple;
|
|
|
|
Form_pg_index rd_index;
|
|
|
|
bool result;
|
|
|
|
|
|
|
|
tuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(index_oid));
|
|
|
|
if (!HeapTupleIsValid(tuple))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
rd_index = (Form_pg_index) GETSTRUCT(tuple);
|
|
|
|
result = rd_index->indisreplident;
|
|
|
|
ReleaseSysCache(tuple);
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2020-03-10 07:38:17 +01:00
|
|
|
/*
|
|
|
|
* get_index_isvalid
|
|
|
|
*
|
|
|
|
* Given the index OID, return pg_index.indisvalid.
|
|
|
|
*/
|
|
|
|
bool
|
|
|
|
get_index_isvalid(Oid index_oid)
|
|
|
|
{
|
|
|
|
bool isvalid;
|
|
|
|
HeapTuple tuple;
|
|
|
|
Form_pg_index rd_index;
|
|
|
|
|
|
|
|
tuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(index_oid));
|
|
|
|
if (!HeapTupleIsValid(tuple))
|
|
|
|
elog(ERROR, "cache lookup failed for index %u", index_oid);
|
|
|
|
|
|
|
|
rd_index = (Form_pg_index) GETSTRUCT(tuple);
|
|
|
|
isvalid = rd_index->indisvalid;
|
|
|
|
ReleaseSysCache(tuple);
|
|
|
|
|
|
|
|
return isvalid;
|
|
|
|
}
|
2020-04-06 04:03:49 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* get_index_isclustered
|
|
|
|
*
|
|
|
|
* Given the index OID, return pg_index.indisclustered.
|
|
|
|
*/
|
|
|
|
bool
|
|
|
|
get_index_isclustered(Oid index_oid)
|
|
|
|
{
|
|
|
|
bool isclustered;
|
|
|
|
HeapTuple tuple;
|
|
|
|
Form_pg_index rd_index;
|
|
|
|
|
|
|
|
tuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(index_oid));
|
|
|
|
if (!HeapTupleIsValid(tuple))
|
|
|
|
elog(ERROR, "cache lookup failed for index %u", index_oid);
|
|
|
|
|
|
|
|
rd_index = (Form_pg_index) GETSTRUCT(tuple);
|
|
|
|
isclustered = rd_index->indisclustered;
|
|
|
|
ReleaseSysCache(tuple);
|
|
|
|
|
|
|
|
return isclustered;
|
|
|
|
}
|
2022-08-02 07:17:22 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* get_publication_oid - given a publication name, look up the OID
|
|
|
|
*
|
|
|
|
* If missing_ok is false, throw an error if name not found. If true, just
|
|
|
|
* return InvalidOid.
|
|
|
|
*/
|
|
|
|
Oid
|
|
|
|
get_publication_oid(const char *pubname, bool missing_ok)
|
|
|
|
{
|
|
|
|
Oid oid;
|
|
|
|
|
|
|
|
oid = GetSysCacheOid1(PUBLICATIONNAME, Anum_pg_publication_oid,
|
|
|
|
CStringGetDatum(pubname));
|
|
|
|
if (!OidIsValid(oid) && !missing_ok)
|
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_UNDEFINED_OBJECT),
|
|
|
|
errmsg("publication \"%s\" does not exist", pubname)));
|
|
|
|
return oid;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* get_publication_name - given a publication Oid, look up the name
|
|
|
|
*
|
|
|
|
* If missing_ok is false, throw an error if name not found. If true, just
|
|
|
|
* return NULL.
|
|
|
|
*/
|
|
|
|
char *
|
|
|
|
get_publication_name(Oid pubid, bool missing_ok)
|
|
|
|
{
|
|
|
|
HeapTuple tup;
|
2023-05-19 23:24:48 +02:00
|
|
|
char *pubname;
|
2022-08-02 07:17:22 +02:00
|
|
|
Form_pg_publication pubform;
|
|
|
|
|
|
|
|
tup = SearchSysCache1(PUBLICATIONOID, ObjectIdGetDatum(pubid));
|
|
|
|
|
|
|
|
if (!HeapTupleIsValid(tup))
|
|
|
|
{
|
|
|
|
if (!missing_ok)
|
|
|
|
elog(ERROR, "cache lookup failed for publication %u", pubid);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
pubform = (Form_pg_publication) GETSTRUCT(tup);
|
|
|
|
pubname = pstrdup(NameStr(pubform->pubname));
|
|
|
|
|
|
|
|
ReleaseSysCache(tup);
|
|
|
|
|
|
|
|
return pubname;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* get_subscription_oid - given a subscription name, look up the OID
|
|
|
|
*
|
|
|
|
* If missing_ok is false, throw an error if name not found. If true, just
|
|
|
|
* return InvalidOid.
|
|
|
|
*/
|
|
|
|
Oid
|
2023-05-19 23:24:48 +02:00
|
|
|
get_subscription_oid(const char *subname, bool missing_ok)
|
2022-08-02 07:17:22 +02:00
|
|
|
{
|
|
|
|
Oid oid;
|
|
|
|
|
|
|
|
oid = GetSysCacheOid2(SUBSCRIPTIONNAME, Anum_pg_subscription_oid,
|
2023-05-19 23:24:48 +02:00
|
|
|
MyDatabaseId, CStringGetDatum(subname));
|
2022-08-02 07:17:22 +02:00
|
|
|
if (!OidIsValid(oid) && !missing_ok)
|
|
|
|
ereport(ERROR,
|
2023-05-19 23:24:48 +02:00
|
|
|
(errcode(ERRCODE_UNDEFINED_OBJECT),
|
|
|
|
errmsg("subscription \"%s\" does not exist", subname)));
|
2022-08-02 07:17:22 +02:00
|
|
|
return oid;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* get_subscription_name - given a subscription OID, look up the name
|
|
|
|
*
|
|
|
|
* If missing_ok is false, throw an error if name not found. If true, just
|
|
|
|
* return NULL.
|
|
|
|
*/
|
|
|
|
char *
|
|
|
|
get_subscription_name(Oid subid, bool missing_ok)
|
|
|
|
{
|
|
|
|
HeapTuple tup;
|
2023-05-19 23:24:48 +02:00
|
|
|
char *subname;
|
2022-08-02 07:17:22 +02:00
|
|
|
Form_pg_subscription subform;
|
|
|
|
|
|
|
|
tup = SearchSysCache1(SUBSCRIPTIONOID, ObjectIdGetDatum(subid));
|
|
|
|
|
|
|
|
if (!HeapTupleIsValid(tup))
|
|
|
|
{
|
|
|
|
if (!missing_ok)
|
|
|
|
elog(ERROR, "cache lookup failed for subscription %u", subid);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
subform = (Form_pg_subscription) GETSTRUCT(tup);
|
|
|
|
subname = pstrdup(NameStr(subform->subname));
|
|
|
|
|
|
|
|
ReleaseSysCache(tup);
|
|
|
|
|
|
|
|
return subname;
|
|
|
|
}
|