1996-08-28 03:59:28 +02:00
|
|
|
/*-------------------------------------------------------------------------
|
|
|
|
*
|
1999-02-14 00:22:53 +01:00
|
|
|
* lsyscache.h
|
1999-08-09 05:13:31 +02:00
|
|
|
* Convenience routines for common queries in the system catalog cache.
|
1996-08-28 03:59:28 +02:00
|
|
|
*
|
2020-01-01 18:21:45 +01:00
|
|
|
* Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
|
2000-01-26 06:58:53 +01:00
|
|
|
* Portions Copyright (c) 1994, Regents of the University of California
|
1996-08-28 03:59:28 +02:00
|
|
|
*
|
2010-09-20 22:08:53 +02:00
|
|
|
* src/include/utils/lsyscache.h
|
1996-08-28 03:59:28 +02:00
|
|
|
*
|
|
|
|
*-------------------------------------------------------------------------
|
|
|
|
*/
|
1997-09-07 07:04:48 +02:00
|
|
|
#ifndef LSYSCACHE_H
|
|
|
|
#define LSYSCACHE_H
|
1996-08-28 03:59:28 +02:00
|
|
|
|
2012-08-30 22:15:44 +02:00
|
|
|
#include "access/attnum.h"
|
1999-07-16 01:04:24 +02:00
|
|
|
#include "access/htup.h"
|
2012-08-30 22:15:44 +02:00
|
|
|
#include "nodes/pg_list.h"
|
1996-08-28 03:59:28 +02:00
|
|
|
|
Support subscripting of arbitrary types, not only arrays.
This patch generalizes the subscripting infrastructure so that any
data type can be subscripted, if it provides a handler function to
define what that means. Traditional variable-length (varlena) arrays
all use array_subscript_handler(), while the existing fixed-length
types that support subscripting use raw_array_subscript_handler().
It's expected that other types that want to use subscripting notation
will define their own handlers. (This patch provides no such new
features, though; it only lays the foundation for them.)
To do this, move the parser's semantic processing of subscripts
(including coercion to whatever data type is required) into a
method callback supplied by the handler. On the execution side,
replace the ExecEvalSubscriptingRef* layer of functions with direct
calls to callback-supplied execution routines. (Thus, essentially
no new run-time overhead should be caused by this patch. Indeed,
there is room to remove some overhead by supplying specialized
execution routines. This patch does a little bit in that line,
but more could be done.)
Additional work is required here and there to remove formerly
hard-wired assumptions about the result type, collation, etc
of a SubscriptingRef expression node; and to remove assumptions
that the subscript values must be integers.
One useful side-effect of this is that we now have a less squishy
mechanism for identifying whether a data type is a "true" array:
instead of wiring in weird rules about typlen, we can look to see
if pg_type.typsubscript == F_ARRAY_SUBSCRIPT_HANDLER. For this
to be bulletproof, we have to forbid user-defined types from using
that handler directly; but there seems no good reason for them to
do so.
This patch also removes assumptions that the number of subscripts
is limited to MAXDIM (6), or indeed has any hard-wired limit.
That limit still applies to types handled by array_subscript_handler
or raw_array_subscript_handler, but to discourage other dependencies
on this constant, I've moved it from c.h to utils/array.h.
Dmitry Dolgov, reviewed at various times by Tom Lane, Arthur Zakirov,
Peter Eisentraut, Pavel Stehule
Discussion: https://postgr.es/m/CA+q6zcVDuGBv=M0FqBYX8DPebS3F_0KQ6OVFobGJPM507_SZ_w@mail.gmail.com
Discussion: https://postgr.es/m/CA+q6zcVovR+XY4mfk-7oNk-rF91gH0PebnNfuUjuuDsyHjOcVA@mail.gmail.com
2020-12-09 18:40:37 +01:00
|
|
|
/*
 * Forward declaration only, to avoid including subscripting.h here;
 * callers that need the full definition include that header themselves.
 */
struct SubscriptRoutines;
|
|
|
|
|
2011-07-06 20:53:16 +02:00
|
|
|
/*
 * Result list element for get_op_btree_interpretation.
 *
 * Each element reports one btree opfamily in which the given operator
 * appears, along with its role there.  NOTE: field order is part of the
 * ABI for this struct; do not rearrange.
 */
typedef struct OpBtreeInterpretation
{
	Oid			opfamily_id;	/* btree opfamily containing operator */
	int			strategy;		/* its strategy number */
	Oid			oplefttype;		/* declared left input datatype */
	Oid			oprighttype;	/* declared right input datatype */
} OpBtreeInterpretation;
|
|
|
|
|
2003-06-27 02:33:26 +02:00
|
|
|
/*
 * I/O function selector for get_type_io_data: chooses which of the type's
 * four I/O functions is returned in *func.  NOTE: enumerator order is
 * significant (it fixes the enum's integer values); do not rearrange.
 */
typedef enum IOFuncSelector
{
	IOFunc_input,				/* text input function (typinput) */
	IOFunc_output,				/* text output function (typoutput) */
	IOFunc_receive,				/* binary input function (typreceive) */
	IOFunc_send					/* binary output function (typsend) */
} IOFuncSelector;
|
2003-06-27 02:33:26 +02:00
|
|
|
|
Redesign get_attstatsslot()/free_attstatsslot() for more safety and speed.
The mess cleaned up in commit da0759600 is clear evidence that it's a
bug hazard to expect the caller of get_attstatsslot()/free_attstatsslot()
to provide the correct type OID for the array elements in the slot.
Moreover, we weren't even getting any performance benefit from that,
since get_attstatsslot() was extracting the real type OID from the array
anyway. So we ought to get rid of that requirement; indeed, it would
make more sense for get_attstatsslot() to pass back the type OID it found,
in case the caller isn't sure what to expect, which is likely in binary-
compatible-operator cases.
Another problem with the current implementation is that if the stats array
element type is pass-by-reference, we incur a palloc/memcpy/pfree cycle
for each element. That seemed acceptable when the code was written because
we were targeting O(10) array sizes --- but these days, stats arrays are
almost always bigger than that, sometimes much bigger. We can save a
significant number of cycles by doing one palloc/memcpy/pfree of the whole
array. Indeed, in the now-probably-common case where the array is toasted,
that happens anyway so this method is basically free. (Note: although the
catcache code will inline any out-of-line toasted values, it doesn't
decompress them. At the other end of the size range, it doesn't expand
short-header datums either. In either case, DatumGetArrayTypeP would have
to make a copy. We do end up using an extra array copy step if the element
type is pass-by-value and the array length is neither small enough for a
short header nor large enough to have suffered compression. But that
seems like a very acceptable price for winning in pass-by-ref cases.)
Hence, redesign to take these insights into account. While at it,
convert to an API in which we fill a struct rather than passing a bunch
of pointers to individual output arguments. That will make it less
painful if we ever want further expansion of what get_attstatsslot can
pass back.
It's certainly arguable that this is new development and not something to
push post-feature-freeze. However, I view it as primarily bug-proofing
and therefore something that's better to have sooner not later. Since
we aren't quite at beta phase yet, let's put it in.
Discussion: https://postgr.es/m/16364.1494520862@sss.pgh.pa.us
2017-05-13 21:14:39 +02:00
|
|
|
/*
 * Flag bits for get_attstatsslot: OR together the parts of the statistics
 * slot the caller wants extracted into the AttStatsSlot result struct.
 */
#define ATTSTATSSLOT_VALUES 0x01
#define ATTSTATSSLOT_NUMBERS 0x02
|
|
|
|
|
|
|
|
/*
 * Result struct for get_attstatsslot.
 *
 * Filled by get_attstatsslot; release the contained storage with
 * free_attstatsslot.  Which groups of fields are filled depends on the
 * ATTSTATSSLOT_* flags passed to get_attstatsslot.  NOTE: field order is
 * part of the ABI for this struct; do not rearrange.
 */
typedef struct AttStatsSlot
{
	/* Always filled: */
	Oid			staop;			/* Actual staop for the found slot */
	Oid			stacoll;		/* Actual collation for the found slot */

	/* Filled if ATTSTATSSLOT_VALUES is specified: */
	Oid			valuetype;		/* Actual datatype of the values */
	Datum	   *values;			/* slot's "values" array, or NULL if none */
	int			nvalues;		/* length of values[], or 0 */

	/* Filled if ATTSTATSSLOT_NUMBERS is specified: */
	float4	   *numbers;		/* slot's "numbers" array, or NULL if none */
	int			nnumbers;		/* length of numbers[], or 0 */

	/* Remaining fields are private to get_attstatsslot/free_attstatsslot */
	void	   *values_arr;		/* palloc'd values array, if any */
	void	   *numbers_arr;	/* palloc'd numbers array, if any */
} AttStatsSlot;
|
|
|
|
|
2008-09-28 21:51:40 +02:00
|
|
|
/*
 * Hook for plugins to get control in get_attavgwidth().  If the hook
 * returns a nonzero width estimate for (relid, attnum), that value is
 * used; presumably a zero return falls through to the built-in logic —
 * TODO confirm against get_attavgwidth()'s implementation.
 */
typedef int32 (*get_attavgwidth_hook_type) (Oid relid, AttrNumber attnum);
extern PGDLLIMPORT get_attavgwidth_hook_type get_attavgwidth_hook;
|
|
|
|
|
2006-12-23 01:43:13 +01:00
|
|
|
extern bool op_in_opfamily(Oid opno, Oid opfamily);
|
|
|
|
extern int get_op_opfamily_strategy(Oid opno, Oid opfamily);
|
2010-12-03 02:50:48 +01:00
|
|
|
extern Oid get_op_opfamily_sortfamily(Oid opno, Oid opfamily);
|
|
|
|
extern void get_op_opfamily_properties(Oid opno, Oid opfamily, bool ordering_op,
|
2019-05-22 19:04:48 +02:00
|
|
|
int *strategy,
|
|
|
|
Oid *lefttype,
|
|
|
|
Oid *righttype);
|
|
|
|
extern Oid get_opfamily_member(Oid opfamily, Oid lefttype, Oid righttype,
|
|
|
|
int16 strategy);
|
2007-01-21 01:57:15 +01:00
|
|
|
extern bool get_ordering_op_properties(Oid opno,
|
2019-05-22 19:04:48 +02:00
|
|
|
Oid *opfamily, Oid *opcintype, int16 *strategy);
|
2008-08-02 23:32:01 +02:00
|
|
|
extern Oid get_equality_op_for_ordering_op(Oid opno, bool *reverse);
|
2007-01-10 19:06:05 +01:00
|
|
|
extern Oid get_ordering_op_for_equality_op(Oid opno, bool use_lhs_type);
|
2007-01-20 21:45:41 +01:00
|
|
|
extern List *get_mergejoin_opfamilies(Oid opno);
|
2007-01-30 02:33:36 +01:00
|
|
|
extern bool get_compatible_hash_operators(Oid opno,
|
2019-05-22 19:04:48 +02:00
|
|
|
Oid *lhs_opno, Oid *rhs_opno);
|
2007-01-30 02:33:36 +01:00
|
|
|
extern bool get_op_hash_functions(Oid opno,
|
2019-05-22 19:04:48 +02:00
|
|
|
RegProcedure *lhs_procno, RegProcedure *rhs_procno);
|
2011-07-06 20:53:16 +02:00
|
|
|
extern List *get_op_btree_interpretation(Oid opno);
|
2008-08-02 23:32:01 +02:00
|
|
|
extern bool equality_ops_are_compatible(Oid opno1, Oid opno2);
|
Improve ineq_histogram_selectivity's behavior for non-default orderings.
ineq_histogram_selectivity() can be invoked in situations where the
ordering we care about is not that of the column's histogram. We could
be considering some other collation, or even more drastically, the
query operator might not agree at all with what was used to construct
the histogram. (We'll get here for anything using scalarineqsel-based
estimators, so that's quite likely to happen for extension operators.)
Up to now we just ignored this issue and assumed we were dealing with
an operator/collation whose sort order exactly matches the histogram,
possibly resulting in junk estimates if the binary search gets confused.
It's past time to improve that, since the use of nondefault collations
is increasing. What we can do is verify that the given operator and
collation match what's recorded in pg_statistic, and use the existing
code only if so. When they don't match, instead execute the operator
against each histogram entry, and take the fraction of successes as our
selectivity estimate. This gives an estimate that is probably good to
about 1/histogram_size, with no assumptions about ordering. (The quality
of the estimate is likely to degrade near the ends of the value range,
since the two orderings probably don't agree on what is an extremal value;
but this is surely going to be more reliable than what we did before.)
At some point we might further improve matters by storing more than one
histogram calculated according to different orderings. But this code
would still be good fallback logic when no matches exist, so that is
not an argument for not doing this.
While here, also improve get_variable_range() to deal more honestly
with non-default collations.
This isn't back-patchable, because it requires adding another argument
to ineq_histogram_selectivity, and because it might have significant
impact on the estimation results for extension operators relying on
scalarineqsel --- mostly for the better, one hopes, but in any case
destabilizing plan choices in back branches is best avoided.
Per investigation of a report from James Lucas.
Discussion: https://postgr.es/m/CAAFmbbOvfi=wMM=3qRsPunBSLb8BFREno2oOzSBS=mzfLPKABw@mail.gmail.com
2020-06-05 22:55:16 +02:00
|
|
|
extern bool comparison_ops_are_compatible(Oid opno1, Oid opno2);
|
2019-05-22 19:04:48 +02:00
|
|
|
extern Oid get_opfamily_proc(Oid opfamily, Oid lefttype, Oid righttype,
|
|
|
|
int16 procnum);
|
2018-02-12 23:30:30 +01:00
|
|
|
extern char *get_attname(Oid relid, AttrNumber attnum, bool missing_ok);
|
2002-08-02 20:15:10 +02:00
|
|
|
extern AttrNumber get_attnum(Oid relid, const char *attname);
|
2019-03-30 08:13:09 +01:00
|
|
|
extern char get_attgenerated(Oid relid, AttrNumber attnum);
|
1997-09-08 04:41:22 +02:00
|
|
|
extern Oid get_atttype(Oid relid, AttrNumber attnum);
|
2011-03-26 19:25:48 +01:00
|
|
|
extern void get_atttypetypmodcoll(Oid relid, AttrNumber attnum,
|
2019-05-22 19:04:48 +02:00
|
|
|
Oid *typid, int32 *typmod, Oid *collid);
|
Implement operator class parameters
PostgreSQL provides set of template index access methods, where opclasses have
much freedom in the semantics of indexing. These index AMs are GiST, GIN,
SP-GiST and BRIN. There opclasses define representation of keys, operations on
them and supported search strategies. So, it's natural that opclasses may be
faced some tradeoffs, which require user-side decision. This commit implements
opclass parameters allowing users to set some values, which tell opclass how to
index the particular dataset.
This commit doesn't introduce new storage in system catalog. Instead it uses
pg_attribute.attoptions, which is used for table column storage options but
unused for index attributes.
In order to evade changing signature of each opclass support function, we
implement unified way to pass options to opclass support functions. Options
are set to fn_expr as the constant bytea expression. It's possible due to the
fact that opclass support functions are executed outside of expressions, so
fn_expr is unused for them.
This commit comes with some examples of opclass options usage. We parametrize
signature length in GiST. That applies to multiple opclasses: tsvector_ops,
gist__intbig_ops, gist_ltree_ops, gist__ltree_ops, gist_trgm_ops and
gist_hstore_ops. Also we parametrize maximum number of integer ranges for
gist__int_ops. However, the main future usage of this feature is expected
to be json, where users would be able to specify which way to index particular
json parts.
Catversion is bumped.
Discussion: https://postgr.es/m/d22c3a18-31c7-1879-fc11-4c1ce2f5e5af%40postgrespro.ru
Author: Nikita Glukhov, revised by me
Reviewed-by: Nikolay Shaplov, Robert Haas, Tom Lane, Tomas Vondra, Alvaro Herrera
2020-03-30 18:17:11 +02:00
|
|
|
extern Datum get_attoptions(Oid relid, int16 attnum);
|
2020-03-10 15:28:23 +01:00
|
|
|
extern Oid get_cast_oid(Oid sourcetypeid, Oid targettypeid, bool missing_ok);
|
2011-02-08 22:04:18 +01:00
|
|
|
extern char *get_collation_name(Oid colloid);
|
2019-03-22 12:09:32 +01:00
|
|
|
extern bool get_collation_isdeterministic(Oid colloid);
|
2007-02-14 02:58:58 +01:00
|
|
|
extern char *get_constraint_name(Oid conoid);
|
2020-12-09 15:12:05 +01:00
|
|
|
extern Oid get_constraint_index(Oid conoid);
|
2015-04-26 16:33:14 +02:00
|
|
|
extern char *get_language_name(Oid langoid, bool missing_ok);
|
2006-12-23 01:43:13 +01:00
|
|
|
extern Oid get_opclass_family(Oid opclass);
|
2005-12-28 02:30:02 +01:00
|
|
|
extern Oid get_opclass_input_type(Oid opclass);
|
2018-09-19 00:54:10 +02:00
|
|
|
extern bool get_opclass_opfamily_and_input_type(Oid opclass,
|
2019-05-22 19:04:48 +02:00
|
|
|
Oid *opfamily, Oid *opcintype);
|
2000-08-13 04:50:35 +02:00
|
|
|
extern RegProcedure get_opcode(Oid opno);
|
|
|
|
extern char *get_opname(Oid opno);
|
2016-01-22 01:47:15 +01:00
|
|
|
extern Oid get_op_rettype(Oid opno);
|
2003-12-03 18:45:10 +01:00
|
|
|
extern void op_input_types(Oid opno, Oid *lefttype, Oid *righttype);
|
2010-10-31 02:55:20 +01:00
|
|
|
extern bool op_mergejoinable(Oid opno, Oid inputtype);
|
|
|
|
extern bool op_hashjoinable(Oid opno, Oid inputtype);
|
2002-12-01 22:05:14 +01:00
|
|
|
extern bool op_strict(Oid opno);
|
2002-04-05 02:31:36 +02:00
|
|
|
extern char op_volatile(Oid opno);
|
2000-08-13 04:50:35 +02:00
|
|
|
extern Oid get_commutator(Oid opno);
|
|
|
|
extern Oid get_negator(Oid opno);
|
|
|
|
extern RegProcedure get_oprrest(Oid opno);
|
|
|
|
extern RegProcedure get_oprjoin(Oid opno);
|
2002-04-27 05:45:03 +02:00
|
|
|
extern char *get_func_name(Oid funcid);
|
2009-08-10 07:46:50 +02:00
|
|
|
extern Oid get_func_namespace(Oid funcid);
|
1999-08-16 04:06:25 +02:00
|
|
|
extern Oid get_func_rettype(Oid funcid);
|
2005-03-29 02:17:27 +02:00
|
|
|
extern int get_func_nargs(Oid funcid);
|
|
|
|
extern Oid get_func_signature(Oid funcid, Oid **argtypes, int *nargs);
|
Support ordered-set (WITHIN GROUP) aggregates.
This patch introduces generic support for ordered-set and hypothetical-set
aggregate functions, as well as implementations of the instances defined in
SQL:2008 (percentile_cont(), percentile_disc(), rank(), dense_rank(),
percent_rank(), cume_dist()). We also added mode() though it is not in the
spec, as well as versions of percentile_cont() and percentile_disc() that
can compute multiple percentile values in one pass over the data.
Unlike the original submission, this patch puts full control of the sorting
process in the hands of the aggregate's support functions. To allow the
support functions to find out how they're supposed to sort, a new API
function AggGetAggref() is added to nodeAgg.c. This allows retrieval of
the aggregate call's Aggref node, which may have other uses beyond the
immediate need. There is also support for ordered-set aggregates to
install cleanup callback functions, so that they can be sure that
infrastructure such as tuplesort objects gets cleaned up.
In passing, make some fixes in the recently-added support for variadic
aggregates, and make some editorial adjustments in the recent FILTER
additions for aggregates. Also, simplify use of IsBinaryCoercible() by
allowing it to succeed whenever the target type is ANY or ANYELEMENT.
It was inconsistent that it dealt with other polymorphic target types
but not these.
Atri Sharma and Andrew Gierth; reviewed by Pavel Stehule and Vik Fearing,
and rather heavily editorialized upon by Tom Lane
2013-12-23 22:11:35 +01:00
|
|
|
extern Oid get_func_variadictype(Oid funcid);
|
2002-09-04 22:31:48 +02:00
|
|
|
extern bool get_func_retset(Oid funcid);
|
2002-12-01 22:05:14 +01:00
|
|
|
extern bool func_strict(Oid funcid);
|
2002-04-05 02:31:36 +02:00
|
|
|
extern char func_volatile(Oid funcid);
|
2015-09-16 21:38:47 +02:00
|
|
|
extern char func_parallel(Oid funcid);
|
2018-03-02 14:57:38 +01:00
|
|
|
extern char get_func_prokind(Oid funcid);
|
2012-02-14 04:20:27 +01:00
|
|
|
extern bool get_func_leakproof(Oid funcid);
|
2019-02-10 00:32:23 +01:00
|
|
|
extern RegProcedure get_func_support(Oid funcid);
|
2002-03-26 20:17:02 +01:00
|
|
|
extern Oid get_relname_relid(const char *relname, Oid relnamespace);
|
1997-09-08 04:41:22 +02:00
|
|
|
extern char *get_rel_name(Oid relid);
|
2002-04-30 03:26:26 +02:00
|
|
|
extern Oid get_rel_namespace(Oid relid);
|
2002-03-22 03:56:37 +01:00
|
|
|
extern Oid get_rel_type_id(Oid relid);
|
2002-09-20 01:40:56 +02:00
|
|
|
extern char get_rel_relkind(Oid relid);
|
Fix assorted bugs in pg_get_partition_constraintdef().
It failed if passed a nonexistent relation OID, or one that was a non-heap
relation, because of blindly applying heap_open to a user-supplied OID.
This is not OK behavior for a SQL-exposed function; we have a project
policy that we should return NULL in such cases. Moreover, since
pg_get_partition_constraintdef ought now to work on indexes, restricting
it to heaps is flat wrong anyway.
The underlying function generate_partition_qual() wasn't on board with
indexes having partition quals either, nor for that matter with rels
having relispartition set but yet null relpartbound. (One wonders
whether the person who wrote the function comment blocks claiming that
these functions allow a missing relpartbound had ever tested it.)
Fix by testing relispartition before opening the rel, and by using
relation_open not heap_open. (If any other relkinds ever grow the
ability to have relispartition set, the code will work with them
automatically.) Also, don't reject null relpartbound in
generate_partition_qual.
Back-patch to v11, and all but the null-relpartbound change to v10.
(It's not really necessary to change generate_partition_qual at all
in v10, but I thought s/heap_open/relation_open/ would be a good
idea anyway just to keep the code in sync with later branches.)
Per report from Justin Pryzby.
Discussion: https://postgr.es/m/20180927200020.GJ776@telsasoft.com
2018-09-28 00:15:06 +02:00
|
|
|
extern bool get_rel_relispartition(Oid relid);
|
2007-10-13 17:55:40 +02:00
|
|
|
extern Oid get_rel_tablespace(Oid relid);
|
Generate parallel sequential scan plans in simple cases.
Add a new flag, consider_parallel, to each RelOptInfo, indicating
whether a plan for that relation could conceivably be run inside of
a parallel worker. Right now, we're pretty conservative: for example,
it might be possible to defer applying a parallel-restricted qual
in a worker, and later do it in the leader, but right now we just
don't try to parallelize access to that relation. That's probably
the right decision in most cases, anyway.
Using the new flag, generate parallel sequential scan plans for plain
baserels, meaning that we now have parallel sequential scan in
PostgreSQL. The logic here is pretty unsophisticated right now: the
costing model probably isn't right in detail, and we can't push joins
beneath Gather nodes, so the number of plans that can actually benefit
from this is pretty limited right now. Lots more work is needed.
Nevertheless, it seems time to enable this functionality so that all
this code can actually be tested easily by users and developers.
Note that, if you wish to test this functionality, it will be
necessary to set max_parallel_degree to a value greater than the
default of 0. Once a few more loose ends have been tidied up here, we
might want to consider changing the default value of this GUC, but
I'm leaving it alone for now.
Along the way, fix a bug in cost_gather: the previous coding thought
that a Gather node's transfer overhead should be costed on the basis of
the relation size rather than the number of tuples that actually need
to be passed off to the leader.
Patch by me, reviewed in earlier versions by Amit Kapila.
2015-11-11 15:02:52 +01:00
|
|
|
extern char get_rel_persistence(Oid relid);
|
2015-05-24 03:35:49 +02:00
|
|
|
extern Oid get_transform_fromsql(Oid typid, Oid langid, List *trftypes);
|
|
|
|
extern Oid get_transform_tosql(Oid typid, Oid langid, List *trftypes);
|
2002-03-29 20:06:29 +01:00
|
|
|
extern bool get_typisdefined(Oid typid);
|
1997-09-08 04:41:22 +02:00
|
|
|
extern int16 get_typlen(Oid typid);
|
|
|
|
extern bool get_typbyval(Oid typid);
|
2000-11-16 23:30:52 +01:00
|
|
|
extern void get_typlenbyval(Oid typid, int16 *typlen, bool *typbyval);
|
2002-08-26 19:54:02 +02:00
|
|
|
extern void get_typlenbyvalalign(Oid typid, int16 *typlen, bool *typbyval,
|
2019-05-22 19:04:48 +02:00
|
|
|
char *typalign);
|
2004-06-06 02:41:28 +02:00
|
|
|
extern Oid getTypeIOParam(HeapTuple typeTuple);
|
2003-06-27 02:33:26 +02:00
|
|
|
extern void get_type_io_data(Oid typid,
|
2019-05-22 19:04:48 +02:00
|
|
|
IOFuncSelector which_func,
|
|
|
|
int16 *typlen,
|
|
|
|
bool *typbyval,
|
|
|
|
char *typalign,
|
|
|
|
char *typdelim,
|
|
|
|
Oid *typioparam,
|
|
|
|
Oid *func);
|
2000-11-20 21:36:57 +01:00
|
|
|
extern char get_typstorage(Oid typid);
|
2002-03-20 20:45:13 +01:00
|
|
|
extern Node *get_typdefault(Oid typid);
|
2002-08-05 04:30:50 +02:00
|
|
|
extern char get_typtype(Oid typid);
|
2006-09-28 22:51:43 +02:00
|
|
|
extern bool type_is_rowtype(Oid typid);
|
2007-04-02 05:49:42 +02:00
|
|
|
extern bool type_is_enum(Oid typid);
|
2011-11-03 12:16:28 +01:00
|
|
|
extern bool type_is_range(Oid typid);
|
Replace the hard-wired type knowledge in TypeCategory() and IsPreferredType()
with system catalog lookups, as was foreseen to be necessary almost since
their creation. Instead put the information into two new pg_type columns,
typcategory and typispreferred. Add support for setting these when
creating a user-defined base type.
The category column is just a "char" (i.e. a poor man's enum), allowing
a crude form of user extensibility of the category list: just use an
otherwise-unused character. This seems sufficient for foreseen uses,
but we could upgrade to having an actual category catalog someday, if
there proves to be a huge demand for custom type categories.
In this patch I have attempted to hew exactly to the behavior of the
previous hardwired logic, except for introducing new type categories for
arrays, composites, and enums. In particular the default preferred state
for user-defined types remains TRUE. That seems worth revisiting, but it
should be done as a separate patch from introducing the infrastructure.
Likewise, any adjustment of the standard set of categories should be done
separately.
2008-07-30 19:05:05 +02:00
|
|
|
extern void get_type_category_preferred(Oid typid,
|
2019-05-22 19:04:48 +02:00
|
|
|
char *typcategory,
|
|
|
|
bool *typispreferred);
|
2002-09-20 01:40:56 +02:00
|
|
|
extern Oid get_typ_typrelid(Oid typid);
|
2003-04-09 01:20:04 +02:00
|
|
|
extern Oid get_element_type(Oid typid);
|
|
|
|
extern Oid get_array_type(Oid typid);
|
2014-11-25 18:21:22 +01:00
|
|
|
extern Oid get_promoted_array_type(Oid typid);
|
Improve handling of domains over arrays.
This patch eliminates various bizarre behaviors caused by sloppy thinking
about the difference between a domain type and its underlying array type.
In particular, the operation of updating one element of such an array
has to be considered as yielding a value of the underlying array type,
*not* a value of the domain, because there's no assurance that the
domain's CHECK constraints are still satisfied. If we're intending to
store the result back into a domain column, we have to re-cast to the
domain type so that constraints are re-checked.
For similar reasons, such a domain can't be blindly matched to an ANYARRAY
polymorphic parameter, because the polymorphic function is likely to apply
array-ish operations that could invalidate the domain constraints. For the
moment, we just forbid such matching. We might later wish to insert an
automatic downcast to the underlying array type, but such a change should
also change matching of domains to ANYELEMENT for consistency.
To ensure that all such logic is rechecked, this patch removes the original
hack of setting a domain's pg_type.typelem field to match its base type;
the typelem will always be zero instead. In those places where it's really
okay to look through the domain type with no other logic changes, use the
newly added get_base_element_type function in place of get_element_type.
catversion bumped due to change in pg_type contents.
Per bug #5717 from Richard Huxton and subsequent discussion.
2010-10-21 22:07:17 +02:00
|
|
|
extern Oid get_base_element_type(Oid typid);
|
2004-06-06 02:41:28 +02:00
|
|
|
extern void getTypeInputInfo(Oid type, Oid *typInput, Oid *typIOParam);
|
2005-05-01 20:56:19 +02:00
|
|
|
extern void getTypeOutputInfo(Oid type, Oid *typOutput, bool *typIsVarlena);
|
2004-06-06 02:41:28 +02:00
|
|
|
extern void getTypeBinaryInputInfo(Oid type, Oid *typReceive, Oid *typIOParam);
|
2005-05-01 20:56:19 +02:00
|
|
|
extern void getTypeBinaryOutputInfo(Oid type, Oid *typSend, bool *typIsVarlena);
|
2006-12-30 22:21:56 +01:00
|
|
|
extern Oid get_typmodin(Oid typid);
|
2011-04-10 17:42:00 +02:00
|
|
|
extern Oid get_typcollation(Oid typid);
|
2011-02-08 22:04:18 +01:00
|
|
|
extern bool type_is_collatable(Oid typid);
|
Support subscripting of arbitrary types, not only arrays.
This patch generalizes the subscripting infrastructure so that any
data type can be subscripted, if it provides a handler function to
define what that means. Traditional variable-length (varlena) arrays
all use array_subscript_handler(), while the existing fixed-length
types that support subscripting use raw_array_subscript_handler().
It's expected that other types that want to use subscripting notation
will define their own handlers. (This patch provides no such new
features, though; it only lays the foundation for them.)
To do this, move the parser's semantic processing of subscripts
(including coercion to whatever data type is required) into a
method callback supplied by the handler. On the execution side,
replace the ExecEvalSubscriptingRef* layer of functions with direct
calls to callback-supplied execution routines. (Thus, essentially
no new run-time overhead should be caused by this patch. Indeed,
there is room to remove some overhead by supplying specialized
execution routines. This patch does a little bit in that line,
but more could be done.)
Additional work is required here and there to remove formerly
hard-wired assumptions about the result type, collation, etc
of a SubscriptingRef expression node; and to remove assumptions
that the subscript values must be integers.
One useful side-effect of this is that we now have a less squishy
mechanism for identifying whether a data type is a "true" array:
instead of wiring in weird rules about typlen, we can look to see
if pg_type.typsubscript == F_ARRAY_SUBSCRIPT_HANDLER. For this
to be bulletproof, we have to forbid user-defined types from using
that handler directly; but there seems no good reason for them to
do so.
This patch also removes assumptions that the number of subscripts
is limited to MAXDIM (6), or indeed has any hard-wired limit.
That limit still applies to types handled by array_subscript_handler
or raw_array_subscript_handler, but to discourage other dependencies
on this constant, I've moved it from c.h to utils/array.h.
Dmitry Dolgov, reviewed at various times by Tom Lane, Arthur Zakirov,
Peter Eisentraut, Pavel Stehule
Discussion: https://postgr.es/m/CA+q6zcVDuGBv=M0FqBYX8DPebS3F_0KQ6OVFobGJPM507_SZ_w@mail.gmail.com
Discussion: https://postgr.es/m/CA+q6zcVovR+XY4mfk-7oNk-rF91gH0PebnNfuUjuuDsyHjOcVA@mail.gmail.com
2020-12-09 18:40:37 +01:00
|
|
|
/*
 * Fetch pg_type.typsubscript, the type's subscripting handler function;
 * if typelemp isn't NULL, the type's element type OID is returned there too.
 */
extern RegProcedure get_typsubscript(Oid typid, Oid *typelemp);
|
|
|
|
/*
 * Obtain the SubscriptRoutines callback struct by invoking the type's
 * subscripting handler; optionally also returns the element type OID.
 */
extern const struct SubscriptRoutines *getSubscriptingRoutines(Oid typid,
|
|
|
|
															   Oid *typelemp);
|
2002-09-04 22:31:48 +02:00
|
|
|
/* Look through any domain(s) to find the underlying base type OID */
extern Oid getBaseType(Oid typid);
|
2006-04-06 00:11:58 +02:00
|
|
|
/* As getBaseType, but also returns the effective typmod via *typmod */
extern Oid getBaseTypeAndTypmod(Oid typid, int32 *typmod);
|
2001-05-09 02:35:09 +02:00
|
|
|
/* Estimated average width, in bytes, of values of the given type/typmod */
extern int32 get_typavgwidth(Oid typid, int32 typmod);
|
|
|
|
/* Average-width statistic for a specific column (presumably from pg_statistic — confirm) */
extern int32 get_attavgwidth(Oid relid, AttrNumber attnum);
|
Redesign get_attstatsslot()/free_attstatsslot() for more safety and speed.
The mess cleaned up in commit da0759600 is clear evidence that it's a
bug hazard to expect the caller of get_attstatsslot()/free_attstatsslot()
to provide the correct type OID for the array elements in the slot.
Moreover, we weren't even getting any performance benefit from that,
since get_attstatsslot() was extracting the real type OID from the array
anyway. So we ought to get rid of that requirement; indeed, it would
make more sense for get_attstatsslot() to pass back the type OID it found,
in case the caller isn't sure what to expect, which is likely in binary-
compatible-operator cases.
Another problem with the current implementation is that if the stats array
element type is pass-by-reference, we incur a palloc/memcpy/pfree cycle
for each element. That seemed acceptable when the code was written because
we were targeting O(10) array sizes --- but these days, stats arrays are
almost always bigger than that, sometimes much bigger. We can save a
significant number of cycles by doing one palloc/memcpy/pfree of the whole
array. Indeed, in the now-probably-common case where the array is toasted,
that happens anyway so this method is basically free. (Note: although the
catcache code will inline any out-of-line toasted values, it doesn't
decompress them. At the other end of the size range, it doesn't expand
short-header datums either. In either case, DatumGetArrayTypeP would have
to make a copy. We do end up using an extra array copy step if the element
type is pass-by-value and the array length is neither small enough for a
short header nor large enough to have suffered compression. But that
seems like a very acceptable price for winning in pass-by-ref cases.)
Hence, redesign to take these insights into account. While at it,
convert to an API in which we fill a struct rather than passing a bunch
of pointers to individual output arguments. That will make it less
painful if we ever want further expansion of what get_attstatsslot can
pass back.
It's certainly arguable that this is new development and not something to
push post-feature-freeze. However, I view it as primarily bug-proofing
and therefore something that's better to have sooner not later. Since
we aren't quite at beta phase yet, let's put it in.
Discussion: https://postgr.es/m/16364.1494520862@sss.pgh.pa.us
2017-05-13 21:14:39 +02:00
|
|
|
/*
 * Extract the statistics slot matching reqkind/reqop from a pg_statistic
 * tuple, filling *sslot (including the actual element type OID found);
 * returns false if no matching slot.  Release with free_attstatsslot().
 */
extern bool get_attstatsslot(AttStatsSlot *sslot, HeapTuple statstuple,
|
2019-05-22 19:04:48 +02:00
|
|
|
							 int reqkind, Oid reqop, int flags);
|
Redesign get_attstatsslot()/free_attstatsslot() for more safety and speed.
The mess cleaned up in commit da0759600 is clear evidence that it's a
bug hazard to expect the caller of get_attstatsslot()/free_attstatsslot()
to provide the correct type OID for the array elements in the slot.
Moreover, we weren't even getting any performance benefit from that,
since get_attstatsslot() was extracting the real type OID from the array
anyway. So we ought to get rid of that requirement; indeed, it would
make more sense for get_attstatsslot() to pass back the type OID it found,
in case the caller isn't sure what to expect, which is likely in binary-
compatible-operator cases.
Another problem with the current implementation is that if the stats array
element type is pass-by-reference, we incur a palloc/memcpy/pfree cycle
for each element. That seemed acceptable when the code was written because
we were targeting O(10) array sizes --- but these days, stats arrays are
almost always bigger than that, sometimes much bigger. We can save a
significant number of cycles by doing one palloc/memcpy/pfree of the whole
array. Indeed, in the now-probably-common case where the array is toasted,
that happens anyway so this method is basically free. (Note: although the
catcache code will inline any out-of-line toasted values, it doesn't
decompress them. At the other end of the size range, it doesn't expand
short-header datums either. In either case, DatumGetArrayTypeP would have
to make a copy. We do end up using an extra array copy step if the element
type is pass-by-value and the array length is neither small enough for a
short header nor large enough to have suffered compression. But that
seems like a very acceptable price for winning in pass-by-ref cases.)
Hence, redesign to take these insights into account. While at it,
convert to an API in which we fill a struct rather than passing a bunch
of pointers to individual output arguments. That will make it less
painful if we ever want further expansion of what get_attstatsslot can
pass back.
It's certainly arguable that this is new development and not something to
push post-feature-freeze. However, I view it as primarily bug-proofing
and therefore something that's better to have sooner not later. Since
we aren't quite at beta phase yet, let's put it in.
Discussion: https://postgr.es/m/16364.1494520862@sss.pgh.pa.us
2017-05-13 21:14:39 +02:00
|
|
|
/* Release memory allocated into *sslot by get_attstatsslot() */
extern void free_attstatsslot(AttStatsSlot *sslot);
|
2002-04-02 03:03:07 +02:00
|
|
|
/* Name of the schema (namespace) with the given OID */
extern char *get_namespace_name(Oid nspid);
|
2015-04-06 16:40:55 +02:00
|
|
|
/* Like get_namespace_name, presumably with special handling of temp namespaces — confirm */
extern char *get_namespace_name_or_temp(Oid nspid);
|
2012-06-10 21:20:04 +02:00
|
|
|
/* Subtype (element type) OID of the given range type */
extern Oid get_range_subtype(Oid rangeOid);
|
2020-01-31 23:03:55 +01:00
|
|
|
/* Collation OID used by the given range type */
extern Oid get_range_collation(Oid rangeOid);
|
2018-09-19 00:54:10 +02:00
|
|
|
/* Operator-class OID of the attno'th column of the given index */
extern Oid get_index_column_opclass(Oid index_oid, int attno);
|
2020-05-14 19:06:38 +02:00
|
|
|
/* Is the index the relation's replica identity? (pg_index flag — confirm) */
extern bool get_index_isreplident(Oid index_oid);
|
2020-03-10 07:38:17 +01:00
|
|
|
/* Is the index currently valid for queries? (pg_index flag — confirm) */
extern bool get_index_isvalid(Oid index_oid);
|
2020-04-06 04:03:49 +02:00
|
|
|
/* Is the table currently clustered on this index? (pg_index flag — confirm) */
extern bool get_index_isclustered(Oid index_oid);
|
1996-08-28 03:59:28 +02:00
|
|
|
|
2007-06-07 01:00:50 +02:00
|
|
|
#define type_is_array(typid) (get_element_type(typid) != InvalidOid)
|
Improve handling of domains over arrays.
This patch eliminates various bizarre behaviors caused by sloppy thinking
about the difference between a domain type and its underlying array type.
In particular, the operation of updating one element of such an array
has to be considered as yielding a value of the underlying array type,
*not* a value of the domain, because there's no assurance that the
domain's CHECK constraints are still satisfied. If we're intending to
store the result back into a domain column, we have to re-cast to the
domain type so that constraints are re-checked.
For similar reasons, such a domain can't be blindly matched to an ANYARRAY
polymorphic parameter, because the polymorphic function is likely to apply
array-ish operations that could invalidate the domain constraints. For the
moment, we just forbid such matching. We might later wish to insert an
automatic downcast to the underlying array type, but such a change should
also change matching of domains to ANYELEMENT for consistency.
To ensure that all such logic is rechecked, this patch removes the original
hack of setting a domain's pg_type.typelem field to match its base type;
the typelem will always be zero instead. In those places where it's really
okay to look through the domain type with no other logic changes, use the
newly added get_base_element_type function in place of get_element_type.
catversion bumped due to change in pg_type contents.
Per bug #5717 from Richard Huxton and subsequent discussion.
2010-10-21 22:07:17 +02:00
|
|
|
/*
 * type_is_array_domain accepts both plain arrays and domains over arrays
 * (it tests the base element type, looking through any domain layer).
 */
|
|
|
|
#define type_is_array_domain(typid) (get_base_element_type(typid) != InvalidOid)
|
2003-04-09 01:20:04 +02:00
|
|
|
|
2020-03-04 16:34:25 +01:00
|
|
|
#define TypeIsToastable(typid) (get_typstorage(typid) != TYPSTORAGE_PLAIN)
|
2001-10-28 07:26:15 +01:00
|
|
|
|
Phase 2 of pgindent updates.
Change pg_bsd_indent to follow upstream rules for placement of comments
to the right of code, and remove pgindent hack that caused comments
following #endif to not obey the general rule.
Commit e3860ffa4dd0dad0dd9eea4be9cc1412373a8c89 wasn't actually using
the published version of pg_bsd_indent, but a hacked-up version that
tried to minimize the amount of movement of comments to the right of
code. The situation of interest is where such a comment has to be
moved to the right of its default placement at column 33 because there's
code there. BSD indent has always moved right in units of tab stops
in such cases --- but in the previous incarnation, indent was working
in 8-space tab stops, while now it knows we use 4-space tabs. So the
net result is that in about half the cases, such comments are placed
one tab stop left of before. This is better all around: it leaves
more room on the line for comment text, and it means that in such
cases the comment uniformly starts at the next 4-space tab stop after
the code, rather than sometimes one and sometimes two tabs after.
Also, ensure that comments following #endif are indented the same
as comments following other preprocessor commands such as #else.
That inconsistency turns out to have been self-inflicted damage
from a poorly-thought-through post-indent "fixup" in pgindent.
This patch is much less interesting than the first round of indent
changes, but also bulkier, so I thought it best to separate the effects.
Discussion: https://postgr.es/m/E1dAmxK-0006EE-1r@gemulon.postgresql.org
Discussion: https://postgr.es/m/30527.1495162840@sss.pgh.pa.us
2017-06-21 21:18:54 +02:00
|
|
|
#endif /* LSYSCACHE_H */
|