postgresql/src/backend/commands/indexcmds.c

Ignoring revisions in .git-blame-ignore-revs. Click here to bypass and see the normal blame view.

4419 lines
135 KiB
C
Raw Normal View History

/*-------------------------------------------------------------------------
*
* indexcmds.c
* POSTGRES define and remove index code.
*
* Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
2010-09-20 22:08:53 +02:00
* src/backend/commands/indexcmds.c
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "access/amapi.h"
2019-01-15 00:54:18 +01:00
#include "access/heapam.h"
#include "access/htup_details.h"
#include "access/reloptions.h"
#include "access/sysattr.h"
tableam: Add and use scan APIs. Too allow table accesses to be not directly dependent on heap, several new abstractions are needed. Specifically: 1) Heap scans need to be generalized into table scans. Do this by introducing TableScanDesc, which will be the "base class" for individual AMs. This contains the AM independent fields from HeapScanDesc. The previous heap_{beginscan,rescan,endscan} et al. have been replaced with a table_ version. There's no direct replacement for heap_getnext(), as that returned a HeapTuple, which is undesirable for a other AMs. Instead there's table_scan_getnextslot(). But note that heap_getnext() lives on, it's still used widely to access catalog tables. This is achieved by new scan_begin, scan_end, scan_rescan, scan_getnextslot callbacks. 2) The portion of parallel scans that's shared between backends need to be able to do so without the user doing per-AM work. To achieve that new parallelscan_{estimate, initialize, reinitialize} callbacks are introduced, which operate on a new ParallelTableScanDesc, which again can be subclassed by AMs. As it is likely that several AMs are going to be block oriented, block oriented callbacks that can be shared between such AMs are provided and used by heap. table_block_parallelscan_{estimate, intiialize, reinitialize} as callbacks, and table_block_parallelscan_{nextpage, init} for use in AMs. These operate on a ParallelBlockTableScanDesc. 3) Index scans need to be able to access tables to return a tuple, and there needs to be state across individual accesses to the heap to store state like buffers. That's now handled by introducing a sort-of-scan IndexFetchTable, which again is intended to be subclassed by individual AMs (for heap IndexFetchHeap). The relevant callbacks for an AM are index_fetch_{end, begin, reset} to create the necessary state, and index_fetch_tuple to retrieve an indexed tuple. Note that index_fetch_tuple implementations need to be smarter than just blindly fetching the tuples for AMs that have optimizations similar to heap's HOT - the currently alive tuple in the update chain needs to be fetched if appropriate. Similar to table_scan_getnextslot(), it's undesirable to continue to return HeapTuples. Thus index_fetch_heap (might want to rename that later) now accepts a slot as an argument. Core code doesn't have a lot of call sites performing index scans without going through the systable_* API (in contrast to loads of heap_getnext calls and working directly with HeapTuples). Index scans now store the result of a search in IndexScanDesc->xs_heaptid, rather than xs_ctup->t_self. As the target is not generally a HeapTuple anymore that seems cleaner. To be able to sensible adapt code to use the above, two further callbacks have been introduced: a) slot_callbacks returns a TupleTableSlotOps* suitable for creating slots capable of holding a tuple of the AMs type. table_slot_callbacks() and table_slot_create() are based upon that, but have additional logic to deal with views, foreign tables, etc. While this change could have been done separately, nearly all the call sites that needed to be adapted for the rest of this commit also would have been needed to be adapted for table_slot_callbacks(), making separation not worthwhile. b) tuple_satisfies_snapshot checks whether the tuple in a slot is currently visible according to a snapshot. That's required as a few places now don't have a buffer + HeapTuple around, but a slot (which in heap's case internally has that information). 
Additionally a few infrastructure changes were needed: I) SysScanDesc, as used by systable_{beginscan, getnext} et al. now internally uses a slot to keep track of tuples. While systable_getnext() still returns HeapTuples, and will so for the foreseeable future, the index API (see 1) above) now only deals with slots. The remainder, and largest part, of this commit is then adjusting all scans in postgres to use the new APIs. Author: Andres Freund, Haribabu Kommi, Alvaro Herrera Discussion: https://postgr.es/m/20180703070645.wchpu5muyto5n647@alap3.anarazel.de https://postgr.es/m/20160812231527.GA690404@alvherre.pgsql
2019-03-11 20:46:41 +01:00
#include "access/tableam.h"
#include "access/xact.h"
#include "catalog/catalog.h"
#include "catalog/index.h"
#include "catalog/indexing.h"
#include "catalog/pg_am.h"
#include "catalog/pg_authid.h"
#include "catalog/pg_constraint.h"
#include "catalog/pg_database.h"
#include "catalog/pg_inherits.h"
#include "catalog/pg_namespace.h"
1999-07-16 07:00:38 +02:00
#include "catalog/pg_opclass.h"
#include "catalog/pg_opfamily.h"
#include "catalog/pg_tablespace.h"
#include "catalog/pg_type.h"
#include "commands/comment.h"
2003-06-27 16:45:32 +02:00
#include "commands/dbcommands.h"
#include "commands/defrem.h"
#include "commands/event_trigger.h"
#include "commands/progress.h"
#include "commands/tablecmds.h"
#include "commands/tablespace.h"
#include "mb/pg_wchar.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#include "optimizer/optimizer.h"
#include "parser/parse_coerce.h"
#include "parser/parse_func.h"
#include "parser/parse_oper.h"
#include "partitioning/partdesc.h"
#include "pgstat.h"
#include "rewrite/rewriteManip.h"
#include "storage/lmgr.h"
#include "storage/proc.h"
#include "storage/procarray.h"
#include "storage/sinvaladt.h"
#include "utils/acl.h"
1999-07-16 07:00:38 +02:00
#include "utils/builtins.h"
#include "utils/fmgroids.h"
Split up guc.c for better build speed and ease of maintenance. guc.c has grown to be one of our largest .c files, making it a bottleneck for compilation. It's also acquired a bunch of knowledge that'd be better kept elsewhere, because of our not very good habit of putting variable-specific check hooks here. Hence, split it up along these lines: * guc.c itself retains just the core GUC housekeeping mechanisms. * New file guc_funcs.c contains the SET/SHOW interfaces and some SQL-accessible functions for GUC manipulation. * New file guc_tables.c contains the data arrays that define the built-in GUC variables, along with some already-exported constant tables. * GUC check/assign/show hook functions are moved to the variable's home module, whenever that's clearly identifiable. A few hard- to-classify hooks ended up in commands/variable.c, which was already a home for miscellaneous GUC hook functions. To avoid cluttering a lot more header files with #include "guc.h", I also invented a new header file utils/guc_hooks.h and put all the GUC hook functions' declarations there, regardless of their originating module. That allowed removal of #include "guc.h" from some existing headers. The fallout from that (hopefully all caught here) demonstrates clearly why such inclusions are best minimized: there are a lot of files that, for example, were getting array.h at two or more levels of remove, despite not having any connection at all to GUCs in themselves. There is some very minor code beautification here, such as renaming a couple of inconsistently-named hook functions and improving some comments. But mostly this just moves code from point A to point B and deals with the ensuing needs for #include adjustments and exporting a few functions that previously weren't exported. Patch by me, per a suggestion from Andres Freund; thanks also to Michael Paquier for the idea to invent guc_funcs.c. Discussion: https://postgr.es/m/587607.1662836699@sss.pgh.pa.us
2022-09-13 17:05:07 +02:00
#include "utils/guc.h"
#include "utils/inval.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/partcache.h"
#include "utils/pg_rusage.h"
#include "utils/regproc.h"
#include "utils/snapmgr.h"
1999-07-16 07:00:38 +02:00
#include "utils/syscache.h"
/* non-export function prototypes */
static bool CompareOpclassOptions(Datum *opts1, Datum *opts2, int natts);
static void CheckPredicate(Expr *predicate);
static void ComputeIndexAttrs(IndexInfo *indexInfo,
Oid *typeOidP,
Oid *collationOidP,
Oid *classOidP,
int16 *colOptionP,
List *attList,
List *exclusionOpNames,
Oid relId,
const char *accessMethodName, Oid accessMethodId,
bool amcanorder,
bool isconstraint,
Oid ddl_userid,
int ddl_sec_context,
int *ddl_save_nestlevel);
static char *ChooseIndexName(const char *tabname, Oid namespaceId,
List *colnames, List *exclusionOpNames,
bool primary, bool isconstraint);
static char *ChooseIndexNameAddition(List *colnames);
static List *ChooseIndexColumnNames(List *indexElems);
static void ReindexIndex(RangeVar *indexRelation, ReindexParams *params,
bool isTopLevel);
Improve table locking behavior in the face of current DDL. In the previous coding, callers were faced with an awkward choice: look up the name, do permissions checks, and then lock the table; or look up the name, lock the table, and then do permissions checks. The first choice was wrong because the results of the name lookup and permissions checks might be out-of-date by the time the table lock was acquired, while the second allowed a user with no privileges to interfere with access to a table by users who do have privileges (e.g. if a malicious backend queues up for an AccessExclusiveLock on a table on which AccessShareLock is already held, further attempts to access the table will be blocked until the AccessExclusiveLock is obtained and the malicious backend's transaction rolls back). To fix, allow callers of RangeVarGetRelid() to pass a callback which gets executed after performing the name lookup but before acquiring the relation lock. If the name lookup is retried (because invalidation messages are received), the callback will be re-executed as well, so we get the best of both worlds. RangeVarGetRelid() is renamed to RangeVarGetRelidExtended(); callers not wishing to supply a callback can continue to invoke it as RangeVarGetRelid(), which is now a macro. Since the only one caller that uses nowait = true now passes a callback anyway, the RangeVarGetRelid() macro defaults nowait as well. The callback can also be used for supplemental locking - for example, REINDEX INDEX needs to acquire the table lock before the index lock to reduce deadlock possibilities. There's a lot more work to be done here to fix all the cases where this can be a problem, but this commit provides the general infrastructure and fixes the following specific cases: REINDEX INDEX, REINDEX TABLE, LOCK TABLE, and and DROP TABLE/INDEX/SEQUENCE/VIEW/FOREIGN TABLE. Per discussion with Noah Misch and Alvaro Herrera.
2011-11-30 16:12:27 +01:00
static void RangeVarCallbackForReindexIndex(const RangeVar *relation,
Oid relId, Oid oldRelId, void *arg);
static Oid ReindexTable(RangeVar *relation, ReindexParams *params,
bool isTopLevel);
static void ReindexMultipleTables(const char *objectName,
ReindexObjectType objectKind, ReindexParams *params);
static void reindex_error_callback(void *arg);
static void ReindexPartitions(Oid relid, ReindexParams *params,
bool isTopLevel);
static void ReindexMultipleInternal(List *relids,
ReindexParams *params);
static bool ReindexRelationConcurrently(Oid relationOid,
ReindexParams *params);
static void update_relispartition(Oid relationId, bool newval);
static inline void set_indexsafe_procflags(void);
/*
* callback argument type for RangeVarCallbackForReindexIndex()
*/
struct ReindexIndexCallbackState
{
ReindexParams params; /* options from statement */
Oid locked_table_oid; /* tracks previously locked table */
};
2020-09-08 03:09:22 +02:00
/*
* callback arguments for reindex_error_callback()
*/
typedef struct ReindexErrorInfo
{
char *relname;
char *relnamespace;
char relkind;
} ReindexErrorInfo;
/*
* CheckIndexCompatible
* Determine whether an existing index definition is compatible with a
* prospective index definition, such that the existing index storage
* could become the storage of the new index, avoiding a rebuild.
*
* 'oldId': the OID of the existing index
* 'accessMethodName': name of the AM to use.
* 'attributeList': a list of IndexElem specifying columns and expressions
* to index on.
* 'exclusionOpNames': list of names of exclusion-constraint operators,
* or NIL if not an exclusion constraint.
*
* This is tailored to the needs of ALTER TABLE ALTER TYPE, which recreates
* any indexes that depended on a changing column from their pg_get_indexdef
* or pg_get_constraintdef definitions. We omit some of the sanity checks of
* DefineIndex. We assume that the old and new indexes have the same number
* of columns and that if one has an expression column or predicate, both do.
* Errors arising from the attribute list still apply.
*
* Most column type changes that can skip a table rewrite do not invalidate
* indexes. We acknowledge this when all operator classes, collations and
* exclusion operators match. Though we could further permit intra-opfamily
* changes for btree and hash indexes, that adds subtle complexity with no
* concrete benefit for core types. Note, that INCLUDE columns aren't
* checked by this function, for them it's enough that table rewrite is
* skipped.
*
* When a comparison or exclusion operator has a polymorphic input type, the
* actual input types must also match. This defends against the possibility
* that operators could vary behavior in response to get_fn_expr_argtype().
* At present, this hazard is theoretical: check_exclusion_constraint() and
* all core index access methods decline to set fn_expr for such calls.
*
* We do not yet implement a test to verify compatibility of expression
* columns or predicates, so assume any such index is incompatible.
*/
bool
CheckIndexCompatible(Oid oldId,
const char *accessMethodName,
List *attributeList,
List *exclusionOpNames)
{
bool isconstraint;
Oid *typeObjectId;
Oid *collationObjectId;
Oid *classObjectId;
Oid accessMethodId;
Oid relationId;
HeapTuple tuple;
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY. Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor choice of catalog state representation. The pg_index state for an index that's reached the final pre-drop stage was the same as the state for an index just created by CREATE INDEX CONCURRENTLY. This meant that the (necessary) change to make RelationGetIndexList ignore about-to-die indexes also made it ignore freshly-created indexes; which is catastrophic because the latter do need to be considered in HOT-safety decisions. Failure to do so leads to incorrect index entries and subsequently wrong results from queries depending on the concurrently-created index. To fix, add an additional boolean column "indislive" to pg_index, so that the freshly-created and about-to-die states can be distinguished. (This change obviously is only possible in HEAD. This patch will need to be back-patched, but in 9.2 we'll use a kluge consisting of overloading the formerly-impossible state of indisvalid = true and indisready = false.) In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index flag changes they make without exclusive lock on the index are made via heap_inplace_update() rather than a normal transactional update. The latter is not very safe because moving the pg_index tuple could result in concurrent SnapshotNow scans finding it twice or not at all, thus possibly resulting in index corruption. This is a pre-existing bug in CREATE INDEX CONCURRENTLY, which was copied into the DROP code. In addition, fix various places in the code that ought to check to make sure that the indexes they are manipulating are valid and/or ready as appropriate. These represent bugs that have existed since 8.2, since a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid index behind, and we ought not try to do anything that might fail with such an index. Also fix RelationReloadIndexInfo to ensure it copies all the pg_index columns that are allowed to change after initial creation. Previously we could have been left with stale values of some fields in an index relcache entry. It's not clear whether this actually had any user-visible consequences, but it's at least a bug waiting to happen. In addition, do some code and docs review for DROP INDEX CONCURRENTLY; some cosmetic code cleanup but mostly addition and revision of comments. This will need to be back-patched, but in a noticeably different form, so I'm committing it to HEAD before working on the back-patch. Problem reported by Amit Kapila, diagnosis by Pavan Deolassee, fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
Form_pg_index indexForm;
Form_pg_am accessMethodForm;
IndexAmRoutine *amRoutine;
bool amcanorder;
bool amsummarizing;
int16 *coloptions;
IndexInfo *indexInfo;
int numberOfAttributes;
int old_natts;
bool ret = true;
oidvector *old_indclass;
oidvector *old_indcollation;
Relation irel;
int i;
Datum d;
/* Caller should already have the relation locked in some way. */
relationId = IndexGetRelation(oldId, false);
/*
* We can pretend isconstraint = false unconditionally. It only serves to
* decide the text of an error message that should never happen for us.
*/
isconstraint = false;
numberOfAttributes = list_length(attributeList);
Assert(numberOfAttributes > 0);
Assert(numberOfAttributes <= INDEX_MAX_KEYS);
/* look up the access method */
tuple = SearchSysCache1(AMNAME, PointerGetDatum(accessMethodName));
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("access method \"%s\" does not exist",
accessMethodName)));
accessMethodForm = (Form_pg_am) GETSTRUCT(tuple);
Remove WITH OIDS support, change oid catalog column visibility. Previously tables declared WITH OIDS, including a significant fraction of the catalog tables, stored the oid column not as a normal column, but as part of the tuple header. This special column was not shown by default, which was somewhat odd, as it's often (consider e.g. pg_class.oid) one of the more important parts of a row. Neither pg_dump nor COPY included the contents of the oid column by default. The fact that the oid column was not an ordinary column necessitated a significant amount of special case code to support oid columns. That already was painful for the existing, but upcoming work aiming to make table storage pluggable, would have required expanding and duplicating that "specialness" significantly. WITH OIDS has been deprecated since 2005 (commit ff02d0a05280e0). Remove it. Removing includes: - CREATE TABLE and ALTER TABLE syntax for declaring the table to be WITH OIDS has been removed (WITH (oids[ = true]) will error out) - pg_dump does not support dumping tables declared WITH OIDS and will issue a warning when dumping one (and ignore the oid column). - restoring an pg_dump archive with pg_restore will warn when restoring a table with oid contents (and ignore the oid column) - COPY will refuse to load binary dump that includes oids. - pg_upgrade will error out when encountering tables declared WITH OIDS, they have to be altered to remove the oid column first. - Functionality to access the oid of the last inserted row (like plpgsql's RESULT_OID, spi's SPI_lastoid, ...) has been removed. The syntax for declaring a table WITHOUT OIDS (or WITH (oids = false) for CREATE TABLE) is still supported. While that requires a bit of support code, it seems unnecessary to break applications / dumps that do not use oids, and are explicit about not using them. The biggest user of WITH OID columns was postgres' catalog. This commit changes all 'magic' oid columns to be columns that are normally declared and stored. To reduce unnecessary query breakage all the newly added columns are still named 'oid', even if a table's column naming scheme would indicate 'reloid' or such. This obviously requires adapting a lot code, mostly replacing oid access via HeapTupleGetOid() with access to the underlying Form_pg_*->oid column. The bootstrap process now assigns oids for all oid columns in genbki.pl that do not have an explicit value (starting at the largest oid previously used), only oids assigned later by oids will be above FirstBootstrapObjectId. As the oid column now is a normal column the special bootstrap syntax for oids has been removed. Oids are not automatically assigned during insertion anymore, all backend code explicitly assigns oids with GetNewOidWithIndex(). For the rare case that insertions into the catalog via SQL are called for the new pg_nextoid() function can be used (which only works on catalog tables). The fact that oid columns on system tables are now normal columns means that they will be included in the set of columns expanded by * (i.e. SELECT * FROM pg_class will now include the table's oid, previously it did not). It'd not technically be hard to hide oid column by default, but that'd mean confusing behavior would either have to be carried forward forever, or it'd cause breakage down the line. While it's not unlikely that further adjustments are needed, the scope/invasiveness of the patch makes it worthwhile to get merge this now. 
It's painful to maintain externally, too complicated to commit after the code code freeze, and a dependency of a number of other patches. Catversion bump, for obvious reasons. Author: Andres Freund, with contributions by John Naylor Discussion: https://postgr.es/m/20180930034810.ywp2c7awz7opzcfr@alap3.anarazel.de
2018-11-21 00:36:57 +01:00
accessMethodId = accessMethodForm->oid;
amRoutine = GetIndexAmRoutine(accessMethodForm->amhandler);
ReleaseSysCache(tuple);
amcanorder = amRoutine->amcanorder;
amsummarizing = amRoutine->amsummarizing;
/*
* Compute the operator classes, collations, and exclusion operators for
* the new index, so we can test whether it's compatible with the existing
* one. Note that ComputeIndexAttrs might fail here, but that's OK:
* DefineIndex would have failed later. Our attributeList contains only
* key attributes, thus we're filling ii_NumIndexAttrs and
* ii_NumIndexKeyAttrs with same value.
*/
indexInfo = makeIndexInfo(numberOfAttributes, numberOfAttributes,
accessMethodId, NIL, NIL, false, false,
false, false, amsummarizing);
typeObjectId = palloc_array(Oid, numberOfAttributes);
collationObjectId = palloc_array(Oid, numberOfAttributes);
classObjectId = palloc_array(Oid, numberOfAttributes);
coloptions = palloc_array(int16, numberOfAttributes);
ComputeIndexAttrs(indexInfo,
typeObjectId, collationObjectId, classObjectId,
coloptions, attributeList,
exclusionOpNames, relationId,
accessMethodName, accessMethodId,
amcanorder, isconstraint, InvalidOid, 0, NULL);
/* Get the soon-obsolete pg_index tuple. */
tuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(oldId));
if (!HeapTupleIsValid(tuple))
elog(ERROR, "cache lookup failed for index %u", oldId);
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY. Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor choice of catalog state representation. The pg_index state for an index that's reached the final pre-drop stage was the same as the state for an index just created by CREATE INDEX CONCURRENTLY. This meant that the (necessary) change to make RelationGetIndexList ignore about-to-die indexes also made it ignore freshly-created indexes; which is catastrophic because the latter do need to be considered in HOT-safety decisions. Failure to do so leads to incorrect index entries and subsequently wrong results from queries depending on the concurrently-created index. To fix, add an additional boolean column "indislive" to pg_index, so that the freshly-created and about-to-die states can be distinguished. (This change obviously is only possible in HEAD. This patch will need to be back-patched, but in 9.2 we'll use a kluge consisting of overloading the formerly-impossible state of indisvalid = true and indisready = false.) In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index flag changes they make without exclusive lock on the index are made via heap_inplace_update() rather than a normal transactional update. The latter is not very safe because moving the pg_index tuple could result in concurrent SnapshotNow scans finding it twice or not at all, thus possibly resulting in index corruption. This is a pre-existing bug in CREATE INDEX CONCURRENTLY, which was copied into the DROP code. In addition, fix various places in the code that ought to check to make sure that the indexes they are manipulating are valid and/or ready as appropriate. These represent bugs that have existed since 8.2, since a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid index behind, and we ought not try to do anything that might fail with such an index. Also fix RelationReloadIndexInfo to ensure it copies all the pg_index columns that are allowed to change after initial creation. Previously we could have been left with stale values of some fields in an index relcache entry. It's not clear whether this actually had any user-visible consequences, but it's at least a bug waiting to happen. In addition, do some code and docs review for DROP INDEX CONCURRENTLY; some cosmetic code cleanup but mostly addition and revision of comments. This will need to be back-patched, but in a noticeably different form, so I'm committing it to HEAD before working on the back-patch. Problem reported by Amit Kapila, diagnosis by Pavan Deolassee, fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
indexForm = (Form_pg_index) GETSTRUCT(tuple);
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY. Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor choice of catalog state representation. The pg_index state for an index that's reached the final pre-drop stage was the same as the state for an index just created by CREATE INDEX CONCURRENTLY. This meant that the (necessary) change to make RelationGetIndexList ignore about-to-die indexes also made it ignore freshly-created indexes; which is catastrophic because the latter do need to be considered in HOT-safety decisions. Failure to do so leads to incorrect index entries and subsequently wrong results from queries depending on the concurrently-created index. To fix, add an additional boolean column "indislive" to pg_index, so that the freshly-created and about-to-die states can be distinguished. (This change obviously is only possible in HEAD. This patch will need to be back-patched, but in 9.2 we'll use a kluge consisting of overloading the formerly-impossible state of indisvalid = true and indisready = false.) In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index flag changes they make without exclusive lock on the index are made via heap_inplace_update() rather than a normal transactional update. The latter is not very safe because moving the pg_index tuple could result in concurrent SnapshotNow scans finding it twice or not at all, thus possibly resulting in index corruption. This is a pre-existing bug in CREATE INDEX CONCURRENTLY, which was copied into the DROP code. In addition, fix various places in the code that ought to check to make sure that the indexes they are manipulating are valid and/or ready as appropriate. These represent bugs that have existed since 8.2, since a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid index behind, and we ought not try to do anything that might fail with such an index. Also fix RelationReloadIndexInfo to ensure it copies all the pg_index columns that are allowed to change after initial creation. Previously we could have been left with stale values of some fields in an index relcache entry. It's not clear whether this actually had any user-visible consequences, but it's at least a bug waiting to happen. In addition, do some code and docs review for DROP INDEX CONCURRENTLY; some cosmetic code cleanup but mostly addition and revision of comments. This will need to be back-patched, but in a noticeably different form, so I'm committing it to HEAD before working on the back-patch. Problem reported by Amit Kapila, diagnosis by Pavan Deolassee, fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
/*
* We don't assess expressions or predicates; assume incompatibility.
* Also, if the index is invalid for any reason, treat it as incompatible.
*/
if (!(heap_attisnull(tuple, Anum_pg_index_indpred, NULL) &&
heap_attisnull(tuple, Anum_pg_index_indexprs, NULL) &&
indexForm->indisvalid))
{
ReleaseSysCache(tuple);
return false;
}
/* Any change in operator class or collation breaks compatibility. */
old_natts = indexForm->indnkeyatts;
Assert(old_natts == numberOfAttributes);
d = SysCacheGetAttrNotNull(INDEXRELID, tuple, Anum_pg_index_indcollation);
old_indcollation = (oidvector *) DatumGetPointer(d);
d = SysCacheGetAttrNotNull(INDEXRELID, tuple, Anum_pg_index_indclass);
old_indclass = (oidvector *) DatumGetPointer(d);
ret = (memcmp(old_indclass->values, classObjectId,
old_natts * sizeof(Oid)) == 0 &&
memcmp(old_indcollation->values, collationObjectId,
old_natts * sizeof(Oid)) == 0);
ReleaseSysCache(tuple);
if (!ret)
return false;
/* For polymorphic opcintype, column type changes break compatibility. */
irel = index_open(oldId, AccessShareLock); /* caller probably has a lock */
for (i = 0; i < old_natts; i++)
{
if (IsPolymorphicType(get_opclass_input_type(classObjectId[i])) &&
TupleDescAttr(irel->rd_att, i)->atttypid != typeObjectId[i])
{
ret = false;
break;
}
}
Implement operator class parameters PostgreSQL provides set of template index access methods, where opclasses have much freedom in the semantics of indexing. These index AMs are GiST, GIN, SP-GiST and BRIN. There opclasses define representation of keys, operations on them and supported search strategies. So, it's natural that opclasses may be faced some tradeoffs, which require user-side decision. This commit implements opclass parameters allowing users to set some values, which tell opclass how to index the particular dataset. This commit doesn't introduce new storage in system catalog. Instead it uses pg_attribute.attoptions, which is used for table column storage options but unused for index attributes. In order to evade changing signature of each opclass support function, we implement unified way to pass options to opclass support functions. Options are set to fn_expr as the constant bytea expression. It's possible due to the fact that opclass support functions are executed outside of expressions, so fn_expr is unused for them. This commit comes with some examples of opclass options usage. We parametrize signature length in GiST. That applies to multiple opclasses: tsvector_ops, gist__intbig_ops, gist_ltree_ops, gist__ltree_ops, gist_trgm_ops and gist_hstore_ops. Also we parametrize maximum number of integer ranges for gist__int_ops. However, the main future usage of this feature is expected to be json, where users would be able to specify which way to index particular json parts. Catversion is bumped. Discussion: https://postgr.es/m/d22c3a18-31c7-1879-fc11-4c1ce2f5e5af%40postgrespro.ru Author: Nikita Glukhov, revised by me Reviwed-by: Nikolay Shaplov, Robert Haas, Tom Lane, Tomas Vondra, Alvaro Herrera
2020-03-30 18:17:11 +02:00
/* Any change in opclass options break compatibility. */
if (ret)
{
Datum *opclassOptions = RelationGetIndexRawAttOptions(irel);
ret = CompareOpclassOptions(opclassOptions,
indexInfo->ii_OpclassOptions, old_natts);
if (opclassOptions)
pfree(opclassOptions);
}
/* Any change in exclusion operator selections breaks compatibility. */
if (ret && indexInfo->ii_ExclusionOps != NULL)
{
Oid *old_operators,
*old_procs;
uint16 *old_strats;
RelationGetExclusionInfo(irel, &old_operators, &old_procs, &old_strats);
ret = memcmp(old_operators, indexInfo->ii_ExclusionOps,
old_natts * sizeof(Oid)) == 0;
/* Require an exact input type match for polymorphic operators. */
if (ret)
{
for (i = 0; i < old_natts && ret; i++)
{
Oid left,
right;
op_input_types(indexInfo->ii_ExclusionOps[i], &left, &right);
if ((IsPolymorphicType(left) || IsPolymorphicType(right)) &&
TupleDescAttr(irel->rd_att, i)->atttypid != typeObjectId[i])
{
ret = false;
break;
}
}
}
}
index_close(irel, NoLock);
return ret;
}
Implement operator class parameters PostgreSQL provides set of template index access methods, where opclasses have much freedom in the semantics of indexing. These index AMs are GiST, GIN, SP-GiST and BRIN. There opclasses define representation of keys, operations on them and supported search strategies. So, it's natural that opclasses may be faced some tradeoffs, which require user-side decision. This commit implements opclass parameters allowing users to set some values, which tell opclass how to index the particular dataset. This commit doesn't introduce new storage in system catalog. Instead it uses pg_attribute.attoptions, which is used for table column storage options but unused for index attributes. In order to evade changing signature of each opclass support function, we implement unified way to pass options to opclass support functions. Options are set to fn_expr as the constant bytea expression. It's possible due to the fact that opclass support functions are executed outside of expressions, so fn_expr is unused for them. This commit comes with some examples of opclass options usage. We parametrize signature length in GiST. That applies to multiple opclasses: tsvector_ops, gist__intbig_ops, gist_ltree_ops, gist__ltree_ops, gist_trgm_ops and gist_hstore_ops. Also we parametrize maximum number of integer ranges for gist__int_ops. However, the main future usage of this feature is expected to be json, where users would be able to specify which way to index particular json parts. Catversion is bumped. Discussion: https://postgr.es/m/d22c3a18-31c7-1879-fc11-4c1ce2f5e5af%40postgrespro.ru Author: Nikita Glukhov, revised by me Reviwed-by: Nikolay Shaplov, Robert Haas, Tom Lane, Tomas Vondra, Alvaro Herrera
2020-03-30 18:17:11 +02:00
/*
* CompareOpclassOptions
*
* Compare per-column opclass options which are represented by arrays of text[]
* datums. Both elements of arrays and array themselves can be NULL.
*/
static bool
CompareOpclassOptions(Datum *opts1, Datum *opts2, int natts)
{
int i;
if (!opts1 && !opts2)
return true;
for (i = 0; i < natts; i++)
{
Datum opt1 = opts1 ? opts1[i] : (Datum) 0;
Datum opt2 = opts2 ? opts2[i] : (Datum) 0;
if (opt1 == (Datum) 0)
{
if (opt2 == (Datum) 0)
continue;
else
return false;
}
else if (opt2 == (Datum) 0)
return false;
/* Compare non-NULL text[] datums. */
if (!DatumGetBool(DirectFunctionCall2(array_eq, opt1, opt2)))
return false;
}
return true;
}
/*
* WaitForOlderSnapshots
*
* Wait for transactions that might have an older snapshot than the given xmin
* limit, because it might not contain tuples deleted just before it has
* been taken. Obtain a list of VXIDs of such transactions, and wait for them
* individually. This is used when building an index concurrently.
*
* We can exclude any running transactions that have xmin > the xmin given;
* their oldest snapshot must be newer than our xmin limit.
* We can also exclude any transactions that have xmin = zero, since they
* evidently have no live snapshot at all (and any one they might be in
* process of taking is certainly newer than ours). Transactions in other
* DBs can be ignored too, since they'll never even be able to see the
* index being worked on.
*
* We can also exclude autovacuum processes and processes running manual
* lazy VACUUMs, because they won't be fazed by missing index entries
* either. (Manual ANALYZEs, however, can't be excluded because they
* might be within transactions that are going to do arbitrary operations
* later.) Processes running CREATE INDEX CONCURRENTLY or REINDEX CONCURRENTLY
* on indexes that are neither expressional nor partial are also safe to
* ignore, since we know that those processes won't examine any data
* outside the table they're indexing.
*
* Also, GetCurrentVirtualXIDs never reports our own vxid, so we need not
* check for that.
*
* If a process goes idle-in-transaction with xmin zero, we do not need to
* wait for it anymore, per the above argument. We do not have the
* infrastructure right now to stop waiting if that happens, but we can at
* least avoid the folly of waiting when it is idle at the time we would
* begin to wait. We do this by repeatedly rechecking the output of
* GetCurrentVirtualXIDs. If, during any iteration, a particular vxid
* doesn't show up in the output, we know we can forget about it.
*/
ALTER TABLE ... DETACH PARTITION ... CONCURRENTLY Allow a partition be detached from its partitioned table without blocking concurrent queries, by running in two transactions and only requiring ShareUpdateExclusive in the partitioned table. Because it runs in two transactions, it cannot be used in a transaction block. This is the main reason to use dedicated syntax: so that users can choose to use the original mode if they need it. But also, it doesn't work when a default partition exists (because an exclusive lock would still need to be obtained on it, in order to change its partition constraint.) In case the second transaction is cancelled or a crash occurs, there's ALTER TABLE .. DETACH PARTITION .. FINALIZE, which executes the final steps. The main trick to make this work is the addition of column pg_inherits.inhdetachpending, initially false; can only be set true in the first part of this command. Once that is committed, concurrent transactions that use a PartitionDirectory will include or ignore partitions so marked: in optimizer they are ignored if the row is marked committed for the snapshot; in executor they are always included. As a result, and because of the way PartitionDirectory caches partition descriptors, queries that were planned before the detach will see the rows in the detached partition and queries that are planned after the detach, won't. A CHECK constraint is created that duplicates the partition constraint. This is probably not strictly necessary, and some users will prefer to remove it afterwards, but if the partition is re-attached to a partitioned table, the constraint needn't be rechecked. Author: Álvaro Herrera <alvherre@alvh.no-ip.org> Reviewed-by: Amit Langote <amitlangote09@gmail.com> Reviewed-by: Justin Pryzby <pryzby@telsasoft.com> Discussion: https://postgr.es/m/20200803234854.GA24158@alvherre.pgsql
2021-03-25 22:00:28 +01:00
void
WaitForOlderSnapshots(TransactionId limitXmin, bool progress)
{
int n_old_snapshots;
int i;
VirtualTransactionId *old_snapshots;
old_snapshots = GetCurrentVirtualXIDs(limitXmin, true, false,
PROC_IS_AUTOVACUUM | PROC_IN_VACUUM
| PROC_IN_SAFE_IC,
&n_old_snapshots);
if (progress)
pgstat_progress_update_param(PROGRESS_WAITFOR_TOTAL, n_old_snapshots);
for (i = 0; i < n_old_snapshots; i++)
{
if (!VirtualTransactionIdIsValid(old_snapshots[i]))
continue; /* found uninteresting in previous cycle */
if (i > 0)
{
/* see if anything's changed ... */
VirtualTransactionId *newer_snapshots;
int n_newer_snapshots;
int j;
int k;
newer_snapshots = GetCurrentVirtualXIDs(limitXmin,
true, false,
PROC_IS_AUTOVACUUM | PROC_IN_VACUUM
| PROC_IN_SAFE_IC,
&n_newer_snapshots);
for (j = i; j < n_old_snapshots; j++)
{
if (!VirtualTransactionIdIsValid(old_snapshots[j]))
continue; /* found uninteresting in previous cycle */
for (k = 0; k < n_newer_snapshots; k++)
{
if (VirtualTransactionIdEquals(old_snapshots[j],
newer_snapshots[k]))
break;
}
if (k >= n_newer_snapshots) /* not there anymore */
SetInvalidVirtualTransactionId(old_snapshots[j]);
}
pfree(newer_snapshots);
}
if (VirtualTransactionIdIsValid(old_snapshots[i]))
{
/* If requested, publish who we're going to wait for. */
if (progress)
{
PGPROC *holder = BackendIdGetProc(old_snapshots[i].backendId);
if (holder)
pgstat_progress_update_param(PROGRESS_WAITFOR_CURRENT_PID,
holder->pid);
}
VirtualXactLock(old_snapshots[i], true);
}
if (progress)
pgstat_progress_update_param(PROGRESS_WAITFOR_DONE, i + 1);
}
}
/*
* DefineIndex
* Creates a new index.
*
* This function manages the current userid according to the needs of pg_dump.
* Recreating old-database catalog entries in new-database is fine, regardless
* of which users would have permission to recreate those entries now. That's
* just preservation of state. Running opaque expressions, like calling a
* function named in a catalog entry or evaluating a pg_node_tree in a catalog
* entry, as anyone other than the object owner, is not fine. To adhere to
* those principles and to remain fail-safe, use the table owner userid for
* most ACL checks. Use the original userid for ACL checks reached without
* traversing opaque expressions. (pg_dump can predict such ACL checks from
* catalogs.) Overall, this is a mess. Future DDL development should
* consider offering one DDL command for catalog setup and a separate DDL
* command for steps that run opaque expressions.
*
* 'relationId': the OID of the heap relation on which the index is to be
* created
* 'stmt': IndexStmt describing the properties of the new index.
* 'indexRelationId': normally InvalidOid, but during bootstrap can be
* nonzero to specify a preselected OID for the index.
* 'parentIndexId': the OID of the parent index; InvalidOid if not the child
* of a partitioned index.
* 'parentConstraintId': the OID of the parent constraint; InvalidOid if not
* the child of a constraint (only used when recursing)
* 'total_parts': total number of direct and indirect partitions of relation;
* pass -1 if not known or rel is not partitioned.
* 'is_alter_table': this is due to an ALTER rather than a CREATE operation.
* 'check_rights': check for CREATE rights in namespace and tablespace. (This
* should be true except when ALTER is deleting/recreating an index.)
* 'check_not_in_use': check for table not already in use in current session.
* This should be true unless caller is holding the table open, in which
* case the caller had better have checked it earlier.
* 'skip_build': make the catalog entries but don't create the index files
* 'quiet': suppress the NOTICE chatter ordinarily provided for constraints.
*
* Returns the object address of the created index.
*/
ObjectAddress
DefineIndex(Oid relationId,
IndexStmt *stmt,
Oid indexRelationId,
Oid parentIndexId,
Oid parentConstraintId,
int total_parts,
bool is_alter_table,
bool check_rights,
bool check_not_in_use,
bool skip_build,
bool quiet)
{
Fix concurrent indexing operations with temporary tables Attempting to use CREATE INDEX, DROP INDEX or REINDEX with CONCURRENTLY on a temporary relation with ON COMMIT actions triggered unexpected errors because those operations use multiple transactions internally to complete their work. Here is for example one confusing error when using ON COMMIT DELETE ROWS: ERROR: index "foo" already contains data Issues related to temporary relations and concurrent indexing are fixed in this commit by enforcing the non-concurrent path to be taken for temporary relations even if using CONCURRENTLY, transparently to the user. Using a non-concurrent path does not matter in practice as locks cannot be taken on a temporary relation by a session different than the one owning the relation, and the non-concurrent operation is more effective. The problem exists with REINDEX since v12 with the introduction of CONCURRENTLY, and with CREATE/DROP INDEX since CONCURRENTLY exists for those commands. In all supported versions, this caused only confusing error messages to be generated. Note that with REINDEX, it was also possible to issue a REINDEX CONCURRENTLY for a temporary relation owned by a different session, leading to a server crash. The idea to enforce transparently the non-concurrent code path for temporary relations comes originally from Andres Freund. Reported-by: Manuel Rigger Author: Michael Paquier, Heikki Linnakangas Reviewed-by: Andres Freund, Álvaro Herrera, Heikki Linnakangas Discussion: https://postgr.es/m/CA+u7OA6gP7YAeCguyseusYcc=uR8+ypjCcgDDCTzjQ+k6S9ksQ@mail.gmail.com Backpatch-through: 9.4
2020-01-22 01:49:18 +01:00
bool concurrent;
char *indexRelationName;
char *accessMethodName;
Oid *typeObjectId;
Oid *collationObjectId;
Oid *classObjectId;
Oid accessMethodId;
Oid namespaceId;
Oid tablespaceId;
Oid createdConstraintId = InvalidOid;
List *indexColNames;
List *allIndexParams;
Relation rel;
HeapTuple tuple;
Form_pg_am accessMethodForm;
IndexAmRoutine *amRoutine;
bool amcanorder;
bool amissummarizing;
amoptions_function amoptions;
bool partitioned;
bool safe_index;
Datum reloptions;
int16 *coloptions;
IndexInfo *indexInfo;
bits16 flags;
bits16 constr_flags;
int numberOfAttributes;
int numberOfKeyAttributes;
TransactionId limitXmin;
ObjectAddress address;
LockRelId heaprelid;
LOCKTAG heaplocktag;
LOCKMODE lockmode;
Snapshot snapshot;
Oid root_save_userid;
int root_save_sec_context;
int root_save_nestlevel;
root_save_nestlevel = NewGUCNestLevel();
/*
* Some callers need us to run with an empty default_tablespace; this is a
* necessary hack to be able to reproduce catalog state accurately when
* recreating indexes after table-rewriting ALTER TABLE.
*/
if (stmt->reset_default_tblspc)
(void) set_config_option("default_tablespace", "",
PGC_USERSET, PGC_S_SESSION,
GUC_ACTION_SAVE, true, 0, false);
Fix concurrent indexing operations with temporary tables Attempting to use CREATE INDEX, DROP INDEX or REINDEX with CONCURRENTLY on a temporary relation with ON COMMIT actions triggered unexpected errors because those operations use multiple transactions internally to complete their work. Here is for example one confusing error when using ON COMMIT DELETE ROWS: ERROR: index "foo" already contains data Issues related to temporary relations and concurrent indexing are fixed in this commit by enforcing the non-concurrent path to be taken for temporary relations even if using CONCURRENTLY, transparently to the user. Using a non-concurrent path does not matter in practice as locks cannot be taken on a temporary relation by a session different than the one owning the relation, and the non-concurrent operation is more effective. The problem exists with REINDEX since v12 with the introduction of CONCURRENTLY, and with CREATE/DROP INDEX since CONCURRENTLY exists for those commands. In all supported versions, this caused only confusing error messages to be generated. Note that with REINDEX, it was also possible to issue a REINDEX CONCURRENTLY for a temporary relation owned by a different session, leading to a server crash. The idea to enforce transparently the non-concurrent code path for temporary relations comes originally from Andres Freund. Reported-by: Manuel Rigger Author: Michael Paquier, Heikki Linnakangas Reviewed-by: Andres Freund, Álvaro Herrera, Heikki Linnakangas Discussion: https://postgr.es/m/CA+u7OA6gP7YAeCguyseusYcc=uR8+ypjCcgDDCTzjQ+k6S9ksQ@mail.gmail.com Backpatch-through: 9.4
2020-01-22 01:49:18 +01:00
/*
* Force non-concurrent build on temporary relations, even if CONCURRENTLY
* was requested. Other backends can't access a temporary relation, so
* there's no harm in grabbing a stronger lock, and a non-concurrent DROP
* is more efficient. Do this before any use of the concurrent option is
* done.
*/
if (stmt->concurrent && get_rel_persistence(relationId) != RELPERSISTENCE_TEMP)
concurrent = true;
else
concurrent = false;
/*
* Start progress report. If we're building a partition, this was already
* done.
*/
if (!OidIsValid(parentIndexId))
{
pgstat_progress_start_command(PROGRESS_COMMAND_CREATE_INDEX,
relationId);
pgstat_progress_update_param(PROGRESS_CREATEIDX_COMMAND,
Fix concurrent indexing operations with temporary tables Attempting to use CREATE INDEX, DROP INDEX or REINDEX with CONCURRENTLY on a temporary relation with ON COMMIT actions triggered unexpected errors because those operations use multiple transactions internally to complete their work. Here is for example one confusing error when using ON COMMIT DELETE ROWS: ERROR: index "foo" already contains data Issues related to temporary relations and concurrent indexing are fixed in this commit by enforcing the non-concurrent path to be taken for temporary relations even if using CONCURRENTLY, transparently to the user. Using a non-concurrent path does not matter in practice as locks cannot be taken on a temporary relation by a session different than the one owning the relation, and the non-concurrent operation is more effective. The problem exists with REINDEX since v12 with the introduction of CONCURRENTLY, and with CREATE/DROP INDEX since CONCURRENTLY exists for those commands. In all supported versions, this caused only confusing error messages to be generated. Note that with REINDEX, it was also possible to issue a REINDEX CONCURRENTLY for a temporary relation owned by a different session, leading to a server crash. The idea to enforce transparently the non-concurrent code path for temporary relations comes originally from Andres Freund. Reported-by: Manuel Rigger Author: Michael Paquier, Heikki Linnakangas Reviewed-by: Andres Freund, Álvaro Herrera, Heikki Linnakangas Discussion: https://postgr.es/m/CA+u7OA6gP7YAeCguyseusYcc=uR8+ypjCcgDDCTzjQ+k6S9ksQ@mail.gmail.com Backpatch-through: 9.4
2020-01-22 01:49:18 +01:00
concurrent ?
PROGRESS_CREATEIDX_COMMAND_CREATE_CONCURRENTLY :
PROGRESS_CREATEIDX_COMMAND_CREATE);
}
/*
* No index OID to report yet
*/
pgstat_progress_update_param(PROGRESS_CREATEIDX_INDEX_OID,
InvalidOid);
/*
* count key attributes in index
*/
numberOfKeyAttributes = list_length(stmt->indexParams);
/*
* Calculate the new list of index columns including both key columns and
* INCLUDE columns. Later we can determine which of these are key
* columns, and which are just part of the INCLUDE list by checking the
* list position. A list item in a position less than ii_NumIndexKeyAttrs
* is part of the key columns, and anything equal to and over is part of
* the INCLUDE columns.
*/
allIndexParams = list_concat_copy(stmt->indexParams,
stmt->indexIncludingParams);
numberOfAttributes = list_length(allIndexParams);
if (numberOfKeyAttributes <= 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("must specify at least one column")));
if (numberOfAttributes > INDEX_MAX_KEYS)
ereport(ERROR,
(errcode(ERRCODE_TOO_MANY_COLUMNS),
errmsg("cannot use more than %d columns in an index",
INDEX_MAX_KEYS)));
/*
* Only SELECT ... FOR UPDATE/SHARE are allowed while doing a standard
* index build; but for concurrent builds we allow INSERT/UPDATE/DELETE
* (but not VACUUM).
*
* NB: Caller is responsible for making sure that relationId refers to the
* relation on which the index should be built; except in bootstrap mode,
* this will typically require the caller to have already locked the
* relation. To avoid lock upgrade hazards, that lock should be at least
* as strong as the one we take here.
*
* NB: If the lock strength here ever changes, code that is run by
* parallel workers under the control of certain particular ambuild
* functions will need to be updated, too.
*/
Fix concurrent indexing operations with temporary tables Attempting to use CREATE INDEX, DROP INDEX or REINDEX with CONCURRENTLY on a temporary relation with ON COMMIT actions triggered unexpected errors because those operations use multiple transactions internally to complete their work. Here is for example one confusing error when using ON COMMIT DELETE ROWS: ERROR: index "foo" already contains data Issues related to temporary relations and concurrent indexing are fixed in this commit by enforcing the non-concurrent path to be taken for temporary relations even if using CONCURRENTLY, transparently to the user. Using a non-concurrent path does not matter in practice as locks cannot be taken on a temporary relation by a session different than the one owning the relation, and the non-concurrent operation is more effective. The problem exists with REINDEX since v12 with the introduction of CONCURRENTLY, and with CREATE/DROP INDEX since CONCURRENTLY exists for those commands. In all supported versions, this caused only confusing error messages to be generated. Note that with REINDEX, it was also possible to issue a REINDEX CONCURRENTLY for a temporary relation owned by a different session, leading to a server crash. The idea to enforce transparently the non-concurrent code path for temporary relations comes originally from Andres Freund. Reported-by: Manuel Rigger Author: Michael Paquier, Heikki Linnakangas Reviewed-by: Andres Freund, Álvaro Herrera, Heikki Linnakangas Discussion: https://postgr.es/m/CA+u7OA6gP7YAeCguyseusYcc=uR8+ypjCcgDDCTzjQ+k6S9ksQ@mail.gmail.com Backpatch-through: 9.4
2020-01-22 01:49:18 +01:00
lockmode = concurrent ? ShareUpdateExclusiveLock : ShareLock;
rel = table_open(relationId, lockmode);
/*
* Switch to the table owner's userid, so that any index functions are run
* as that user. Also lock down security-restricted operations. We
* already arranged to make GUC variable changes local to this command.
*/
GetUserIdAndSecContext(&root_save_userid, &root_save_sec_context);
SetUserIdAndSecContext(rel->rd_rel->relowner,
root_save_sec_context | SECURITY_RESTRICTED_OPERATION);
namespaceId = RelationGetNamespace(rel);
/* Ensure that it makes sense to index this kind of relation */
switch (rel->rd_rel->relkind)
{
case RELKIND_RELATION:
case RELKIND_MATVIEW:
case RELKIND_PARTITIONED_TABLE:
/* OK */
break;
default:
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot create index on relation \"%s\"",
RelationGetRelationName(rel)),
errdetail_relkind_not_supported(rel->rd_rel->relkind)));
break;
}
/*
* Establish behavior for partitioned tables, and verify sanity of
* parameters.
*
* We do not build an actual index in this case; we only create a few
* catalog entries. The actual indexes are built by recursing for each
* partition.
*/
partitioned = rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE;
if (partitioned)
{
Fix concurrent indexing operations with temporary tables Attempting to use CREATE INDEX, DROP INDEX or REINDEX with CONCURRENTLY on a temporary relation with ON COMMIT actions triggered unexpected errors because those operations use multiple transactions internally to complete their work. Here is for example one confusing error when using ON COMMIT DELETE ROWS: ERROR: index "foo" already contains data Issues related to temporary relations and concurrent indexing are fixed in this commit by enforcing the non-concurrent path to be taken for temporary relations even if using CONCURRENTLY, transparently to the user. Using a non-concurrent path does not matter in practice as locks cannot be taken on a temporary relation by a session different than the one owning the relation, and the non-concurrent operation is more effective. The problem exists with REINDEX since v12 with the introduction of CONCURRENTLY, and with CREATE/DROP INDEX since CONCURRENTLY exists for those commands. In all supported versions, this caused only confusing error messages to be generated. Note that with REINDEX, it was also possible to issue a REINDEX CONCURRENTLY for a temporary relation owned by a different session, leading to a server crash. The idea to enforce transparently the non-concurrent code path for temporary relations comes originally from Andres Freund. Reported-by: Manuel Rigger Author: Michael Paquier, Heikki Linnakangas Reviewed-by: Andres Freund, Álvaro Herrera, Heikki Linnakangas Discussion: https://postgr.es/m/CA+u7OA6gP7YAeCguyseusYcc=uR8+ypjCcgDDCTzjQ+k6S9ksQ@mail.gmail.com Backpatch-through: 9.4
2020-01-22 01:49:18 +01:00
/*
* Note: we check 'stmt->concurrent' rather than 'concurrent', so that
* the error is thrown also for temporary tables. Seems better to be
* consistent, even though we could do it on temporary table because
* we're not actually doing it concurrently.
*/
if (stmt->concurrent)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot create index on partitioned table \"%s\" concurrently",
RelationGetRelationName(rel))));
if (stmt->excludeOpNames)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot create exclusion constraints on partitioned table \"%s\"",
RelationGetRelationName(rel))));
}
/*
* Don't try to CREATE INDEX on temp tables of other backends.
*/
if (RELATION_IS_OTHER_TEMP(rel))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot create indexes on temporary tables of other sessions")));
/*
* Unless our caller vouches for having checked this already, insist that
* the table not be in use by our own session, either. Otherwise we might
* fail to make entries in the new index (for instance, if an INSERT or
* UPDATE is in progress and has already made its list of target indexes).
*/
if (check_not_in_use)
CheckTableNotInUse(rel, "CREATE INDEX");
/*
* Verify we (still) have CREATE rights in the rel's namespace.
* (Presumably we did when the rel was created, but maybe not anymore.)
* Skip check if caller doesn't want it. Also skip check if
* bootstrapping, since permissions machinery may not be working yet.
*/
if (check_rights && !IsBootstrapProcessingMode())
{
AclResult aclresult;
aclresult = object_aclcheck(NamespaceRelationId, namespaceId, root_save_userid,
ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_SCHEMA,
get_namespace_name(namespaceId));
}
/*
* Select tablespace to use. If not specified, use default tablespace
* (which may in turn default to database's default).
*/
if (stmt->tableSpace)
{
tablespaceId = get_tablespace_oid(stmt->tableSpace, false);
if (partitioned && tablespaceId == MyDatabaseTableSpace)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot specify default tablespace for partitioned relations")));
}
else
{
tablespaceId = GetDefaultTablespace(rel->rd_rel->relpersistence,
partitioned);
/* note InvalidOid is OK in this case */
}
/* Check tablespace permissions */
if (check_rights &&
OidIsValid(tablespaceId) && tablespaceId != MyDatabaseTableSpace)
{
AclResult aclresult;
aclresult = object_aclcheck(TableSpaceRelationId, tablespaceId, root_save_userid,
ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_TABLESPACE,
get_tablespace_name(tablespaceId));
}
/*
* Force shared indexes into the pg_global tablespace. This is a bit of a
* hack but seems simpler than marking them in the BKI commands. On the
* other hand, if it's not shared, don't allow it to be placed there.
*/
if (rel->rd_rel->relisshared)
tablespaceId = GLOBALTABLESPACE_OID;
else if (tablespaceId == GLOBALTABLESPACE_OID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("only shared relations can be placed in pg_global tablespace")));
/*
* Choose the index column names.
*/
indexColNames = ChooseIndexColumnNames(allIndexParams);
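/*
 * (For example, an index on (id, lower(name)) typically gets the column
 * names "id" and "lower"; duplicate names are disambiguated by appending
 * a number.)
 */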
/*
* Select name for index if caller didn't specify
*/
indexRelationName = stmt->idxname;
if (indexRelationName == NULL)
indexRelationName = ChooseIndexName(RelationGetRelationName(rel),
namespaceId,
indexColNames,
stmt->excludeOpNames,
stmt->primary,
stmt->isconstraint);
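/*
 * (The chosen name follows the usual conventions: e.g. "t_pkey" for a
 * primary key, "t_col_key" for a unique constraint, "t_col_excl" for an
 * exclusion constraint, and "t_col_idx" otherwise.)
 */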
/*
* look up the access method, verify it can handle the requested features
*/
accessMethodName = stmt->accessMethod;
tuple = SearchSysCache1(AMNAME, PointerGetDatum(accessMethodName));
if (!HeapTupleIsValid(tuple))
{
/*
* Hack to provide more-or-less-transparent updating of old RTREE
* indexes to GiST: if RTREE is requested and not found, use GIST.
*/
if (strcmp(accessMethodName, "rtree") == 0)
{
ereport(NOTICE,
(errmsg("substituting access method \"gist\" for obsolete method \"rtree\"")));
accessMethodName = "gist";
tuple = SearchSysCache1(AMNAME, PointerGetDatum(accessMethodName));
}
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("access method \"%s\" does not exist",
accessMethodName)));
}
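/*
 * (So CREATE INDEX ... USING rtree (col) emits the NOTICE above and
 * builds a GiST index rather than failing.)
 */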
accessMethodForm = (Form_pg_am) GETSTRUCT(tuple);
accessMethodId = accessMethodForm->oid;
amRoutine = GetIndexAmRoutine(accessMethodForm->amhandler);
pgstat_progress_update_param(PROGRESS_CREATEIDX_ACCESS_METHOD_OID,
accessMethodId);
if (stmt->unique && !amRoutine->amcanunique)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("access method \"%s\" does not support unique indexes",
accessMethodName)));
if (stmt->indexIncludingParams != NIL && !amRoutine->amcaninclude)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("access method \"%s\" does not support included columns",
accessMethodName)));
if (numberOfKeyAttributes > 1 && !amRoutine->amcanmulticol)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("access method \"%s\" does not support multicolumn indexes",
accessMethodName)));
if (stmt->excludeOpNames && amRoutine->amgettuple == NULL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("access method \"%s\" does not support exclusion constraints",
accessMethodName)));
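/*
 * (For instance, GIN reports amcanunique = false, so CREATE UNIQUE INDEX
 * ... USING gin fails here; as of this writing BRIN is the only in-core
 * summarizing AM.)
 */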
amcanorder = amRoutine->amcanorder;
amoptions = amRoutine->amoptions;
amissummarizing = amRoutine->amsummarizing;
pfree(amRoutine);
ReleaseSysCache(tuple);
/*
* Validate predicate, if given
*/
if (stmt->whereClause)
CheckPredicate((Expr *) stmt->whereClause);
/*
* Parse AM-specific options, convert to text array form, validate.
*/
reloptions = transformRelOptions((Datum) 0, stmt->options,
NULL, NULL, false, false);
(void) index_reloptions(amoptions, reloptions, true);
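/*
 * (For example, WITH (fillfactor = 70) on a btree index arrives here via
 * stmt->options and is validated against the AM's reloption table.)
 */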
/*
* Prepare arguments for index_create, primarily an IndexInfo structure.
* Note that predicates must be in implicit-AND format. In a concurrent
* build, mark it not-ready-for-inserts.
*/
indexInfo = makeIndexInfo(numberOfAttributes,
numberOfKeyAttributes,
accessMethodId,
NIL, /* expressions, NIL for now */
make_ands_implicit((Expr *) stmt->whereClause),
stmt->unique,
stmt->nulls_not_distinct,
!concurrent,
concurrent,
amissummarizing);
typeObjectId = palloc_array(Oid, numberOfAttributes);
collationObjectId = palloc_array(Oid, numberOfAttributes);
classObjectId = palloc_array(Oid, numberOfAttributes);
coloptions = palloc_array(int16, numberOfAttributes);
ComputeIndexAttrs(indexInfo,
typeObjectId, collationObjectId, classObjectId,
coloptions, allIndexParams,
stmt->excludeOpNames, relationId,
accessMethodName, accessMethodId,
amcanorder, stmt->isconstraint, root_save_userid,
root_save_sec_context, &root_save_nestlevel);
/*
* Extra checks when creating a PRIMARY KEY index.
*/
if (stmt->primary)
index_check_primary_key(rel, indexInfo, is_alter_table, stmt);
/*
* If this table is partitioned and we're creating a unique index or a
* primary key, make sure that the partition key is a subset of the
* index's columns. Otherwise it would be possible to violate uniqueness
* by putting values that ought to be unique in different partitions.
*
* We could lift this limitation if we had global indexes, but those have
* their own problems, so requiring the partition key to be part of the
* index seems a reasonable trade-off.
*/
if (partitioned && (stmt->unique || stmt->primary))
{
PartitionKey key = RelationGetPartitionKey(rel);
const char *constraint_type;
int i;
if (stmt->primary)
constraint_type = "PRIMARY KEY";
else if (stmt->unique)
constraint_type = "UNIQUE";
else if (stmt->excludeOpNames != NIL)
constraint_type = "EXCLUDE";
else
{
elog(ERROR, "unknown constraint type");
constraint_type = NULL; /* keep compiler quiet */
}
/*
* Verify that all the columns in the partition key appear in the
* unique key definition, with the same notion of equality.
*/
for (i = 0; i < key->partnatts; i++)
{
bool found = false;
int eq_strategy;
Oid ptkey_eqop;
int j;
/*
* Identify the equality operator associated with this partkey
* column. For list and range partitioning, partkeys use btree
* operator classes; hash partitioning uses hash operator classes.
* (Keep this in sync with ComputePartitionAttrs!)
*/
if (key->strategy == PARTITION_STRATEGY_HASH)
eq_strategy = HTEqualStrategyNumber;
else
eq_strategy = BTEqualStrategyNumber;
ptkey_eqop = get_opfamily_member(key->partopfamily[i],
key->partopcintype[i],
key->partopcintype[i],
eq_strategy);
if (!OidIsValid(ptkey_eqop))
elog(ERROR, "missing operator %d(%u,%u) in partition opfamily %u",
eq_strategy, key->partopcintype[i], key->partopcintype[i],
key->partopfamily[i]);
/*
* We'll need to be able to identify the equality operators
* associated with index columns, too. We know what to do with
* btree opclasses; if there are ever any other index types that
* support unique indexes, this logic will need extension.
*/
if (accessMethodId == BTREE_AM_OID)
eq_strategy = BTEqualStrategyNumber;
else
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot match partition key to an index using access method \"%s\"",
accessMethodName)));
/*
* It may be possible to support UNIQUE constraints when partition
* keys are expressions, but is it worth it? Give up for now.
*/
if (key->partattrs[i] == 0)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("unsupported %s constraint with partition key definition",
constraint_type),
errdetail("%s constraints cannot be used when partition keys include expressions.",
constraint_type)));
/* Search the index column(s) for a match */
for (j = 0; j < indexInfo->ii_NumIndexKeyAttrs; j++)
{
if (key->partattrs[i] == indexInfo->ii_IndexAttrNumbers[j])
{
/* Matched the column, now what about the equality op? */
Oid idx_opfamily;
Oid idx_opcintype;
if (get_opclass_opfamily_and_input_type(classObjectId[j],
&idx_opfamily,
&idx_opcintype))
{
Oid idx_eqop;
idx_eqop = get_opfamily_member(idx_opfamily,
idx_opcintype,
idx_opcintype,
eq_strategy);
if (ptkey_eqop == idx_eqop)
{
found = true;
break;
}
}
}
}
if (!found)
{
Form_pg_attribute att;
att = TupleDescAttr(RelationGetDescr(rel),
key->partattrs[i] - 1);
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("unique constraint on partitioned table must include all partitioning columns"),
errdetail("%s constraint on table \"%s\" lacks column \"%s\" which is part of the partition key.",
constraint_type, RelationGetRelationName(rel),
NameStr(att->attname))));
}
}
}
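/*
 * (For example, given CREATE TABLE t (a int, b int) PARTITION BY RANGE
 * (a), a CREATE UNIQUE INDEX ON t (b) is rejected above because the
 * index does not include the partitioning column "a".)
 */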
/*
* We disallow indexes on system columns. They would not necessarily get
* updated correctly, and they don't seem useful anyway.
*/
for (int i = 0; i < indexInfo->ii_NumIndexAttrs; i++)
{
AttrNumber attno = indexInfo->ii_IndexAttrNumbers[i];
if (attno < 0)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("index creation on system columns is not supported")));
}
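/* (This rejects e.g. CREATE INDEX ON t (ctid), since ctid has attno -1.) */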
/*
* Also check for system columns used in expressions or predicates.
*/
if (indexInfo->ii_Expressions || indexInfo->ii_Predicate)
{
Bitmapset *indexattrs = NULL;
pull_varattnos((Node *) indexInfo->ii_Expressions, 1, &indexattrs);
pull_varattnos((Node *) indexInfo->ii_Predicate, 1, &indexattrs);
for (int i = FirstLowInvalidHeapAttributeNumber + 1; i < 0; i++)
{
if (bms_is_member(i - FirstLowInvalidHeapAttributeNumber,
indexattrs))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("index creation on system columns is not supported")));
}
}
/* Is index safe for others to ignore? See set_indexsafe_procflags() */
safe_index = indexInfo->ii_Expressions == NIL &&
indexInfo->ii_Predicate == NIL;
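/*
 * (Builds involving expressions or predicates are treated as unsafe,
 * presumably because they can execute arbitrary user-defined code.)
 */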
/*
* Report index creation if appropriate (delay this till after most of the
* error checks)
*/
if (stmt->isconstraint && !quiet)
{
const char *constraint_type;
if (stmt->primary)
constraint_type = "PRIMARY KEY";
else if (stmt->unique)
constraint_type = "UNIQUE";
else if (stmt->excludeOpNames != NIL)
constraint_type = "EXCLUDE";
else
{
elog(ERROR, "unknown constraint type");
constraint_type = NULL; /* keep compiler quiet */
}
ereport(DEBUG1,
(errmsg_internal("%s %s will create implicit index \"%s\" for table \"%s\"",
is_alter_table ? "ALTER TABLE / ADD" : "CREATE TABLE /",
constraint_type,
indexRelationName, RelationGetRelationName(rel))));
}
/*
* A valid stmt->oldNumber implies that we already have a built form of
* the index. The caller should also decline any index build.
*/
Assert(!RelFileNumberIsValid(stmt->oldNumber) || (skip_build && !concurrent));
/*
* Make the catalog entries for the index, including constraints. This
* step also actually builds the index, unless the caller requested not
* to, we are in concurrent mode (the build happens later), or the index
* is partitioned (those have no storage).
*/
flags = constr_flags = 0;
if (stmt->isconstraint)
flags |= INDEX_CREATE_ADD_CONSTRAINT;
if (skip_build || concurrent || partitioned)
flags |= INDEX_CREATE_SKIP_BUILD;
if (stmt->if_not_exists)
flags |= INDEX_CREATE_IF_NOT_EXISTS;
if (concurrent)
flags |= INDEX_CREATE_CONCURRENT;
if (partitioned)
flags |= INDEX_CREATE_PARTITIONED;
if (stmt->primary)
flags |= INDEX_CREATE_IS_PRIMARY;
/*
* If the table is partitioned, and recursion was declined but partitions
* exist, mark the index as invalid.
*/
if (partitioned && stmt->relation && !stmt->relation->inh)
{
PartitionDesc pd = RelationGetPartitionDesc(rel, true);
if (pd->nparts != 0)
flags |= INDEX_CREATE_INVALID;
}
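/*
 * (This is the CREATE INDEX ... ON ONLY case: the index starts out
 * invalid and becomes valid once a matching index has been attached for
 * every partition via ALTER INDEX ... ATTACH PARTITION.)
 */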
if (stmt->deferrable)
constr_flags |= INDEX_CONSTR_CREATE_DEFERRABLE;
if (stmt->initdeferred)
constr_flags |= INDEX_CONSTR_CREATE_INIT_DEFERRED;
indexRelationId =
index_create(rel, indexRelationName, indexRelationId, parentIndexId,
parentConstraintId,
stmt->oldNumber, indexInfo, indexColNames,
accessMethodId, tablespaceId,
collationObjectId, classObjectId,
coloptions, reloptions,
flags, constr_flags,
allowSystemTableMods, !check_rights,
&createdConstraintId);
ObjectAddressSet(address, RelationRelationId, indexRelationId);
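/*
 * index_create returns InvalidOid here only when IF NOT EXISTS was given
 * and an index of that name already existed, so just clean up and exit.
 */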
if (!OidIsValid(indexRelationId))
{
/*
* Roll back any GUC changes executed by index functions. Also revert
* to original default_tablespace if we changed it above.
*/
AtEOXact_GUC(false, root_save_nestlevel);
/* Restore userid and security context */
SetUserIdAndSecContext(root_save_userid, root_save_sec_context);
table_close(rel, NoLock);
/* If this is the top-level index, we're done */
if (!OidIsValid(parentIndexId))
pgstat_progress_end_command();
return address;
}
/*
* Roll back any GUC changes executed by index functions, and keep
* subsequent changes local to this command. This is essential if some
* index function changed a behavior-affecting GUC, e.g. search_path.
*/
AtEOXact_GUC(false, root_save_nestlevel);
root_save_nestlevel = NewGUCNestLevel();
/* Add any requested comment */
if (stmt->idxcomment != NULL)
CreateComments(indexRelationId, RelationRelationId, 0,
stmt->idxcomment);
if (partitioned)
{
PartitionDesc partdesc;
/*
* Unless caller specified to skip this step (via ONLY), process each
* partition to make sure they all contain a corresponding index.
*
* If we're called internally (no stmt->relation), recurse always.
*/
partdesc = RelationGetPartitionDesc(rel, true);
if ((!stmt->relation || stmt->relation->inh) && partdesc->nparts > 0)
{
int nparts = partdesc->nparts;
Oid *part_oids = palloc_array(Oid, nparts);
bool invalidate_parent = false;
Relation parentIndex;
TupleDesc parentDesc;
/*
* Report the total number of partitions at the start of the
* command; don't update it when being called recursively.
*/
if (!OidIsValid(parentIndexId))
{
/*
* When called by ProcessUtilitySlow, the number of partitions
* is passed in as an optimization; but other callers pass -1
* since they don't have the value handy. This should count
* partitions the same way, ie one less than the number of
* relations find_all_inheritors reports.
*
* We assume we needn't ask find_all_inheritors to take locks,
* because that should have happened already for all callers.
* Even if it did not, this is safe as long as we don't try to
* touch the partitions here; the worst consequence would be a
* bogus progress-reporting total.
*/
if (total_parts < 0)
{
List *children = find_all_inheritors(relationId,
NoLock, NULL);
total_parts = list_length(children) - 1;
list_free(children);
}
pgstat_progress_update_param(PROGRESS_CREATEIDX_PARTITIONS_TOTAL,
total_parts);
}
/* Make a local copy of partdesc->oids[], just for safety */
memcpy(part_oids, partdesc->oids, sizeof(Oid) * nparts);
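/*
 * (Presumably the copy guards against partdesc being rebuilt under us by
 * relcache invalidation while we open and index each partition below.)
 */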
/*
* We'll need an IndexInfo describing the parent index. The one
* built above is almost good enough, but not quite, because (for
* example) its predicate expression if any hasn't been through
* expression preprocessing. The most reliable way to get an
* IndexInfo that will match those for child indexes is to build
* it the same way, using BuildIndexInfo().
*/
parentIndex = index_open(indexRelationId, lockmode);
indexInfo = BuildIndexInfo(parentIndex);
parentDesc = RelationGetDescr(rel);
/*
* For each partition, scan all existing indexes; if one matches
* our index definition and is not already attached to some other
* parent index, attach it to the one we just created.
*
* If none matches, build a new index by calling ourselves
* recursively with the same options (except for the index name).
*/
for (int i = 0; i < nparts; i++)
{
Oid childRelid = part_oids[i];
Relation childrel;
Oid child_save_userid;
int child_save_sec_context;
int child_save_nestlevel;
List *childidxs;
ListCell *cell;
AttrMap *attmap;
bool found = false;
childrel = table_open(childRelid, lockmode);
GetUserIdAndSecContext(&child_save_userid,
&child_save_sec_context);
SetUserIdAndSecContext(childrel->rd_rel->relowner,
child_save_sec_context | SECURITY_RESTRICTED_OPERATION);
child_save_nestlevel = NewGUCNestLevel();
/*
* Don't try to create indexes on foreign tables, though. Skip them
* when creating a regular index, or fail when creating a constraint
* index.
*/
if (childrel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
{
if (stmt->unique || stmt->primary)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot create unique index on partitioned table \"%s\"",
RelationGetRelationName(rel)),
errdetail("Table \"%s\" contains partitions that are foreign tables.",
RelationGetRelationName(rel))));
AtEOXact_GUC(false, child_save_nestlevel);
SetUserIdAndSecContext(child_save_userid,
child_save_sec_context);
table_close(childrel, lockmode);
continue;
}
childidxs = RelationGetIndexList(childrel);
attmap =
build_attrmap_by_name(RelationGetDescr(childrel),
parentDesc,
false);
foreach(cell, childidxs)
{
Oid cldidxid = lfirst_oid(cell);
Relation cldidx;
IndexInfo *cldIdxInfo;
/* this index is already partition of another one */
if (has_superclass(cldidxid))
continue;
cldidx = index_open(cldidxid, lockmode);
cldIdxInfo = BuildIndexInfo(cldidx);
if (CompareIndexInfo(cldIdxInfo, indexInfo,
cldidx->rd_indcollation,
parentIndex->rd_indcollation,
cldidx->rd_opfamily,
parentIndex->rd_opfamily,
attmap))
{
Oid cldConstrOid = InvalidOid;
/*
* Found a match.
*
* If this index is being created in the parent
* because of a constraint, then the child needs to
* have a constraint also, so look for one. If there
* is no such constraint, this index is no good, so
* keep looking.
*/
if (createdConstraintId != InvalidOid)
{
cldConstrOid =
get_relation_idx_constraint_oid(childRelid,
cldidxid);
if (cldConstrOid == InvalidOid)
{
index_close(cldidx, lockmode);
continue;
}
}
/* Attach index to parent and we're done. */
IndexSetParentIndex(cldidx, indexRelationId);
if (createdConstraintId != InvalidOid)
ConstraintSetParentConstraint(cldConstrOid,
createdConstraintId,
childRelid);
if (!cldidx->rd_index->indisvalid)
invalidate_parent = true;
found = true;
/*
* Report this partition as processed. Note that if
* the partition has children itself, we'd ideally
* count the children and update the progress report
* for all of them; but that seems unduly expensive.
* Instead, the progress report will act like all such
* indirect children were processed in zero time at
* the end of the command.
*/
pgstat_progress_incr_param(PROGRESS_CREATEIDX_PARTITIONS_DONE, 1);
/* keep lock till commit */
index_close(cldidx, NoLock);
break;
}
index_close(cldidx, lockmode);
}
list_free(childidxs);
AtEOXact_GUC(false, child_save_nestlevel);
SetUserIdAndSecContext(child_save_userid,
child_save_sec_context);
table_close(childrel, NoLock);
/*
* If no matching index was found, create our own.
*/
if (!found)
{
IndexStmt *childStmt = copyObject(stmt);
bool found_whole_row;
ListCell *lc;
/*
* We can't use the same index name for the child index,
* so clear idxname to let the recursive invocation choose
* a new name. Likewise, the existing target relation
* field is wrong, and if indexOid or oldNumber are set,
* they mustn't be applied to the child either.
*/
childStmt->idxname = NULL;
childStmt->relation = NULL;
childStmt->indexOid = InvalidOid;
childStmt->oldNumber = InvalidRelFileNumber;
childStmt->oldCreateSubid = InvalidSubTransactionId;
childStmt->oldFirstRelfilelocatorSubid = InvalidSubTransactionId;
/*
* Adjust any Vars (both in expressions and in the index's
* WHERE clause) to match the partition's column numbering
* in case it's different from the parent's.
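* (For instance, a column that is attnum 3 in the parent may be
* attnum 5 in this partition if their column histories differ;
* attmap records that correspondence so the Vars can be renumbered.)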
*/
foreach(lc, childStmt->indexParams)
{
IndexElem *ielem = lfirst(lc);
/*
* If the index parameter is an expression, we must
* translate it to contain child Vars.
*/
if (ielem->expr)
{
ielem->expr =
map_variable_attnos((Node *) ielem->expr,
1, 0, attmap,
InvalidOid,
&found_whole_row);
if (found_whole_row)
elog(ERROR, "cannot convert whole-row table reference");
}
}
childStmt->whereClause =
map_variable_attnos(stmt->whereClause, 1, 0,
attmap,
InvalidOid, &found_whole_row);
if (found_whole_row)
elog(ERROR, "cannot convert whole-row table reference");
/*
* Recurse as the starting user ID. Callee will use that
* for permission checks, then switch again.
*/
Assert(GetUserId() == child_save_userid);
SetUserIdAndSecContext(root_save_userid,
root_save_sec_context);
DefineIndex(childRelid, childStmt,
InvalidOid, /* no predefined OID */
indexRelationId, /* this is our child */
createdConstraintId,
-1, /* total_parts unknown */
is_alter_table, check_rights, check_not_in_use,
skip_build, quiet);
SetUserIdAndSecContext(child_save_userid,
child_save_sec_context);
}
free_attrmap(attmap);
}
index_close(parentIndex, lockmode);
/*
* The pg_index row we inserted for this index was marked
* indisvalid=true. But if we attached an existing index that is
* invalid, this is incorrect, so update our row to invalid too.
*/
if (invalidate_parent)
{
Relation pg_index = table_open(IndexRelationId, RowExclusiveLock);
HeapTuple tup,
newtup;
tup = SearchSysCache1(INDEXRELID,
ObjectIdGetDatum(indexRelationId));
if (!HeapTupleIsValid(tup))
elog(ERROR, "cache lookup failed for index %u",
indexRelationId);
newtup = heap_copytuple(tup);
((Form_pg_index) GETSTRUCT(newtup))->indisvalid = false;
CatalogTupleUpdate(pg_index, &tup->t_self, newtup);
ReleaseSysCache(tup);
table_close(pg_index, RowExclusiveLock);
heap_freetuple(newtup);
}
}
/*
* Indexes on partitioned tables are not themselves built, so we're
* done here.
*/
AtEOXact_GUC(false, root_save_nestlevel);
SetUserIdAndSecContext(root_save_userid, root_save_sec_context);
table_close(rel, NoLock);
if (!OidIsValid(parentIndexId))
pgstat_progress_end_command();
else
{
/* Update progress for an intermediate partitioned index itself */
pgstat_progress_incr_param(PROGRESS_CREATEIDX_PARTITIONS_DONE, 1);
}
return address;
}
AtEOXact_GUC(false, root_save_nestlevel);
SetUserIdAndSecContext(root_save_userid, root_save_sec_context);
if (!concurrent)
{
/* Close the heap and we're done, in the non-concurrent case */
table_close(rel, NoLock);
/*
* If this is the top-level index, the command is done overall;
* otherwise, increment progress to report one child index is done.
*/
if (!OidIsValid(parentIndexId))
pgstat_progress_end_command();
else
pgstat_progress_incr_param(PROGRESS_CREATEIDX_PARTITIONS_DONE, 1);
return address;
}
/* save lockrelid and locktag for below, then close rel */
heaprelid = rel->rd_lockInfo.lockRelId;
SET_LOCKTAG_RELATION(heaplocktag, heaprelid.dbId, heaprelid.relId);
table_close(rel, NoLock);
/*
* For a concurrent build, it's important to make the catalog entries
* visible to other transactions before we start to build the index. That
* will prevent them from making incompatible HOT updates. The new index
* will be marked not indisready and not indisvalid, so that no one else
* tries to either insert into it or use it for queries.
*
* We must commit our current transaction so that the index becomes
* visible; then start another. Note that all the data structures we just
* built are lost in the commit. The only data we keep past here are the
* relation IDs.
*
* Before committing, get a session-level lock on the table, to ensure
* that neither it nor the index can be dropped before we finish. This
* cannot block, even if someone else is waiting for access, because we
* already have the same lock within our transaction.
*
* Note: we don't currently bother with a session lock on the index,
* because there are no operations that could change its state while we
* hold lock on the parent table. This might need to change later.
*/
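/*
* In outline, then, a concurrent build spans three transactions,
* roughly:
*   xact 1: create the catalog entries (indisready and indisvalid
*           both false), commit to make them visible
*   xact 2: wait out writers, build the index, set indisready
*   xact 3: wait out writers, validate_index(), wait out older
*           snapshots, set indisvalid
*/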
LockRelationIdForSession(&heaprelid, ShareUpdateExclusiveLock);
PopActiveSnapshot();
CommitTransactionCommand();
StartTransactionCommand();
/* Tell concurrent index builds to ignore us, if index qualifies */
if (safe_index)
set_indexsafe_procflags();
/*
* The index is now visible, so we can report the OID. While at it,
* include the report for the beginning of phase 2.
*/
{
const int progress_cols[] = {
PROGRESS_CREATEIDX_INDEX_OID,
PROGRESS_CREATEIDX_PHASE
};
const int64 progress_vals[] = {
indexRelationId,
PROGRESS_CREATEIDX_PHASE_WAIT_1
};
pgstat_progress_update_multi_param(2, progress_cols, progress_vals);
}
/*
* Phase 2 of concurrent index build (see comments for validate_index()
* for an overview of how this works)
*
* Now we must wait until no running transaction could have the table open
* with the old list of indexes. Use ShareLock to consider running
* transactions that hold locks that permit writing to the table. Note we
* do not need to worry about xacts that open the table for writing after
* this point; they will see the new index when they open it.
*
* Note: the reason we use actual lock acquisition here, rather than just
* checking the ProcArray and sleeping, is that deadlock is possible if
* one of the transactions in question is blocked trying to acquire an
* exclusive lock on our table. The lock code will detect deadlock and
* error out properly.
*/
WaitForLockers(heaplocktag, ShareLock, true);
/*
* At this moment we are sure that there are no transactions with the
* table open for write that don't have this new index in their list of
* indexes. We have waited out all the existing transactions and any new
* transaction will have the new index in its list, but the index is still
* marked as "not-ready-for-inserts". The index is consulted while
* deciding HOT-safety though. This arrangement ensures that no new HOT
* chains can be created where the new tuple and the old tuple in the
* chain have different index keys.
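* (Concretely, once every writer sees the index, an UPDATE that
* changes a to-be-indexed column can no longer be done as a HOT
* update, since all members of a HOT chain must share index keys.)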
*
* We now take a new snapshot, and build the index using all tuples that
* are visible in this snapshot. We can be sure that any HOT updates to
* these tuples will be compatible with the index, since any updates made
* by transactions that didn't know about the index are now committed or
* rolled back. Thus, each visible tuple is either the end of its
* HOT-chain or the extension of the chain is HOT-safe for this index.
*/
/* Set ActiveSnapshot since functions in the indexes may need it */
PushActiveSnapshot(GetTransactionSnapshot());
/* Perform concurrent build of index */
index_concurrently_build(relationId, indexRelationId);
/* we can do away with our snapshot */
PopActiveSnapshot();
/*
* Commit this transaction to make the indisready update visible.
*/
CommitTransactionCommand();
StartTransactionCommand();
/* Tell concurrent index builds to ignore us, if index qualifies */
if (safe_index)
set_indexsafe_procflags();
/*
* Phase 3 of concurrent index build
*
* We once again wait until no transaction can have the table open with
* the index marked as read-only for updates.
*/
pgstat_progress_update_param(PROGRESS_CREATEIDX_PHASE,
PROGRESS_CREATEIDX_PHASE_WAIT_2);
WaitForLockers(heaplocktag, ShareLock, true);
/*
* Now take the "reference snapshot" that will be used by validate_index()
* to filter candidate tuples. Beware! There might still be snapshots in
* use that treat some transaction as in-progress that our reference
* snapshot treats as committed. If such a recently-committed transaction
* deleted tuples in the table, we will not include them in the index; yet
* those transactions which see the deleting one as still-in-progress will
* expect such tuples to be there once we mark the index as valid.
*
* We solve this by waiting for all endangered transactions to exit before
* we mark the index as valid.
*
* We also set ActiveSnapshot to this snap, since functions in indexes may
* need a snapshot.
*/
snapshot = RegisterSnapshot(GetTransactionSnapshot());
PushActiveSnapshot(snapshot);
/*
* Scan the index and the heap, insert any missing index entries.
*/
validate_index(relationId, indexRelationId, snapshot);
/*
* Drop the reference snapshot. We must do this before waiting out other
* snapshot holders, else we will deadlock against other processes also
* doing CREATE INDEX CONCURRENTLY, which would see our snapshot as one
* they must wait for. But first, save the snapshot's xmin to use as
* limitXmin for GetCurrentVirtualXIDs().
*/
limitXmin = snapshot->xmin;
PopActiveSnapshot();
UnregisterSnapshot(snapshot);
/*
* The snapshot subsystem could still contain registered snapshots that
* are holding back our process's advertised xmin; in particular, if
* default_transaction_isolation = serializable, there is a transaction
* snapshot that is still active. The CatalogSnapshot is likewise a
* hazard. To ensure no deadlocks, we must commit and start yet another
* transaction, and do our wait before any snapshot has been taken in it.
*/
CommitTransactionCommand();
StartTransactionCommand();
/* Tell concurrent index builds to ignore us, if index qualifies */
if (safe_index)
set_indexsafe_procflags();
/* We should now definitely not be advertising any xmin. */
Assert(MyProc->xmin == InvalidTransactionId);
/*
* The index is now valid in the sense that it contains all currently
* interesting tuples. But since it might not contain tuples deleted just
* before the reference snap was taken, we have to wait out any
* transactions that might have older snapshots.
*/
pgstat_progress_update_param(PROGRESS_CREATEIDX_PHASE,
PROGRESS_CREATEIDX_PHASE_WAIT_3);
WaitForOlderSnapshots(limitXmin, true);
/*
* Index can now be marked valid -- update its pg_index entry
*/
index_set_state_flags(indexRelationId, INDEX_CREATE_SET_VALID);
/*
* The pg_index update will cause backends (including this one) to update
* relcache entries for the index itself, but we should also send a
* relcache inval on the parent table to force replanning of cached plans.
* Otherwise existing sessions might fail to use the new index where it
* would be useful. (Note that our earlier commits did not create reasons
* to replan; so relcache flush on the index itself was sufficient.)
*/
CacheInvalidateRelcacheByRelid(heaprelid.relId);
/*
* Last thing to do is release the session-level lock on the parent table.
*/
UnlockRelationIdForSession(&heaprelid, ShareUpdateExclusiveLock);
pgstat_progress_end_command();
return address;
}
/*
* CheckMutability
* Test whether given expression is mutable
*/
static bool
CheckMutability(Expr *expr)
{
/*
* First run the expression through the planner. This has a couple of
* important consequences. First, function default arguments will get
* inserted, which may affect volatility (consider "default now()").
* Second, inline-able functions will get inlined, which may allow us to
* conclude that the function is really less volatile than it's marked. As
* an example, polymorphic functions must be marked with the most volatile
* behavior that they have for any input type, but once we inline the
* function we may be able to conclude that it's not so volatile for the
* particular input type we're dealing with.
*
* We assume here that expression_planner() won't scribble on its input.
*/
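/*
* For example, with a hypothetical function declared as
*   f(a int, b timestamptz DEFAULT now())
* an index expression "f(x)" is expanded by the planner to
* "f(x, now())", and now() makes the whole expression mutable.
*/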
expr = expression_planner(expr);
/* Now we can search for non-immutable functions */
return contain_mutable_functions((Node *) expr);
}
/*
* CheckPredicate
* Checks that the given partial-index predicate is valid.
*
* This used to also constrain the form of the predicate to forms that
* indxpath.c could do something with. However, that seems overly
* restrictive. One useful application of partial indexes is to apply
* a UNIQUE constraint across a subset of a table, and in that scenario
* any evaluable predicate will work. So accept any predicate here
* (except ones requiring a plan), and let indxpath.c fend for itself.
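*
* For example (hypothetical schema), a partial unique index such as
*   CREATE UNIQUE INDEX ON orders (customer_id) WHERE NOT shipped
* enforces at most one unshipped order per customer, whether or not
* indxpath.c can make any planning use of the predicate.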
*/
static void
CheckPredicate(Expr *predicate)
{
/*
* transformExpr() should have already rejected subqueries, aggregates,
* and window functions, based on the EXPR_KIND_ for a predicate.
*/
/*
* A predicate using mutable functions is probably wrong, for the same
* reasons that we don't allow an index expression to use one.
*/
if (CheckMutability(predicate))
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("functions in index predicate must be marked IMMUTABLE")));
}
/*
* Compute per-index-column information, including indexed column numbers
* or index expressions, opclasses and their options. Note that all
* output vectors should be allocated for all columns, including
* "including" ones.
*
* If the caller switched to the table owner, ddl_userid is the role for ACL
* checks reached without traversing opaque expressions. Otherwise, it's
* InvalidOid, and other ddl_* arguments are undefined.
*/
static void
ComputeIndexAttrs(IndexInfo *indexInfo,
Oid *typeOidP,
Oid *collationOidP,
Oid *classOidP,
int16 *colOptionP,
List *attList, /* list of IndexElem's */
List *exclusionOpNames,
Oid relId,
const char *accessMethodName,
Oid accessMethodId,
bool amcanorder,
bool isconstraint,
Oid ddl_userid,
int ddl_sec_context,
int *ddl_save_nestlevel)
{
ListCell *nextExclOp;
ListCell *lc;
int attn;
int nkeycols = indexInfo->ii_NumIndexKeyAttrs;
Oid save_userid;
int save_sec_context;
/* Allocate space for exclusion operator info, if needed */
if (exclusionOpNames)
{
Assert(list_length(exclusionOpNames) == nkeycols);
indexInfo->ii_ExclusionOps = palloc_array(Oid, nkeycols);
indexInfo->ii_ExclusionProcs = palloc_array(Oid, nkeycols);
indexInfo->ii_ExclusionStrats = palloc_array(uint16, nkeycols);
nextExclOp = list_head(exclusionOpNames);
}
else
nextExclOp = NULL;
if (OidIsValid(ddl_userid))
GetUserIdAndSecContext(&save_userid, &save_sec_context);
/*
* process attributeList
*/
attn = 0;
foreach(lc, attList)
{
IndexElem *attribute = (IndexElem *) lfirst(lc);
Oid atttype;
Oid attcollation;
/*
* Process the column-or-expression to be indexed.
*/
if (attribute->name != NULL)
{
/* Simple index attribute */
HeapTuple atttuple;
Form_pg_attribute attform;
Assert(attribute->expr == NULL);
atttuple = SearchSysCacheAttName(relId, attribute->name);
if (!HeapTupleIsValid(atttuple))
{
/* difference in error message spellings is historical */
if (isconstraint)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
errmsg("column \"%s\" named in key does not exist",
attribute->name)));
else
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
errmsg("column \"%s\" does not exist",
attribute->name)));
}
attform = (Form_pg_attribute) GETSTRUCT(atttuple);
indexInfo->ii_IndexAttrNumbers[attn] = attform->attnum;
atttype = attform->atttypid;
attcollation = attform->attcollation;
ReleaseSysCache(atttuple);
}
else
{
/* Index expression */
Node *expr = attribute->expr;
Assert(expr != NULL);
if (attn >= nkeycols)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("expressions are not supported in included columns")));
atttype = exprType(expr);
attcollation = exprCollation(expr);
/*
* Strip any top-level COLLATE clause. This ensures that we treat
* "x COLLATE y" and "(x COLLATE y)" alike.
*/
while (IsA(expr, CollateExpr))
expr = (Node *) ((CollateExpr *) expr)->arg;
if (IsA(expr, Var) &&
((Var *) expr)->varattno != InvalidAttrNumber)
{
/*
* User wrote "(column)" or "(column COLLATE something)".
* Treat it like a simple attribute anyway.
*/
indexInfo->ii_IndexAttrNumbers[attn] = ((Var *) expr)->varattno;
}
else
{
indexInfo->ii_IndexAttrNumbers[attn] = 0; /* marks expression */
indexInfo->ii_Expressions = lappend(indexInfo->ii_Expressions,
expr);
/*
* transformExpr() should have already rejected subqueries,
* aggregates, and window functions, based on the EXPR_KIND_
* for an index expression.
*/
/*
* An expression using mutable functions is probably wrong,
* since if you aren't going to get the same result for the
* same data every time, it's not clear what the index entries
* mean at all.
*/
if (CheckMutability((Expr *) expr))
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("functions in index expression must be marked IMMUTABLE")));
}
}
typeOidP[attn] = atttype;
/*
* Included columns have no collation, no opclass and no ordering
* options.
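* (These are the trailing INCLUDE columns of, e.g.,
*   CREATE INDEX ON t (a) INCLUDE (b);
* they are stored in index tuples but never searched or sorted on,
* so none of those properties would mean anything for them.)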
*/
if (attn >= nkeycols)
{
if (attribute->collation)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("including column does not support a collation")));
if (attribute->opclass)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("including column does not support an operator class")));
if (attribute->ordering != SORTBY_DEFAULT)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("including column does not support ASC/DESC options")));
if (attribute->nulls_ordering != SORTBY_NULLS_DEFAULT)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("including column does not support NULLS FIRST/LAST options")));
classOidP[attn] = InvalidOid;
colOptionP[attn] = 0;
collationOidP[attn] = InvalidOid;
attn++;
continue;
}
/*
* Apply collation override if any. Use of ddl_userid is necessary
* due to ACL checks therein, and it's safe because collations don't
* contain opaque expressions (or non-opaque expressions).
*/
if (attribute->collation)
{
if (OidIsValid(ddl_userid))
{
AtEOXact_GUC(false, *ddl_save_nestlevel);
SetUserIdAndSecContext(ddl_userid, ddl_sec_context);
}
attcollation = get_collation_oid(attribute->collation, false);
if (OidIsValid(ddl_userid))
{
SetUserIdAndSecContext(save_userid, save_sec_context);
*ddl_save_nestlevel = NewGUCNestLevel();
}
}
/*
* Check we have a collation iff it's a collatable type. The only
* expected failures here are (1) COLLATE applied to a noncollatable
* type, or (2) index expression had an unresolved collation. But we
* might as well code this to be a complete consistency check.
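* (Case (1) would be, e.g., int_col COLLATE "C" for an integer column;
* case (2) can arise when an expression's inputs carry conflicting
* implicit collations, leaving exprCollation() as InvalidOid.)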
*/
if (type_is_collatable(atttype))
{
if (!OidIsValid(attcollation))
ereport(ERROR,
(errcode(ERRCODE_INDETERMINATE_COLLATION),
errmsg("could not determine which collation to use for index expression"),
errhint("Use the COLLATE clause to set the collation explicitly.")));
}
else
{
if (OidIsValid(attcollation))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("collations are not supported by type %s",
format_type_be(atttype))));
}
collationOidP[attn] = attcollation;
/*
* Identify the opclass to use. Use of ddl_userid is necessary due to
* ACL checks therein. This is safe despite opclasses containing
* opaque expressions (specifically, functions), because only
* superusers can define opclasses.
*/
if (OidIsValid(ddl_userid))
{
AtEOXact_GUC(false, *ddl_save_nestlevel);
SetUserIdAndSecContext(ddl_userid, ddl_sec_context);
}
classOidP[attn] = ResolveOpClass(attribute->opclass,
atttype,
accessMethodName,
accessMethodId);
if (OidIsValid(ddl_userid))
{
SetUserIdAndSecContext(save_userid, save_sec_context);
*ddl_save_nestlevel = NewGUCNestLevel();
}
/*
* Identify the exclusion operator, if any.
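* (In, e.g., EXCLUDE USING gist (room WITH =, during WITH &&), this
* loop pairs each key column with its WITH operator in order.)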
*/
if (nextExclOp)
{
List *opname = (List *) lfirst(nextExclOp);
Oid opid;
Oid opfamily;
int strat;
/*
* Find the operator --- it must accept the column datatype
* without runtime coercion (but binary compatibility is OK).
* Operators contain opaque expressions (specifically, functions).
* compatible_oper_opid() boils down to oper() and
* IsBinaryCoercible(). PostgreSQL would have security problems
* elsewhere if oper() started calling opaque expressions.
*/
if (OidIsValid(ddl_userid))
{
AtEOXact_GUC(false, *ddl_save_nestlevel);
SetUserIdAndSecContext(ddl_userid, ddl_sec_context);
}
opid = compatible_oper_opid(opname, atttype, atttype, false);
if (OidIsValid(ddl_userid))
{
SetUserIdAndSecContext(save_userid, save_sec_context);
*ddl_save_nestlevel = NewGUCNestLevel();
}
/*
* Only allow commutative operators to be used in exclusion
* constraints. If X conflicts with Y, but Y does not conflict
* with X, bad things will happen.
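* (The overlap operator && is its own commutator and so qualifies;
* an asymmetric pair such as <@ and @> does not.)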
*/
if (get_commutator(opid) != opid)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("operator %s is not commutative",
format_operator(opid)),
errdetail("Only commutative operators can be used in exclusion constraints.")));
/*
* Operator must be a member of the right opfamily, too
*/
opfamily = get_opclass_family(classOidP[attn]);
strat = get_op_opfamily_strategy(opid, opfamily);
if (strat == 0)
{
HeapTuple opftuple;
Form_pg_opfamily opfform;
/*
* attribute->opclass might not explicitly name the opfamily,
* so fetch the name of the selected opfamily for use in the
* error message.
*/
opftuple = SearchSysCache1(OPFAMILYOID,
ObjectIdGetDatum(opfamily));
if (!HeapTupleIsValid(opftuple))
elog(ERROR, "cache lookup failed for opfamily %u",
opfamily);
opfform = (Form_pg_opfamily) GETSTRUCT(opftuple);
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("operator %s is not a member of operator family \"%s\"",
format_operator(opid),
NameStr(opfform->opfname)),
errdetail("The exclusion operator must be related to the index operator class for the constraint.")));
}
indexInfo->ii_ExclusionOps[attn] = opid;
indexInfo->ii_ExclusionProcs[attn] = get_opcode(opid);
indexInfo->ii_ExclusionStrats[attn] = strat;
nextExclOp = lnext(exclusionOpNames, nextExclOp);
}
/*
* Set up the per-column options (indoption field). For now, this is
* zero for any un-ordered index, while ordered indexes have DESC and
* NULLS FIRST/LAST options.
*/
colOptionP[attn] = 0;
if (amcanorder)
{
/* default ordering is ASC */
if (attribute->ordering == SORTBY_DESC)
colOptionP[attn] |= INDOPTION_DESC;
/* default null ordering is LAST for ASC, FIRST for DESC */
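/* e.g. "col DESC" alone behaves the same as "col DESC NULLS FIRST" */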
if (attribute->nulls_ordering == SORTBY_NULLS_DEFAULT)
{
if (attribute->ordering == SORTBY_DESC)
colOptionP[attn] |= INDOPTION_NULLS_FIRST;
}
else if (attribute->nulls_ordering == SORTBY_NULLS_FIRST)
colOptionP[attn] |= INDOPTION_NULLS_FIRST;
}
else
{
/* index AM does not support ordering */
if (attribute->ordering != SORTBY_DEFAULT)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("access method \"%s\" does not support ASC/DESC options",
accessMethodName)));
if (attribute->nulls_ordering != SORTBY_NULLS_DEFAULT)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("access method \"%s\" does not support NULLS FIRST/LAST options",
accessMethodName)));
}
/* Set up the per-column opclass options (attoptions field). */
if (attribute->opclassopts)
{
Assert(attn < nkeycols);
if (!indexInfo->ii_OpclassOptions)
indexInfo->ii_OpclassOptions =
palloc0_array(Datum, indexInfo->ii_NumIndexAttrs);
indexInfo->ii_OpclassOptions[attn] =
transformRelOptions((Datum) 0, attribute->opclassopts,
NULL, NULL, false, false);
}
attn++;
}
}
/*
* Resolve possibly-defaulted operator class specification
*
* Note: This is used to resolve operator class specifications in index and
* partition key definitions.
*/
Oid
ResolveOpClass(List *opclass, Oid attrType,
const char *accessMethodName, Oid accessMethodId)
{
char *schemaname;
char *opcname;
HeapTuple tuple;
Form_pg_opclass opform;
Oid opClassId,
opInputType;
if (opclass == NIL)
{
/* no operator class specified, so find the default */
opClassId = GetDefaultOpClass(attrType, accessMethodId);
if (!OidIsValid(opClassId))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("data type %s has no default operator class for access method \"%s\"",
format_type_be(attrType), accessMethodName),
errhint("You must specify an operator class for the index or define a default operator class for the data type.")));
return opClassId;
}
/*
* Specific opclass name given, so look up the opclass.
*/
/* deconstruct the name list */
DeconstructQualifiedName(opclass, &schemaname, &opcname);
if (schemaname)
{
/* Look in specific schema only */
Oid namespaceId;
namespaceId = LookupExplicitNamespace(schemaname, false);
tuple = SearchSysCache3(CLAAMNAMENSP,
ObjectIdGetDatum(accessMethodId),
PointerGetDatum(opcname),
ObjectIdGetDatum(namespaceId));
}
else
{
/* Unqualified opclass name, so search the search path */
opClassId = OpclassnameGetOpcid(accessMethodId, opcname);
if (!OidIsValid(opClassId))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("operator class \"%s\" does not exist for access method \"%s\"",
opcname, accessMethodName)));
tuple = SearchSysCache1(CLAOID, ObjectIdGetDatum(opClassId));
}
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("operator class \"%s\" does not exist for access method \"%s\"",
NameListToString(opclass), accessMethodName)));
/*
* Verify that the index operator class accepts this datatype. Note we
* will accept binary compatibility.
*/
opform = (Form_pg_opclass) GETSTRUCT(tuple);
opClassId = opform->oid;
opInputType = opform->opcintype;
if (!IsBinaryCoercible(attrType, opInputType))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("operator class \"%s\" does not accept data type %s",
NameListToString(opclass), format_type_be(attrType))));
ReleaseSysCache(tuple);
return opClassId;
}
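/*
 * For example (illustrative SQL only; "test" and its column are
 * placeholders), an explicit opclass in
 *		CREATE INDEX test_name_idx ON test (name text_pattern_ops);
 * is looked up by the code above, whereas a plain
 *		CREATE INDEX test_name_idx ON test (name);
 * arrives here with opclass == NIL and is handled by
 * GetDefaultOpClass() below.
 */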
/*
* GetDefaultOpClass
*
* Given the OIDs of a datatype and an access method, find the default
* operator class, if any. Returns InvalidOid if there is none.
*/
Oid
GetDefaultOpClass(Oid type_id, Oid am_id)
{
Oid result = InvalidOid;
int nexact = 0;
int ncompatible = 0;
int ncompatiblepreferred = 0;
Relation rel;
ScanKeyData skey[1];
SysScanDesc scan;
HeapTuple tup;
TYPCATEGORY tcategory;
/* If it's a domain, look at the base type instead */
type_id = getBaseType(type_id);
tcategory = TypeCategory(type_id);
/*
* We scan through all the opclasses available for the access method,
* looking for one that is marked default and matches the target type
* (either exactly or binary-compatibly, but prefer an exact match).
*
* We could find more than one binary-compatible match. If just one is
* for a preferred type, use that one; otherwise we fail, forcing the user
* to specify which one they want. (The preferred-type special case is a
* kluge for varchar: it's binary-compatible to both text and bpchar, so
* we need a tiebreaker.) If we find more than one exact match, then
* someone put bogus entries in pg_opclass.
*/
rel = table_open(OperatorClassRelationId, AccessShareLock);
ScanKeyInit(&skey[0],
Anum_pg_opclass_opcmethod,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(am_id));
scan = systable_beginscan(rel, OpclassAmNameNspIndexId, true,
NULL, 1, skey);
while (HeapTupleIsValid(tup = systable_getnext(scan)))
{
Form_pg_opclass opclass = (Form_pg_opclass) GETSTRUCT(tup);
/* ignore altogether if not a default opclass */
if (!opclass->opcdefault)
continue;
if (opclass->opcintype == type_id)
{
nexact++;
result = opclass->oid;
}
else if (nexact == 0 &&
IsBinaryCoercible(type_id, opclass->opcintype))
{
if (IsPreferredType(tcategory, opclass->opcintype))
{
ncompatiblepreferred++;
result = opclass->oid;
}
else if (ncompatiblepreferred == 0)
{
ncompatible++;
result = opclass->oid;
}
}
}
systable_endscan(scan);
table_close(rel, AccessShareLock);
/* raise error if pg_opclass contains inconsistent data */
if (nexact > 1)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("there are multiple default operator classes for data type %s",
format_type_be(type_id))));
if (nexact == 1 ||
ncompatiblepreferred == 1 ||
(ncompatiblepreferred == 0 && ncompatible == 1))
return result;
return InvalidOid;
}
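/*
 * Example of the preferred-type tiebreaker described above: varchar is
 * binary-coercible to both text and bpchar, and each has a default
 * btree opclass.  Because text is the preferred type of the string
 * category, an index on a varchar column silently resolves to text_ops
 * instead of failing as ambiguous.
 */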
/*
* makeObjectName()
*
* Create a name for an implicitly created index, sequence, constraint,
* extended statistics, etc.
*
* The parameters are typically: the original table name, the original field
* name, and a "type" string (such as "seq" or "pkey"). The field name
* and/or type can be NULL if not relevant.
*
* The result is a palloc'd string.
*
* The basic result we want is "name1_name2_label", omitting "_name2" or
* "_label" when those parameters are NULL. However, we must generate
* a name with less than NAMEDATALEN characters! So, we truncate one or
* both names if necessary to make a short-enough string. The label part
* is never truncated (so it had better be reasonably short).
*
* The caller is responsible for checking uniqueness of the generated
* name and retrying as needed; retrying will be done by altering the
* "label" string (which is why we never truncate that part).
*/
char *
makeObjectName(const char *name1, const char *name2, const char *label)
{
char *name;
int overhead = 0; /* chars needed for label and underscores */
int availchars; /* chars available for name(s) */
int name1chars; /* chars allocated to name1 */
int name2chars; /* chars allocated to name2 */
int ndx;
name1chars = strlen(name1);
if (name2)
{
name2chars = strlen(name2);
overhead++; /* allow for separating underscore */
}
else
name2chars = 0;
if (label)
overhead += strlen(label) + 1;
availchars = NAMEDATALEN - 1 - overhead;
Assert(availchars > 0); /* else caller chose a bad label */
/*
* If we must truncate, preferentially truncate the longer name. This
* logic could be expressed without a loop, but it's simple and obvious as
* a loop.
*/
while (name1chars + name2chars > availchars)
{
if (name1chars > name2chars)
name1chars--;
else
name2chars--;
}
name1chars = pg_mbcliplen(name1, name1chars, name1chars);
if (name2)
name2chars = pg_mbcliplen(name2, name2chars, name2chars);
/* Now construct the string using the chosen lengths */
name = palloc(name1chars + name2chars + overhead + 1);
memcpy(name, name1, name1chars);
ndx = name1chars;
if (name2)
{
name[ndx++] = '_';
memcpy(name + ndx, name2, name2chars);
ndx += name2chars;
}
if (label)
{
name[ndx++] = '_';
strcpy(name + ndx, label);
}
else
name[ndx] = '\0';
return name;
}
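/*
 * A sketch of the result with the default NAMEDATALEN of 64 (so names
 * are limited to 63 bytes): makeObjectName("orders", "customer_id",
 * "idx") yields "orders_customer_id_idx"; if the combined names were
 * too long, the loop above would trim the longer of the two, one
 * character at a time, until the whole string fits.
 */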
/*
* Select a nonconflicting name for a new relation. This is ordinarily
* used to choose index names (which is why it's here) but it can also
* be used for sequences, or any autogenerated relation kind.
*
* name1, name2, and label are used the same way as for makeObjectName(),
* except that the label can't be NULL; digits will be appended to the label
* if needed to create a name that is unique within the specified namespace.
*
* If isconstraint is true, we also avoid choosing a name matching any
* existing constraint in the same namespace. (This is stricter than what
* Postgres itself requires, but the SQL standard says that constraint names
* should be unique within schemas, so we follow that for autogenerated
* constraint names.)
*
* Note: it is theoretically possible to get a collision anyway, if someone
* else chooses the same name concurrently. This is fairly unlikely to be
* a problem in practice, especially if one is holding an exclusive lock on
* the relation identified by name1. However, if choosing multiple names
* within a single command, you'd better create the new object and do
* CommandCounterIncrement before choosing the next one!
*
* Returns a palloc'd string.
*/
char *
ChooseRelationName(const char *name1, const char *name2,
const char *label, Oid namespaceid,
bool isconstraint)
{
int pass = 0;
char *relname = NULL;
char modlabel[NAMEDATALEN];
/* try the unmodified label first */
strlcpy(modlabel, label, sizeof(modlabel));
for (;;)
{
relname = makeObjectName(name1, name2, modlabel);
if (!OidIsValid(get_relname_relid(relname, namespaceid)))
{
if (!isconstraint ||
!ConstraintNameExists(relname, namespaceid))
break;
}
/* found a conflict, so try a new name component */
pfree(relname);
snprintf(modlabel, sizeof(modlabel), "%s%d", label, ++pass);
}
return relname;
}
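/*
 * For instance, if "orders_customer_id_idx" already exists in the
 * target namespace, the loop retries with "orders_customer_id_idx1",
 * then "orders_customer_id_idx2", and so on, modifying only the label
 * part as promised in the comments above.
 */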
/*
* Select the name to be used for an index.
*
* The argument list is pretty ad-hoc :-(
*/
static char *
ChooseIndexName(const char *tabname, Oid namespaceId,
List *colnames, List *exclusionOpNames,
bool primary, bool isconstraint)
{
char *indexname;
if (primary)
{
/* the primary key's name does not depend on the specific column(s) */
indexname = ChooseRelationName(tabname,
NULL,
"pkey",
namespaceId,
true);
}
else if (exclusionOpNames != NIL)
{
indexname = ChooseRelationName(tabname,
ChooseIndexNameAddition(colnames),
"excl",
namespaceId,
true);
}
else if (isconstraint)
{
indexname = ChooseRelationName(tabname,
ChooseIndexNameAddition(colnames),
"key",
namespaceId,
true);
}
else
{
indexname = ChooseRelationName(tabname,
ChooseIndexNameAddition(colnames),
"idx",
namespaceId,
false);
}
return indexname;
}
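/*
 * The resulting suffixes, shown with a hypothetical table "t" and
 * column "c": "t_pkey" for a primary key, "t_c_excl" for an exclusion
 * constraint, "t_c_key" for a unique constraint, and "t_c_idx" for an
 * ordinary index.
 */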
/*
* Generate "name2" for a new index given the list of column names for it
* (as produced by ChooseIndexColumnNames). This will be passed to
* ChooseRelationName along with the parent table name and a suitable label.
*
* We know that less than NAMEDATALEN characters will actually be used,
* so we can truncate the result once we've generated that many.
*
* XXX See also ChooseForeignKeyConstraintNameAddition and
* ChooseExtendedStatisticNameAddition.
*/
static char *
ChooseIndexNameAddition(List *colnames)
{
char buf[NAMEDATALEN * 2];
int buflen = 0;
ListCell *lc;
buf[0] = '\0';
foreach(lc, colnames)
{
const char *name = (const char *) lfirst(lc);
if (buflen > 0)
buf[buflen++] = '_'; /* insert _ between names */
/*
* At this point we have buflen <= NAMEDATALEN. name should be less
* than NAMEDATALEN already, but use strlcpy for paranoia.
*/
strlcpy(buf + buflen, name, NAMEDATALEN);
buflen += strlen(buf + buflen);
if (buflen >= NAMEDATALEN)
break;
}
return pstrdup(buf);
}
/*
* Select the actual names to be used for the columns of an index, given the
* list of IndexElems for the columns. This is mostly about ensuring the
* names are unique so we don't get a conflicting-attribute-names error.
*
* Returns a List of plain strings (char *, not String nodes).
*/
static List *
ChooseIndexColumnNames(List *indexElems)
{
List *result = NIL;
ListCell *lc;
foreach(lc, indexElems)
{
IndexElem *ielem = (IndexElem *) lfirst(lc);
const char *origname;
const char *curname;
int i;
char buf[NAMEDATALEN];
/* Get the preliminary name from the IndexElem */
if (ielem->indexcolname)
origname = ielem->indexcolname; /* caller-specified name */
else if (ielem->name)
origname = ielem->name; /* simple column reference */
else
origname = "expr"; /* default name for expression */
/* If it conflicts with any previous column, tweak it */
curname = origname;
for (i = 1;; i++)
{
ListCell *lc2;
char nbuf[32];
int nlen;
foreach(lc2, result)
{
if (strcmp(curname, (char *) lfirst(lc2)) == 0)
break;
}
if (lc2 == NULL)
break; /* found nonconflicting name */
sprintf(nbuf, "%d", i);
/* Ensure generated names are shorter than NAMEDATALEN */
nlen = pg_mbcliplen(origname, strlen(origname),
NAMEDATALEN - 1 - strlen(nbuf));
memcpy(buf, origname, nlen);
strcpy(buf + nlen, nbuf);
curname = buf;
}
/* And attach to the result list */
result = lappend(result, pstrdup(curname));
}
return result;
}
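/*
 * As an example, CREATE INDEX ON t ((x + 1), (y + 1)) (a hypothetical
 * table) yields two expression columns that both default to "expr"; the
 * loop above renames the second to "expr1", so the autogenerated index
 * name becomes "t_expr_expr1_idx".
 */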
/*
* ExecReindex
*
* Primary entry point for manual REINDEX commands. This is mainly a
* preparation wrapper for the real operations that will happen in
* each subroutine of REINDEX.
*/
void
ExecReindex(ParseState *pstate, ReindexStmt *stmt, bool isTopLevel)
{
ReindexParams params = {0};
ListCell *lc;
bool concurrently = false;
bool verbose = false;
char *tablespacename = NULL;
/* Parse option list */
foreach(lc, stmt->params)
{
DefElem *opt = (DefElem *) lfirst(lc);
if (strcmp(opt->defname, "verbose") == 0)
verbose = defGetBoolean(opt);
else if (strcmp(opt->defname, "concurrently") == 0)
concurrently = defGetBoolean(opt);
else if (strcmp(opt->defname, "tablespace") == 0)
tablespacename = defGetString(opt);
else
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("unrecognized REINDEX option \"%s\"",
opt->defname),
parser_errposition(pstate, opt->location)));
}
if (concurrently)
PreventInTransactionBlock(isTopLevel,
"REINDEX CONCURRENTLY");
params.options =
(verbose ? REINDEXOPT_VERBOSE : 0) |
(concurrently ? REINDEXOPT_CONCURRENTLY : 0);
/*
* Assign the tablespace OID to move indexes to, with InvalidOid to do
* nothing.
*/
if (tablespacename != NULL)
{
params.tablespaceOid = get_tablespace_oid(tablespacename, false);
/* Check permissions except when moving to database's default */
if (OidIsValid(params.tablespaceOid) &&
params.tablespaceOid != MyDatabaseTableSpace)
{
AclResult aclresult;
aclresult = object_aclcheck(TableSpaceRelationId, params.tablespaceOid,
GetUserId(), ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_TABLESPACE,
get_tablespace_name(params.tablespaceOid));
}
}
else
params.tablespaceOid = InvalidOid;
switch (stmt->kind)
{
case REINDEX_OBJECT_INDEX:
ReindexIndex(stmt->relation, &params, isTopLevel);
break;
case REINDEX_OBJECT_TABLE:
ReindexTable(stmt->relation, &params, isTopLevel);
break;
case REINDEX_OBJECT_SCHEMA:
case REINDEX_OBJECT_SYSTEM:
case REINDEX_OBJECT_DATABASE:
/*
* This cannot run inside a user transaction block; if we were
* inside a transaction, then its commit- and
* start-transaction-command calls would not have the intended
* effect!
*/
PreventInTransactionBlock(isTopLevel,
(stmt->kind == REINDEX_OBJECT_SCHEMA) ? "REINDEX SCHEMA" :
(stmt->kind == REINDEX_OBJECT_SYSTEM) ? "REINDEX SYSTEM" :
"REINDEX DATABASE");
ReindexMultipleTables(stmt->name, stmt->kind, &params);
break;
default:
elog(ERROR, "unrecognized object type: %d",
(int) stmt->kind);
break;
}
}
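/*
 * Illustrative invocations (object names are placeholders):
 *		REINDEX (VERBOSE) TABLE tab;
 *		REINDEX (CONCURRENTLY) INDEX idx;
 *		REINDEX (TABLESPACE fast_ts) DATABASE mydb;
 * The TABLESPACE form rebuilds the indexes in tablespace "fast_ts",
 * subject to the CREATE permission check above; CONCURRENTLY, like the
 * SCHEMA/SYSTEM/DATABASE variants, is rejected inside a transaction
 * block.
 */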
/*
* ReindexIndex
* Recreate a specific index.
*/
static void
ReindexIndex(RangeVar *indexRelation, ReindexParams *params, bool isTopLevel)
{
struct ReindexIndexCallbackState state;
Oid indOid;
char persistence;
char relkind;
/*
* Find and lock index, and check permissions on table; use callback to
* obtain lock on table first, to avoid deadlock hazard. The lock level
* used here must match the index lock obtained in reindex_index().
*
* If it's a temporary index, we will perform a non-concurrent reindex,
* even if CONCURRENTLY was requested. In that case, reindex_index() will
* upgrade the lock, but that's OK, because other sessions can't hold
* locks on our temporary table.
*/
state.params = *params;
state.locked_table_oid = InvalidOid;
indOid = RangeVarGetRelidExtended(indexRelation,
(params->options & REINDEXOPT_CONCURRENTLY) != 0 ?
ShareUpdateExclusiveLock : AccessExclusiveLock,
0,
RangeVarCallbackForReindexIndex,
&state);
/*
* Obtain the current persistence and kind of the existing index. We
* already hold a lock on the index.
*/
persistence = get_rel_persistence(indOid);
relkind = get_rel_relkind(indOid);
if (relkind == RELKIND_PARTITIONED_INDEX)
ReindexPartitions(indOid, params, isTopLevel);
else if ((params->options & REINDEXOPT_CONCURRENTLY) != 0 &&
persistence != RELPERSISTENCE_TEMP)
ReindexRelationConcurrently(indOid, params);
else
{
ReindexParams newparams = *params;
newparams.options |= REINDEXOPT_REPORT_PROGRESS;
reindex_index(indOid, false, persistence, &newparams);
}
}
/*
* Check permissions on table before acquiring relation lock; also lock
* the heap before RangeVarGetRelidExtended takes the index lock, to avoid
* deadlocks.
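*
* For example, ordinary access locks a table before its indexes; if
* REINDEX took the index lock first, a concurrent session holding the
* table lock and waiting for the index lock could deadlock against us.
* Locking the heap first keeps the lock ordering consistent.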
*/
static void
RangeVarCallbackForReindexIndex(const RangeVar *relation,
Oid relId, Oid oldRelId, void *arg)
{
char relkind;
struct ReindexIndexCallbackState *state = arg;
LOCKMODE table_lockmode;
Oid table_oid;
/*
* Lock level here should match table lock in reindex_index() for
* non-concurrent case and table locks used by index_concurrently_*() for
* concurrent case.
*/
table_lockmode = (state->params.options & REINDEXOPT_CONCURRENTLY) != 0 ?
ShareUpdateExclusiveLock : ShareLock;
/*
* If we previously locked some other index's heap, and the name we're
* looking up no longer refers to that relation, release the now-useless
* lock.
*/
if (relId != oldRelId && OidIsValid(oldRelId))
{
UnlockRelationOid(state->locked_table_oid, table_lockmode);
state->locked_table_oid = InvalidOid;
}
/* If the relation does not exist, there's nothing more to do. */
if (!OidIsValid(relId))
return;
/*
* If the relation does exist, check whether it's an index. But note that
* the relation might have been dropped between the time we did the name
* lookup and now. In that case, there's nothing to do.
*/
relkind = get_rel_relkind(relId);
if (!relkind)
return;
if (relkind != RELKIND_INDEX &&
relkind != RELKIND_PARTITIONED_INDEX)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is not an index", relation->relname)));
/* Check permissions */
table_oid = IndexGetRelation(relId, true);
if (OidIsValid(table_oid) &&
pg_class_aclcheck(table_oid, GetUserId(), ACL_MAINTAIN) != ACLCHECK_OK &&
!has_partition_ancestor_privs(table_oid, GetUserId(), ACL_MAINTAIN))
aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_INDEX,
relation->relname);
/* Lock heap before index to avoid deadlock. */
if (relId != oldRelId)
{
/*
* If the OID isn't valid, it means the index was concurrently
* dropped, which is not a problem for us; just return normally.
*/
if (OidIsValid(table_oid))
{
LockRelationOid(table_oid, table_lockmode);
state->locked_table_oid = table_oid;
}
}
}
/*
* ReindexTable
* Recreate all indexes of a table (and of its toast table, if any)
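*
* Illustrative SQL that reaches this function (the table name is an
* example only):
*   REINDEX TABLE measurements;
*   REINDEX TABLE CONCURRENTLY measurements;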
*/
static Oid
ReindexTable(RangeVar *relation, ReindexParams *params, bool isTopLevel)
{
Oid heapOid;
bool result;
/*
* The lock level used here should match reindex_relation().
*
* If it's a temporary table, we will perform a non-concurrent reindex,
* even if CONCURRENTLY was requested. In that case, reindex_relation()
* will upgrade the lock, but that's OK, because other sessions can't hold
* locks on our temporary table.
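*
* For example (illustrative), REINDEX TABLE CONCURRENTLY issued on a
* temporary table silently takes this non-concurrent path.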
*/
heapOid = RangeVarGetRelidExtended(relation,
(params->options & REINDEXOPT_CONCURRENTLY) != 0 ?
ShareUpdateExclusiveLock : ShareLock,
0,
RangeVarCallbackMaintainsTable, NULL);
if (get_rel_relkind(heapOid) == RELKIND_PARTITIONED_TABLE)
ReindexPartitions(heapOid, params, isTopLevel);
else if ((params->options & REINDEXOPT_CONCURRENTLY) != 0 &&
get_rel_persistence(heapOid) != RELPERSISTENCE_TEMP)
{
result = ReindexRelationConcurrently(heapOid, params);
if (!result)
ereport(NOTICE,
(errmsg("table \"%s\" has no indexes that can be reindexed concurrently",
relation->relname)));
}
else
{
ReindexParams newparams = *params;
newparams.options |= REINDEXOPT_REPORT_PROGRESS;
result = reindex_relation(heapOid,
REINDEX_REL_PROCESS_TOAST |
REINDEX_REL_CHECK_CONSTRAINTS,
&newparams);
if (!result)
ereport(NOTICE,
(errmsg("table \"%s\" has no indexes to reindex",
relation->relname)));
}
return heapOid;
}
/*
* ReindexMultipleTables
* Recreate indexes of tables selected by objectName/objectKind.
*
* To reduce the probability of deadlocks, each table is reindexed in a
* separate transaction, so we can release the lock on it right away.
* That means this must not be called within a user transaction block!
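*
* Illustrative SQL that reaches this function (the schema name is an
* example only):
*   REINDEX SCHEMA public;
*   REINDEX DATABASE;
*   REINDEX SYSTEM;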
*/
static void
ReindexMultipleTables(const char *objectName, ReindexObjectType objectKind,
ReindexParams *params)
{
Oid objectOid;
Relation relationRelation;
TableScanDesc scan;
ScanKeyData scan_keys[1];
HeapTuple tuple;
MemoryContext private_context;
MemoryContext old;
List *relids = NIL;
int num_keys;
bool concurrent_warning = false;
bool tablespace_warning = false;
Assert(objectKind == REINDEX_OBJECT_SCHEMA ||
objectKind == REINDEX_OBJECT_SYSTEM ||
objectKind == REINDEX_OBJECT_DATABASE);
/*
* This matches the options enforced by the grammar, where the object name
* is optional for DATABASE and SYSTEM.
*/
Assert(objectName || objectKind != REINDEX_OBJECT_SCHEMA);
if (objectKind == REINDEX_OBJECT_SYSTEM &&
(params->options & REINDEXOPT_CONCURRENTLY) != 0)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot reindex system catalogs concurrently")));
/*
* Get the OID of the object to reindex: the database currently in use by
* this session (for a database or for system catalogs), or the schema
* specified by the caller.  At the same time, do permission checks that
* need different processing depending on the object type.
*/
if (objectKind == REINDEX_OBJECT_SCHEMA)
{
objectOid = get_namespace_oid(objectName, false);
if (!object_ownercheck(NamespaceRelationId, objectOid, GetUserId()) &&
!has_privs_of_role(GetUserId(), ROLE_PG_MAINTAIN))
aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_SCHEMA,
objectName);
}
else
{
objectOid = MyDatabaseId;
if (objectName && strcmp(objectName, get_database_name(objectOid)) != 0)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("can only reindex the currently open database")));
if (!object_ownercheck(DatabaseRelationId, objectOid, GetUserId()) &&
!has_privs_of_role(GetUserId(), ROLE_PG_MAINTAIN))
aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_DATABASE,
get_database_name(objectOid));
}
/*
* Create a memory context that will survive forced transaction commits we
* do below. Since it is a child of PortalContext, it will go away
* eventually even if we suffer an error; there's no need for special
* abort cleanup logic.
*/
private_context = AllocSetContextCreate(PortalContext,
"ReindexMultipleTables",
ALLOCSET_SMALL_SIZES);
/*
* Define the search keys to find the objects to reindex. For a schema, we
* select target relations using relnamespace, which is not necessary for
* a database-wide operation.
*/
if (objectKind == REINDEX_OBJECT_SCHEMA)
{
num_keys = 1;
ScanKeyInit(&scan_keys[0],
Anum_pg_class_relnamespace,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(objectOid));
}
else
num_keys = 0;
/*
* Scan pg_class to build a list of the relations we need to reindex.
*
* We only consider plain relations and materialized views here (toast
* rels will be processed indirectly by reindex_relation).
*/
relationRelation = table_open(RelationRelationId, AccessShareLock);
scan = table_beginscan_catalog(relationRelation, num_keys, scan_keys);
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
Form_pg_class classtuple = (Form_pg_class) GETSTRUCT(tuple);
Oid relid = classtuple->oid;
/*
* Only regular tables and matviews can have indexes, so ignore any
* other kind of relation.
*
* Partitioned tables/indexes are skipped but matching leaf partitions
* are processed.
*/
if (classtuple->relkind != RELKIND_RELATION &&
classtuple->relkind != RELKIND_MATVIEW)
continue;
/* Skip temp tables of other backends; we can't reindex them at all */
if (classtuple->relpersistence == RELPERSISTENCE_TEMP &&
!isTempNamespace(classtuple->relnamespace))
continue;
/*
* Check user/system classification. SYSTEM processes all the
* catalogs, and DATABASE processes everything that's not a catalog.
*/
if (objectKind == REINDEX_OBJECT_SYSTEM &&
!IsCatalogRelationOid(relid))
continue;
else if (objectKind == REINDEX_OBJECT_DATABASE &&
IsCatalogRelationOid(relid))
continue;
/*
* The table can be reindexed if the user has been granted MAINTAIN on
* the table or one of its partition ancestors or the user is a
* superuser, the table owner, or the database/schema owner (but in
* the latter case, only if it's not a shared relation).
* pg_class_aclcheck includes the superuser case, and depending on
* objectKind we already know that the user has permission to run
* REINDEX on this database or schema per the permission checks at the
* beginning of this routine.
*/
if (classtuple->relisshared &&
pg_class_aclcheck(relid, GetUserId(), ACL_MAINTAIN) != ACLCHECK_OK &&
!has_partition_ancestor_privs(relid, GetUserId(), ACL_MAINTAIN))
continue;
/*
* Skip system tables, since index_create() would reject indexing them
* concurrently (and it would likely fail if we tried).
*/
if ((params->options & REINDEXOPT_CONCURRENTLY) != 0 &&
IsCatalogRelationOid(relid))
{
if (!concurrent_warning)
ereport(WARNING,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot reindex system catalogs concurrently, skipping all")));
concurrent_warning = true;
continue;
}
/*
* If a new tablespace is set, check if this relation has to be
* skipped.
*/
if (OidIsValid(params->tablespaceOid))
{
bool skip_rel = false;
/*
* Mapped relations cannot be moved to different tablespaces (in
* particular this eliminates all shared catalogs).
*/
if (RELKIND_HAS_STORAGE(classtuple->relkind) &&
!RelFileNumberIsValid(classtuple->relfilenode))
skip_rel = true;
/*
* A system relation is always skipped, even with
* allow_system_table_mods enabled.
*/
if (IsSystemClass(relid, classtuple))
skip_rel = true;
if (skip_rel)
{
if (!tablespace_warning)
ereport(WARNING,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("cannot move system relations, skipping all")));
tablespace_warning = true;
continue;
}
}
/* Save the list of relation OIDs in private context */
old = MemoryContextSwitchTo(private_context);
/*
* We always want to reindex pg_class first if it's selected to be
* reindexed. This ensures that if there is any corruption in
* pg_class' indexes, they will be fixed before we process any other
* tables. This is critical because reindexing itself will try to
* update pg_class.
*/
if (relid == RelationRelationId)
relids = lcons_oid(relid, relids);
else
relids = lappend_oid(relids, relid);
MemoryContextSwitchTo(old);
}
table_endscan(scan);
table_close(relationRelation, AccessShareLock);
/*
* Process each relation listed in a separate transaction. Note that this
* commits and then starts a new transaction immediately.
*/
ReindexMultipleInternal(relids, params);
MemoryContextDelete(private_context);
}
/*
* Error callback specific to ReindexPartitions().
*/
static void
reindex_error_callback(void *arg)
{
ReindexErrorInfo *errinfo = (ReindexErrorInfo *) arg;
Assert(RELKIND_HAS_PARTITIONS(errinfo->relkind));
if (errinfo->relkind == RELKIND_PARTITIONED_TABLE)
errcontext("while reindexing partitioned table \"%s.%s\"",
errinfo->relnamespace, errinfo->relname);
else if (errinfo->relkind == RELKIND_PARTITIONED_INDEX)
errcontext("while reindexing partitioned index \"%s.%s\"",
errinfo->relnamespace, errinfo->relname);
}
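/*
* For illustration, an error raised while a partition is processed is
* then reported with an extra context line built from the errcontext()
* calls above, e.g. (with a hypothetical table name):
*   CONTEXT:  while reindexing partitioned table "public.parent"
*/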
/*
* ReindexPartitions
*
* Reindex a set of partitions, per the partitioned index or table given
* by the caller.
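*
* For example, REINDEX INDEX on a partitioned index and REINDEX TABLE on
* a partitioned table both route here (see the callers above); each leaf
* partition is then reindexed in its own transaction via
* ReindexMultipleInternal().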
*/
static void
ReindexPartitions(Oid relid, ReindexParams *params, bool isTopLevel)
{
List *partitions = NIL;
char relkind = get_rel_relkind(relid);
char *relname = get_rel_name(relid);
char *relnamespace = get_namespace_name(get_rel_namespace(relid));
MemoryContext reindex_context;
List *inhoids;
ListCell *lc;
ErrorContextCallback errcallback;
ReindexErrorInfo errinfo;
Assert(RELKIND_HAS_PARTITIONS(relkind));
/*
* Check if this runs in a transaction block, with an error callback
* installed to provide more context about where a problem happens.
*/
errinfo.relname = pstrdup(relname);
errinfo.relnamespace = pstrdup(relnamespace);
errinfo.relkind = relkind;
errcallback.callback = reindex_error_callback;
errcallback.arg = (void *) &errinfo;
errcallback.previous = error_context_stack;
error_context_stack = &errcallback;
PreventInTransactionBlock(isTopLevel,
relkind == RELKIND_PARTITIONED_TABLE ?
"REINDEX TABLE" : "REINDEX INDEX");
/* Pop the error context stack */
error_context_stack = errcallback.previous;
/*
* Create special memory context for cross-transaction storage.
*
* Since it is a child of PortalContext, it will go away eventually even
* if we suffer an error so there is no need for special abort cleanup
* logic.
*/
reindex_context = AllocSetContextCreate(PortalContext, "Reindex",
ALLOCSET_DEFAULT_SIZES);
/* ShareLock is enough to prevent schema modifications */
inhoids = find_all_inheritors(relid, ShareLock, NULL);
/*
* The list of relations to reindex consists of the physical partitions
* of the tree, so discard any partitioned table or index.
*/
foreach(lc, inhoids)
{
Oid partoid = lfirst_oid(lc);
char partkind = get_rel_relkind(partoid);
MemoryContext old_context;
/*
* This discards partitioned tables, partitioned indexes and foreign
* tables.
*/
if (!RELKIND_HAS_STORAGE(partkind))
continue;
Assert(partkind == RELKIND_INDEX ||
partkind == RELKIND_RELATION);
/* Save partition OID */
old_context = MemoryContextSwitchTo(reindex_context);
partitions = lappend_oid(partitions, partoid);
MemoryContextSwitchTo(old_context);
}
/*
* Process each partition listed in a separate transaction. Note that
* this commits and then starts a new transaction immediately.
*/
ReindexMultipleInternal(partitions, params);
/*
* Clean up working storage --- note we must do this after
* StartTransactionCommand, else we might be trying to delete the active
* context!
*/
MemoryContextDelete(reindex_context);
}
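/*
 * A minimal illustrative sketch (not part of PostgreSQL proper) of the
 * cross-transaction storage pattern used by ReindexPartitions() above:
 * data that must survive the commits performed by
 * ReindexMultipleInternal() is allocated in a context hung off
 * PortalContext.  example_reindex_list() is a hypothetical name.
 */
static void
example_reindex_list(Oid relid, ReindexParams *params)
{
	MemoryContext cxt;
	MemoryContext oldcxt;
	List	   *relids;

	cxt = AllocSetContextCreate(PortalContext, "ExampleReindex",
								ALLOCSET_DEFAULT_SIZES);

	/* anything that must survive the intermediate commits goes into cxt */
	oldcxt = MemoryContextSwitchTo(cxt);
	relids = list_make1_oid(relid);
	MemoryContextSwitchTo(oldcxt);

	/* commits the current transaction and starts new ones internally */
	ReindexMultipleInternal(relids, params);

	/* a transaction is running again here, so this deletion is safe */
	MemoryContextDelete(cxt);
}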
/*
* ReindexMultipleInternal
*
* Reindex a list of relations, each one being processed in its own
* transaction. This commits the existing transaction immediately,
* and starts a new transaction when finished.
*/
static void
ReindexMultipleInternal(List *relids, ReindexParams *params)
{
ListCell *l;
PopActiveSnapshot();
CommitTransactionCommand();
foreach(l, relids)
{
Oid relid = lfirst_oid(l);
char relkind;
char relpersistence;
StartTransactionCommand();
/* functions in indexes may want a snapshot set */
PushActiveSnapshot(GetTransactionSnapshot());
/* check if the relation still exists */
if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relid)))
{
PopActiveSnapshot();
CommitTransactionCommand();
continue;
}
/*
* If a new tablespace is chosen, check permissions, except when moving
* to the database's default. Note that this check also happens in
* ExecReindex(), but we do an extra check here as this runs across
* multiple transactions.
*/
if (OidIsValid(params->tablespaceOid) &&
params->tablespaceOid != MyDatabaseTableSpace)
{
AclResult aclresult;
aclresult = object_aclcheck(TableSpaceRelationId, params->tablespaceOid,
GetUserId(), ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_TABLESPACE,
get_tablespace_name(params->tablespaceOid));
}
relkind = get_rel_relkind(relid);
relpersistence = get_rel_persistence(relid);
/*
* Partitioned tables and indexes can never be processed directly, and
* a list of their leaves should be built first.
*/
Assert(!RELKIND_HAS_PARTITIONS(relkind));
if ((params->options & REINDEXOPT_CONCURRENTLY) != 0 &&
relpersistence != RELPERSISTENCE_TEMP)
{
ReindexParams newparams = *params;
newparams.options |= REINDEXOPT_MISSING_OK;
(void) ReindexRelationConcurrently(relid, &newparams);
/* ReindexRelationConcurrently() does the verbose output */
}
else if (relkind == RELKIND_INDEX)
{
ReindexParams newparams = *params;
newparams.options |=
REINDEXOPT_REPORT_PROGRESS | REINDEXOPT_MISSING_OK;
reindex_index(relid, false, relpersistence, &newparams);
PopActiveSnapshot();
/* reindex_index() does the verbose output */
}
else
{
bool result;
ReindexParams newparams = *params;
newparams.options |=
REINDEXOPT_REPORT_PROGRESS | REINDEXOPT_MISSING_OK;
result = reindex_relation(relid,
REINDEX_REL_PROCESS_TOAST |
REINDEX_REL_CHECK_CONSTRAINTS,
&newparams);
if (result && (params->options & REINDEXOPT_VERBOSE) != 0)
ereport(INFO,
(errmsg("table \"%s.%s\" was reindexed",
get_namespace_name(get_rel_namespace(relid)),
get_rel_name(relid))));
PopActiveSnapshot();
}
CommitTransactionCommand();
}
StartTransactionCommand();
}
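/*
 * A minimal illustrative sketch (not part of PostgreSQL proper) of the
 * one-transaction-per-item loop implemented by ReindexMultipleInternal()
 * above.  process_one_relation() is a hypothetical stand-in for the
 * per-relation work.
 */
static void
example_multi_xact_loop(List *relids)
{
	ListCell   *l;

	/* the caller arrives with an open transaction and active snapshot */
	PopActiveSnapshot();
	CommitTransactionCommand();

	foreach(l, relids)
	{
		Oid			relid = lfirst_oid(l);

		StartTransactionCommand();
		/* functions evaluated below may want a snapshot set */
		PushActiveSnapshot(GetTransactionSnapshot());

		process_one_relation(relid);	/* hypothetical */

		PopActiveSnapshot();
		CommitTransactionCommand();
	}

	/* leave with an open transaction, as the caller expects */
	StartTransactionCommand();
}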
/*
* ReindexRelationConcurrently - process REINDEX CONCURRENTLY for given
* relation OID
*
* 'relationOid' can either belong to an index, a table or a materialized
* view. For tables and materialized views, all their indexes will be rebuilt,
* excluding invalid indexes and any indexes used in exclusion constraints,
* but including their associated toast table indexes. For indexes, the index
* itself will be rebuilt.
*
* The locks taken on parent tables and involved indexes are kept until the
* transaction is committed, at which point a session lock is taken on each
* relation. Both of these protect against concurrent schema changes.
*
* Returns true if any indexes have been rebuilt (including toast table's
* indexes, when relevant), otherwise returns false.
*
* NOTE: This cannot be used on temporary relations. A concurrent build would
* cause issues with ON COMMIT actions triggered by the transactions of the
* concurrent build. Temporary relations are not subject to concurrent
* concerns, so there's no need for the more complicated concurrent build,
* anyway, and a non-concurrent reindex is more efficient.
*/
static bool
ReindexRelationConcurrently(Oid relationOid, ReindexParams *params)
{
typedef struct ReindexIndexInfo
{
Oid indexId;
Oid tableId;
Oid amId;
bool safe; /* for set_indexsafe_procflags */
} ReindexIndexInfo;
List *heapRelationIds = NIL;
List *indexIds = NIL;
List *newIndexIds = NIL;
List *relationLocks = NIL;
List *lockTags = NIL;
ListCell *lc,
*lc2;
MemoryContext private_context;
MemoryContext oldcontext;
char relkind;
char *relationName = NULL;
char *relationNamespace = NULL;
PGRUsage ru0;
const int progress_index[] = {
PROGRESS_CREATEIDX_COMMAND,
PROGRESS_CREATEIDX_PHASE,
PROGRESS_CREATEIDX_INDEX_OID,
PROGRESS_CREATEIDX_ACCESS_METHOD_OID
};
int64 progress_vals[4];
/*
* Create a memory context that will survive forced transaction commits we
* do below. Since it is a child of PortalContext, it will go away
* eventually even if we suffer an error; there's no need for special
* abort cleanup logic.
*/
private_context = AllocSetContextCreate(PortalContext,
"ReindexConcurrent",
ALLOCSET_SMALL_SIZES);
if ((params->options & REINDEXOPT_VERBOSE) != 0)
{
/* Save data needed by REINDEX VERBOSE in private context */
oldcontext = MemoryContextSwitchTo(private_context);
relationName = get_rel_name(relationOid);
relationNamespace = get_namespace_name(get_rel_namespace(relationOid));
pg_rusage_init(&ru0);
MemoryContextSwitchTo(oldcontext);
}
relkind = get_rel_relkind(relationOid);
/*
* Extract the list of indexes that are going to be rebuilt based on the
* relation Oid given by caller.
*/
switch (relkind)
{
case RELKIND_RELATION:
case RELKIND_MATVIEW:
case RELKIND_TOASTVALUE:
{
/*
* In the case of a relation, find all its indexes including
* toast indexes.
*/
Relation heapRelation;
/* Save the list of relation OIDs in private context */
oldcontext = MemoryContextSwitchTo(private_context);
/* Track this relation for session locks */
heapRelationIds = lappend_oid(heapRelationIds, relationOid);
MemoryContextSwitchTo(oldcontext);
if (IsCatalogRelationOid(relationOid))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot reindex system catalogs concurrently")));
/* Open relation to get its indexes */
if ((params->options & REINDEXOPT_MISSING_OK) != 0)
{
heapRelation = try_table_open(relationOid,
ShareUpdateExclusiveLock);
/* leave if relation does not exist */
if (!heapRelation)
break;
}
else
heapRelation = table_open(relationOid,
ShareUpdateExclusiveLock);
if (OidIsValid(params->tablespaceOid) &&
IsSystemRelation(heapRelation))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot move system relation \"%s\"",
RelationGetRelationName(heapRelation))));
/* Add all the valid indexes of relation to list */
foreach(lc, RelationGetIndexList(heapRelation))
{
Oid cellOid = lfirst_oid(lc);
Relation indexRelation = index_open(cellOid,
ShareUpdateExclusiveLock);
if (!indexRelation->rd_index->indisvalid)
ereport(WARNING,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot reindex invalid index \"%s.%s\" concurrently, skipping",
get_namespace_name(get_rel_namespace(cellOid)),
get_rel_name(cellOid))));
else if (indexRelation->rd_index->indisexclusion)
ereport(WARNING,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot reindex exclusion constraint index \"%s.%s\" concurrently, skipping",
get_namespace_name(get_rel_namespace(cellOid)),
get_rel_name(cellOid))));
else
{
ReindexIndexInfo *idx;
/* Save the list of relation OIDs in private context */
oldcontext = MemoryContextSwitchTo(private_context);
idx = palloc_object(ReindexIndexInfo);
idx->indexId = cellOid;
/* other fields set later */
indexIds = lappend(indexIds, idx);
MemoryContextSwitchTo(oldcontext);
}
index_close(indexRelation, NoLock);
}
/* Also add the toast indexes */
if (OidIsValid(heapRelation->rd_rel->reltoastrelid))
{
Oid toastOid = heapRelation->rd_rel->reltoastrelid;
Relation toastRelation = table_open(toastOid,
ShareUpdateExclusiveLock);
/* Save the list of relation OIDs in private context */
oldcontext = MemoryContextSwitchTo(private_context);
/* Track this relation for session locks */
heapRelationIds = lappend_oid(heapRelationIds, toastOid);
MemoryContextSwitchTo(oldcontext);
foreach(lc2, RelationGetIndexList(toastRelation))
{
Oid cellOid = lfirst_oid(lc2);
Relation indexRelation = index_open(cellOid,
ShareUpdateExclusiveLock);
if (!indexRelation->rd_index->indisvalid)
ereport(WARNING,
(errcode(ERRCODE_INDEX_CORRUPTED),
errmsg("cannot reindex invalid index \"%s.%s\" concurrently, skipping",
get_namespace_name(get_rel_namespace(cellOid)),
get_rel_name(cellOid))));
else
{
ReindexIndexInfo *idx;
/*
* Save the list of relation OIDs in private
* context
*/
oldcontext = MemoryContextSwitchTo(private_context);
idx = palloc_object(ReindexIndexInfo);
idx->indexId = cellOid;
indexIds = lappend(indexIds, idx);
/* other fields set later */
MemoryContextSwitchTo(oldcontext);
}
index_close(indexRelation, NoLock);
}
table_close(toastRelation, NoLock);
}
table_close(heapRelation, NoLock);
break;
}
case RELKIND_INDEX:
{
Oid heapId = IndexGetRelation(relationOid,
(params->options & REINDEXOPT_MISSING_OK) != 0);
Relation heapRelation;
ReindexIndexInfo *idx;
/* if relation is missing, leave */
if (!OidIsValid(heapId))
break;
if (IsCatalogRelationOid(heapId))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot reindex system catalogs concurrently")));
/*
* Don't allow reindex of an invalid index on a TOAST table: if
* it were rebuilt, it would not be possible to drop it. The
* error message matches the one in reindex_index().
*/
if (IsToastNamespace(get_rel_namespace(relationOid)) &&
!get_index_isvalid(relationOid))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot reindex invalid index on TOAST table")));
/*
* Check whether the parent relation exists and can be locked.
* This needs to be done at this stage, as the list of indexes
* to rebuild is not complete yet, and REINDEXOPT_MISSING_OK
* should not be used once all the session locks are taken.
*/
if ((params->options & REINDEXOPT_MISSING_OK) != 0)
{
heapRelation = try_table_open(heapId,
ShareUpdateExclusiveLock);
/* leave if relation does not exist */
if (!heapRelation)
break;
}
else
heapRelation = table_open(heapId,
ShareUpdateExclusiveLock);
if (OidIsValid(params->tablespaceOid) &&
IsSystemRelation(heapRelation))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot move system relation \"%s\"",
get_rel_name(relationOid))));
table_close(heapRelation, NoLock);
/* Save the list of relation OIDs in private context */
oldcontext = MemoryContextSwitchTo(private_context);
/* Track the heap relation of this index for session locks */
heapRelationIds = list_make1_oid(heapId);
/*
* Save the list of relation OIDs in private context. Note
* that invalid indexes are allowed here.
*/
idx = palloc_object(ReindexIndexInfo);
idx->indexId = relationOid;
indexIds = lappend(indexIds, idx);
/* other fields set later */
MemoryContextSwitchTo(oldcontext);
break;
}
case RELKIND_PARTITIONED_TABLE:
case RELKIND_PARTITIONED_INDEX:
default:
/* Return error if type of relation is not supported */
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot reindex this type of relation concurrently")));
break;
}
/*
* Definitely no indexes, so leave. Any checks based on
* REINDEXOPT_MISSING_OK should be done only while the list of indexes to
* work on is built, as the session locks taken before this transaction
* commits will make sure that they cannot be dropped by a concurrent
* session until this operation completes.
*/
if (indexIds == NIL)
{
PopActiveSnapshot();
return false;
}
/* It's not a shared catalog, so refuse to move it to shared tablespace */
if (params->tablespaceOid == GLOBALTABLESPACE_OID)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot move non-shared relation to tablespace \"%s\"",
get_tablespace_name(params->tablespaceOid))));
Assert(heapRelationIds != NIL);
/*-----
* Now we have all the indexes we want to process in indexIds.
*
* The phases now are:
*
* 1. create new indexes in the catalog
* 2. build new indexes
* 3. let new indexes catch up with tuples inserted in the meantime
* 4. swap index names
* 5. mark old indexes as dead
* 6. drop old indexes
*
* We process each phase for all indexes before moving to the next phase,
* for efficiency.
*/
/*
* Phase 1 of REINDEX CONCURRENTLY
*
* Create a new index with the same properties as the old one, but it is
* only registered in catalogs and will be built later. Then get session
* locks on all involved tables. See analogous code in DefineIndex() for
* more detailed comments.
*/
foreach(lc, indexIds)
{
char *concurrentName;
ReindexIndexInfo *idx = lfirst(lc);
ReindexIndexInfo *newidx;
Oid newIndexId;
Relation indexRel;
Relation heapRel;
Oid save_userid;
int save_sec_context;
int save_nestlevel;
Relation newIndexRel;
LockRelId *lockrelid;
Oid tablespaceid;
indexRel = index_open(idx->indexId, ShareUpdateExclusiveLock);
heapRel = table_open(indexRel->rd_index->indrelid,
ShareUpdateExclusiveLock);
/*
* Switch to the table owner's userid, so that any index functions are
* run as that user. Also lock down security-restricted operations
* and arrange to make GUC variable changes local to this command.
*/
GetUserIdAndSecContext(&save_userid, &save_sec_context);
SetUserIdAndSecContext(heapRel->rd_rel->relowner,
save_sec_context | SECURITY_RESTRICTED_OPERATION);
save_nestlevel = NewGUCNestLevel();
/* determine safety of this index for set_indexsafe_procflags */
idx->safe = (indexRel->rd_indexprs == NIL &&
indexRel->rd_indpred == NIL);
idx->tableId = RelationGetRelid(heapRel);
idx->amId = indexRel->rd_rel->relam;
/* This function shouldn't be called for temporary relations. */
if (indexRel->rd_rel->relpersistence == RELPERSISTENCE_TEMP)
elog(ERROR, "cannot reindex a temporary table concurrently");
pgstat_progress_start_command(PROGRESS_COMMAND_CREATE_INDEX,
idx->tableId);
progress_vals[0] = PROGRESS_CREATEIDX_COMMAND_REINDEX_CONCURRENTLY;
progress_vals[1] = 0; /* initializing */
progress_vals[2] = idx->indexId;
progress_vals[3] = idx->amId;
pgstat_progress_update_multi_param(4, progress_index, progress_vals);
/* Choose a temporary relation name for the new index */
concurrentName = ChooseRelationName(get_rel_name(idx->indexId),
NULL,
"ccnew",
get_rel_namespace(indexRel->rd_index->indrelid),
false);
/* Choose the new tablespace; indexes of toast tables are not moved */
if (OidIsValid(params->tablespaceOid) &&
heapRel->rd_rel->relkind != RELKIND_TOASTVALUE)
tablespaceid = params->tablespaceOid;
else
tablespaceid = indexRel->rd_rel->reltablespace;
/* Create new index definition based on given index */
newIndexId = index_concurrently_create_copy(heapRel,
idx->indexId,
tablespaceid,
concurrentName);
/*
* Now open the relation of the new index; a session-level lock is
* also needed on it.
*/
newIndexRel = index_open(newIndexId, ShareUpdateExclusiveLock);
/*
* Save the list of OIDs and locks in private context
*/
oldcontext = MemoryContextSwitchTo(private_context);
newidx = palloc_object(ReindexIndexInfo);
newidx->indexId = newIndexId;
newidx->safe = idx->safe;
newidx->tableId = idx->tableId;
newidx->amId = idx->amId;
newIndexIds = lappend(newIndexIds, newidx);
/*
* Save the lockrelids to protect each relation from being dropped,
* then close the relations. The lockrelid of the parent relation is
* not taken here, to avoid taking multiple locks on the same relation;
* instead we rely on heapRelationIds built earlier.
*/
lockrelid = palloc_object(LockRelId);
*lockrelid = indexRel->rd_lockInfo.lockRelId;
relationLocks = lappend(relationLocks, lockrelid);
lockrelid = palloc_object(LockRelId);
*lockrelid = newIndexRel->rd_lockInfo.lockRelId;
relationLocks = lappend(relationLocks, lockrelid);
MemoryContextSwitchTo(oldcontext);
index_close(indexRel, NoLock);
index_close(newIndexRel, NoLock);
/* Roll back any GUC changes executed by index functions */
AtEOXact_GUC(false, save_nestlevel);
/* Restore userid and security context */
SetUserIdAndSecContext(save_userid, save_sec_context);
table_close(heapRel, NoLock);
}
/*
* Save the heap locks for the following visibility checks; other backends
* might conflict with this session.
*/
foreach(lc, heapRelationIds)
{
Relation heapRelation = table_open(lfirst_oid(lc), ShareUpdateExclusiveLock);
LockRelId *lockrelid;
LOCKTAG *heaplocktag;
/* Save the list of locks in private context */
oldcontext = MemoryContextSwitchTo(private_context);
/* Add lockrelid of heap relation to the list of locked relations */
lockrelid = palloc_object(LockRelId);
*lockrelid = heapRelation->rd_lockInfo.lockRelId;
relationLocks = lappend(relationLocks, lockrelid);
heaplocktag = palloc_object(LOCKTAG);
/* Save the LOCKTAG for this parent relation for the wait phase */
SET_LOCKTAG_RELATION(*heaplocktag, lockrelid->dbId, lockrelid->relId);
lockTags = lappend(lockTags, heaplocktag);
MemoryContextSwitchTo(oldcontext);
/* Close heap relation */
table_close(heapRelation, NoLock);
}
/* Get a session-level lock on each table. */
foreach(lc, relationLocks)
{
LockRelId *lockrelid = (LockRelId *) lfirst(lc);
LockRelationIdForSession(lockrelid, ShareUpdateExclusiveLock);
}
PopActiveSnapshot();
CommitTransactionCommand();
StartTransactionCommand();
/*
* Because we don't take a snapshot in this transaction, there's no need
* to set the PROC_IN_SAFE_IC flag here.
*/
/*
* Phase 2 of REINDEX CONCURRENTLY
*
* Build the new indexes in a separate transaction for each index to avoid
* having open transactions for an unnecessarily long time. But before
* doing that, wait until no running transactions could have the table of
* the index open with the old list of indexes. See "phase 2" in
* DefineIndex() for more details.
*/
pgstat_progress_update_param(PROGRESS_CREATEIDX_PHASE,
PROGRESS_CREATEIDX_PHASE_WAIT_1);
WaitForLockersMultiple(lockTags, ShareLock, true);
CommitTransactionCommand();
foreach(lc, newIndexIds)
{
ReindexIndexInfo *newidx = lfirst(lc);
/* Start new transaction for this index's concurrent build */
StartTransactionCommand();
/*
* Check for user-requested abort. This is inside a transaction so that
* xact.c does not issue a useless WARNING, and ensures that
* session-level locks are cleaned up on abort.
*/
CHECK_FOR_INTERRUPTS();
/* Tell concurrent indexing to ignore us, if index qualifies */
if (newidx->safe)
set_indexsafe_procflags();
/* Set ActiveSnapshot since functions in the indexes may need it */
PushActiveSnapshot(GetTransactionSnapshot());
/*
* Update progress for the index to build, with the correct parent
* table involved.
*/
pgstat_progress_start_command(PROGRESS_COMMAND_CREATE_INDEX, newidx->tableId);
progress_vals[0] = PROGRESS_CREATEIDX_COMMAND_REINDEX_CONCURRENTLY;
progress_vals[1] = PROGRESS_CREATEIDX_PHASE_BUILD;
progress_vals[2] = newidx->indexId;
progress_vals[3] = newidx->amId;
pgstat_progress_update_multi_param(4, progress_index, progress_vals);
/* Perform concurrent build of new index */
index_concurrently_build(newidx->tableId, newidx->indexId);
PopActiveSnapshot();
CommitTransactionCommand();
}
StartTransactionCommand();
/*
* Because we don't take a snapshot or Xid in this transaction, there's no
* need to set the PROC_IN_SAFE_IC flag here.
*/
/*
* Phase 3 of REINDEX CONCURRENTLY
*
* During this phase the old indexes catch up with any new tuples that
* were created during the previous phase. See "phase 3" in DefineIndex()
* for more details.
*/
pgstat_progress_update_param(PROGRESS_CREATEIDX_PHASE,
PROGRESS_CREATEIDX_PHASE_WAIT_2);
WaitForLockersMultiple(lockTags, ShareLock, true);
CommitTransactionCommand();
foreach(lc, newIndexIds)
{
ReindexIndexInfo *newidx = lfirst(lc);
TransactionId limitXmin;
Snapshot snapshot;
StartTransactionCommand();
/*
* Check for user-requested abort. This is inside a transaction so that
* xact.c does not issue a useless WARNING, and ensures that
* session-level locks are cleaned up on abort.
*/
CHECK_FOR_INTERRUPTS();
/* Tell concurrent indexing to ignore us, if index qualifies */
if (newidx->safe)
set_indexsafe_procflags();
/*
* Take the "reference snapshot" that will be used by validate_index()
* to filter candidate tuples.
*/
snapshot = RegisterSnapshot(GetTransactionSnapshot());
PushActiveSnapshot(snapshot);
/*
* Update progress for the index to build, with the correct parent
* table involved.
*/
pgstat_progress_start_command(PROGRESS_COMMAND_CREATE_INDEX,
newidx->tableId);
progress_vals[0] = PROGRESS_CREATEIDX_COMMAND_REINDEX_CONCURRENTLY;
progress_vals[1] = PROGRESS_CREATEIDX_PHASE_VALIDATE_IDXSCAN;
progress_vals[2] = newidx->indexId;
progress_vals[3] = newidx->amId;
pgstat_progress_update_multi_param(4, progress_index, progress_vals);
validate_index(newidx->tableId, newidx->indexId, snapshot);
/*
* We can now do away with our active snapshot, but we still need to save
* the xmin limit to wait for older snapshots.
*/
limitXmin = snapshot->xmin;
PopActiveSnapshot();
UnregisterSnapshot(snapshot);
/*
* To ensure no deadlocks, we must commit and start yet another
* transaction, and do our wait before any snapshot has been taken in
* it.
*/
CommitTransactionCommand();
StartTransactionCommand();
/*
* The index is now valid in the sense that it contains all currently
* interesting tuples. But since it might not contain tuples deleted
* just before the reference snap was taken, we have to wait out any
* transactions that might have older snapshots.
*
* Because we don't take a snapshot or Xid in this transaction,
* there's no need to set the PROC_IN_SAFE_IC flag here.
*/
pgstat_progress_update_param(PROGRESS_CREATEIDX_PHASE,
PROGRESS_CREATEIDX_PHASE_WAIT_3);
WaitForOlderSnapshots(limitXmin, true);
CommitTransactionCommand();
}
/*
* Phase 4 of REINDEX CONCURRENTLY
*
* Now that the new indexes have been validated, swap each new index with
* its corresponding old index.
*
* We mark the new indexes as valid and the old indexes as not valid at
* the same time to make sure we only get constraint violations from the
* indexes with the correct names.
*/
StartTransactionCommand();
/*
* Because this transaction only does catalog manipulations and doesn't do
* any index operations, we can set the PROC_IN_SAFE_IC flag here
* unconditionally.
*/
set_indexsafe_procflags();
forboth(lc, indexIds, lc2, newIndexIds)
{
ReindexIndexInfo *oldidx = lfirst(lc);
ReindexIndexInfo *newidx = lfirst(lc2);
char *oldName;
/*
* Check for user-requested abort. This is inside a transaction so that
* xact.c does not issue a useless WARNING, and ensures that
* session-level locks are cleaned up on abort.
*/
CHECK_FOR_INTERRUPTS();
/* Choose a relation name for old index */
oldName = ChooseRelationName(get_rel_name(oldidx->indexId),
NULL,
"ccold",
get_rel_namespace(oldidx->tableId),
false);
/*
* Swap old index with the new one. This also marks the new one as
* valid and the old one as not valid.
*/
index_concurrently_swap(newidx->indexId, oldidx->indexId, oldName);
/*
* Invalidate the relcache for the table, so that after this commit
* all sessions will refresh any cached plans that might reference the
* index.
*/
CacheInvalidateRelcacheByRelid(oldidx->tableId);
/*
* CCI here so that subsequent iterations see the oldName in the
* catalog and can choose a nonconflicting name for their oldName.
* Otherwise, this could lead to conflicts if a table has two indexes
* whose names are equal for the first NAMEDATALEN-minus-a-few
* characters.
*/
CommandCounterIncrement();
}
/* Commit this transaction and make index swaps visible */
CommitTransactionCommand();
StartTransactionCommand();
/*
* While we could set PROC_IN_SAFE_IC if all indexes qualified, there's no
* real need for that, because we only acquire an Xid after the wait is
* done, and that lasts for a very short period.
*/
/*
* Phase 5 of REINDEX CONCURRENTLY
*
* Mark the old indexes as dead. First we must wait until no running
* transaction could be using the index for a query. See also
* index_drop() for more details.
*/
pgstat_progress_update_param(PROGRESS_CREATEIDX_PHASE,
PROGRESS_CREATEIDX_PHASE_WAIT_4);
WaitForLockersMultiple(lockTags, AccessExclusiveLock, true);
foreach(lc, indexIds)
{
ReindexIndexInfo *oldidx = lfirst(lc);
/*
* Check for user-requested abort. This is inside a transaction so that
* xact.c does not issue a useless WARNING, and ensures that
* session-level locks are cleaned up on abort.
*/
CHECK_FOR_INTERRUPTS();
index_concurrently_set_dead(oldidx->tableId, oldidx->indexId);
}
/* Commit this transaction to make the updates visible. */
CommitTransactionCommand();
StartTransactionCommand();
/*
* While we could set PROC_IN_SAFE_IC if all indexes qualified, there's no
* real need for that, because we only acquire an Xid after the wait is
* done, and that lasts for a very short period.
*/
/*
* Phase 6 of REINDEX CONCURRENTLY
*
* Drop the old indexes.
*/
pgstat_progress_update_param(PROGRESS_CREATEIDX_PHASE,
PROGRESS_CREATEIDX_PHASE_WAIT_5);
WaitForLockersMultiple(lockTags, AccessExclusiveLock, true);
PushActiveSnapshot(GetTransactionSnapshot());
{
ObjectAddresses *objects = new_object_addresses();
foreach(lc, indexIds)
{
ReindexIndexInfo *idx = lfirst(lc);
ObjectAddress object;
object.classId = RelationRelationId;
object.objectId = idx->indexId;
object.objectSubId = 0;
add_exact_object_address(&object, objects);
}
/*
* Use PERFORM_DELETION_CONCURRENT_LOCK so that index_drop() uses the
* right lock level.
*/
performMultipleDeletions(objects, DROP_RESTRICT,
PERFORM_DELETION_CONCURRENT_LOCK | PERFORM_DELETION_INTERNAL);
}
PopActiveSnapshot();
CommitTransactionCommand();
/*
* Finally, release the session-level locks on the relations.
*/
foreach(lc, relationLocks)
{
LockRelId *lockrelid = (LockRelId *) lfirst(lc);
UnlockRelationIdForSession(lockrelid, ShareUpdateExclusiveLock);
}
/* Start a new transaction to finish the process properly */
StartTransactionCommand();
/* Log what we did */
if ((params->options & REINDEXOPT_VERBOSE) != 0)
{
if (relkind == RELKIND_INDEX)
ereport(INFO,
(errmsg("index \"%s.%s\" was reindexed",
relationNamespace, relationName),
errdetail("%s.",
pg_rusage_show(&ru0))));
else
{
foreach(lc, newIndexIds)
{
ReindexIndexInfo *idx = lfirst(lc);
Oid indOid = idx->indexId;
ereport(INFO,
(errmsg("index \"%s.%s\" was reindexed",
get_namespace_name(get_rel_namespace(indOid)),
get_rel_name(indOid))));
/* Don't show rusage here, since it's not per index. */
}
ereport(INFO,
(errmsg("table \"%s.%s\" was reindexed",
relationNamespace, relationName),
errdetail("%s.",
pg_rusage_show(&ru0))));
}
}
MemoryContextDelete(private_context);
pgstat_progress_end_command();
return true;
}
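/*
 * A minimal illustrative sketch (not part of PostgreSQL proper) of the
 * session-level lock dance used above: transaction-level locks disappear
 * at each intermediate commit, so a session-level lock is taken before
 * the first commit and released explicitly once all phases are done.
 * do_multi_transaction_work() is a hypothetical stand-in.
 */
static void
example_session_lock(LockRelId lockrelid)
{
	LockRelationIdForSession(&lockrelid, ShareUpdateExclusiveLock);

	CommitTransactionCommand();	/* transaction-level locks are released... */
	StartTransactionCommand();	/* ...but the session-level lock survives */

	do_multi_transaction_work();	/* hypothetical; protected from
									 * concurrent DDL on the relation */

	UnlockRelationIdForSession(&lockrelid, ShareUpdateExclusiveLock);
}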
/*
* Insert or delete an appropriate pg_inherits tuple to make the given index
* be a partition of the indicated parent index.
*
* This also corrects the pg_depend information for the affected index.
*/
void
IndexSetParentIndex(Relation partitionIdx, Oid parentOid)
{
Relation pg_inherits;
ScanKeyData key[2];
SysScanDesc scan;
Oid partRelid = RelationGetRelid(partitionIdx);
HeapTuple tuple;
bool fix_dependencies;
/* Make sure this is an index */
Assert(partitionIdx->rd_rel->relkind == RELKIND_INDEX ||
partitionIdx->rd_rel->relkind == RELKIND_PARTITIONED_INDEX);
/*
* Scan pg_inherits for rows linking our index to some parent.
*/
pg_inherits = relation_open(InheritsRelationId, RowExclusiveLock);
ScanKeyInit(&key[0],
Anum_pg_inherits_inhrelid,
BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(partRelid));
ScanKeyInit(&key[1],
Anum_pg_inherits_inhseqno,
BTEqualStrategyNumber, F_INT4EQ,
Int32GetDatum(1));
scan = systable_beginscan(pg_inherits, InheritsRelidSeqnoIndexId, true,
NULL, 2, key);
tuple = systable_getnext(scan);
if (!HeapTupleIsValid(tuple))
{
if (parentOid == InvalidOid)
{
/*
* No pg_inherits row, and no parent wanted: nothing to do in this
* case.
*/
fix_dependencies = false;
}
else
{
StoreSingleInheritance(partRelid, parentOid, 1);
fix_dependencies = true;
}
}
else
{
Form_pg_inherits inhForm = (Form_pg_inherits) GETSTRUCT(tuple);
if (parentOid == InvalidOid)
{
/*
* There exists a pg_inherits row, which we want to clear; do so.
*/
CatalogTupleDelete(pg_inherits, &tuple->t_self);
fix_dependencies = true;
}
else
{
/*
* A pg_inherits row exists. If it's the same one we want, then we're
* good; if it differs, that amounts to a corrupt catalog and
* should not happen.
*/
if (inhForm->inhparent != parentOid)
{
/* unexpected: we should not get called in this case */
elog(ERROR, "bogus pg_inherit row: inhrelid %u inhparent %u",
inhForm->inhrelid, inhForm->inhparent);
}
/* already in the right state */
fix_dependencies = false;
}
}
/* done with pg_inherits */
systable_endscan(scan);
relation_close(pg_inherits, RowExclusiveLock);
/* set relhassubclass if an index partition has been added to the parent */
if (OidIsValid(parentOid))
SetRelationHasSubclass(parentOid, true);
/* set relispartition correctly on the partition */
update_relispartition(partRelid, OidIsValid(parentOid));
if (fix_dependencies)
{
/*
* Insert/delete pg_depend rows. If setting a parent, add PARTITION
* dependencies on the parent index and the table; if removing a
* parent, delete PARTITION dependencies.
*/
if (OidIsValid(parentOid))
{
ObjectAddress partIdx;
ObjectAddress parentIdx;
ObjectAddress partitionTbl;
ObjectAddressSet(partIdx, RelationRelationId, partRelid);
ObjectAddressSet(parentIdx, RelationRelationId, parentOid);
ObjectAddressSet(partitionTbl, RelationRelationId,
partitionIdx->rd_index->indrelid);
recordDependencyOn(&partIdx, &parentIdx,
DEPENDENCY_PARTITION_PRI);
recordDependencyOn(&partIdx, &partitionTbl,
DEPENDENCY_PARTITION_SEC);
}
else
{
deleteDependencyRecordsForClass(RelationRelationId, partRelid,
RelationRelationId,
DEPENDENCY_PARTITION_PRI);
deleteDependencyRecordsForClass(RelationRelationId, partRelid,
RelationRelationId,
DEPENDENCY_PARTITION_SEC);
}
/* make our updates visible */
CommandCounterIncrement();
}
}
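/*
 * A minimal illustrative sketch (not part of PostgreSQL proper) of the
 * indexed catalog-scan idiom used by IndexSetParentIndex() above; the
 * catalog and scan keys mirror that function.  The returned tuple, if
 * any, is a palloc'd copy the caller must free.
 */
static HeapTuple
example_fetch_inherits_row(Relation pg_inherits, Oid partRelid)
{
	ScanKeyData key[2];
	SysScanDesc scan;
	HeapTuple	tuple;

	ScanKeyInit(&key[0],
				Anum_pg_inherits_inhrelid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(partRelid));
	ScanKeyInit(&key[1],
				Anum_pg_inherits_inhseqno,
				BTEqualStrategyNumber, F_INT4EQ,
				Int32GetDatum(1));

	scan = systable_beginscan(pg_inherits, InheritsRelidSeqnoIndexId, true,
							  NULL, 2, key);
	tuple = systable_getnext(scan);
	if (HeapTupleIsValid(tuple))
		tuple = heap_copytuple(tuple);	/* copy before ending the scan */
	systable_endscan(scan);

	return tuple;
}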
/*
* Subroutine of IndexSetParentIndex to update the relispartition flag of the
* given index to the given value.
*/
static void
update_relispartition(Oid relationId, bool newval)
{
HeapTuple tup;
Relation classRel;
classRel = table_open(RelationRelationId, RowExclusiveLock);
tup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relationId));
if (!HeapTupleIsValid(tup))
elog(ERROR, "cache lookup failed for relation %u", relationId);
Assert(((Form_pg_class) GETSTRUCT(tup))->relispartition != newval);
((Form_pg_class) GETSTRUCT(tup))->relispartition = newval;
CatalogTupleUpdate(classRel, &tup->t_self, tup);
heap_freetuple(tup);
table_close(classRel, RowExclusiveLock);
}
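/*
 * A minimal illustrative sketch (not part of PostgreSQL proper) of the
 * syscache-copy-then-update idiom used by update_relispartition() above.
 * Purely as an illustration, this one flips relhassubclass instead.
 */
static void
example_update_pg_class_flag(Oid relid, bool newval)
{
	Relation	classRel;
	HeapTuple	tup;

	classRel = table_open(RelationRelationId, RowExclusiveLock);
	tup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relid));
	if (!HeapTupleIsValid(tup))
		elog(ERROR, "cache lookup failed for relation %u", relid);

	/* mutate our local copy of the pg_class row, then write it back */
	((Form_pg_class) GETSTRUCT(tup))->relhassubclass = newval;
	CatalogTupleUpdate(classRel, &tup->t_self, tup);

	heap_freetuple(tup);
	table_close(classRel, RowExclusiveLock);
}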
/*
* Set the PROC_IN_SAFE_IC flag in MyProc->statusFlags.
*
* When doing concurrent index builds, we can set this flag
* to tell other processes concurrently running CREATE
* INDEX CONCURRENTLY or REINDEX CONCURRENTLY to ignore us when
* doing their waits for concurrent snapshots. On one hand it
* avoids pointlessly waiting for a process that's not interesting
* anyway; but more importantly it avoids deadlocks in some cases.
*
* This can be done safely only for indexes that don't execute any
* expressions that could access other tables, so the index must not be
* expressional or partial. Caller is responsible for only calling
* this routine when that assumption holds true.
*
* (The flag is reset automatically at transaction end, so it must be
* set for each transaction.)
*/
static inline void
set_indexsafe_procflags(void)
{
/*
* This should only be called before installing xid or xmin in MyProc;
* otherwise, concurrent processes could see an Xmin that moves backwards.
*/
Assert(MyProc->xid == InvalidTransactionId &&
MyProc->xmin == InvalidTransactionId);
LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
MyProc->statusFlags |= PROC_IN_SAFE_IC;
ProcGlobal->statusFlags[MyProc->pgxactoff] = MyProc->statusFlags;
LWLockRelease(ProcArrayLock);
}
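/*
 * A minimal illustrative sketch (not part of PostgreSQL proper) showing
 * how a waiting backend could test the flag set above for some other
 * backend's PGPROC; the lock level used here is an assumption.
 */
static bool
example_is_safe_ic(PGPROC *proc)
{
	bool		safe;

	LWLockAcquire(ProcArrayLock, LW_SHARED);
	safe = (proc->statusFlags & PROC_IN_SAFE_IC) != 0;
	LWLockRelease(ProcArrayLock);

	return safe;
}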