1996-07-09 08:22:35 +02:00
|
|
|
/*-------------------------------------------------------------------------
|
|
|
|
*
|
1999-02-14 00:22:53 +01:00
|
|
|
* index.c
|
1997-09-07 07:04:48 +02:00
|
|
|
* code to create and destroy POSTGRES index relations
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
2020-01-01 18:21:45 +01:00
|
|
|
* Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
|
2000-01-26 06:58:53 +01:00
|
|
|
* Portions Copyright (c) 1994, Regents of the University of California
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
|
|
|
*
|
|
|
|
* IDENTIFICATION
|
2010-09-20 22:08:53 +02:00
|
|
|
* src/backend/catalog/index.c
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
|
|
|
*
|
|
|
|
* INTERFACE ROUTINES
|
1997-09-07 07:04:48 +02:00
|
|
|
* index_create() - Create a cataloged index relation
|
1999-12-10 04:56:14 +01:00
|
|
|
* index_drop() - Removes index relation from catalogs
|
2000-07-15 00:18:02 +02:00
|
|
|
* BuildIndexInfo() - Prepare to insert index tuples
|
|
|
|
* FormIndexDatum() - Construct datum vector for one index tuple
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
|
|
|
*-------------------------------------------------------------------------
|
|
|
|
*/
|
1998-01-13 05:05:12 +01:00
|
|
|
#include "postgres.h"
|
1996-07-09 08:22:35 +02:00
|
|
|
|
2000-12-09 21:32:44 +01:00
|
|
|
#include <unistd.h>
|
1999-07-16 07:00:38 +02:00
|
|
|
|
2019-12-27 00:09:00 +01:00
|
|
|
#include "access/amapi.h"
|
Don't include heapam.h from others headers.
heapam.h previously was included in a number of widely used
headers (e.g. execnodes.h, indirectly in executor.h, ...). That's
problematic on its own, as heapam.h contains a lot of low-level
details that don't need to be exposed that widely, but becomes more
problematic with the upcoming introduction of pluggable table storage
- it seems inappropriate for heapam.h to be included that widely
afterwards.
heapam.h was largely only included in other headers to get the
HeapScanDesc typedef (which was defined in heapam.h, even though
HeapScanDescData is defined in relscan.h). The better solution here
seems to be to just use the underlying struct (forward declared where
necessary). Similar for BulkInsertState.
Another problem was that LockTupleMode was used in executor.h - parts
of the file tried to cope without heapam.h, but due to the fact that
it indirectly included it, several subsequent violations of that goal
were not noticed. We could just reuse the approach of declaring
parameters as int, but it seems nicer to move LockTupleMode to
lockoptions.h - that's not a perfect location, but also doesn't seem
bad.
As a number of files relied on implicitly included heapam.h, a
significant number of files grew an explicit include. It's quite
probably that a few external projects will need to do the same.
Author: Andres Freund
Reviewed-By: Alvaro Herrera
Discussion: https://postgr.es/m/20190114000701.y4ttcb74jpskkcfb@alap3.anarazel.de
2019-01-15 00:54:18 +01:00
|
|
|
#include "access/heapam.h"
|
Improve concurrency of foreign key locking
This patch introduces two additional lock modes for tuples: "SELECT FOR
KEY SHARE" and "SELECT FOR NO KEY UPDATE". These don't block each
other, in contrast with already existing "SELECT FOR SHARE" and "SELECT
FOR UPDATE". UPDATE commands that do not modify the values stored in
the columns that are part of the key of the tuple now grab a SELECT FOR
NO KEY UPDATE lock on the tuple, allowing them to proceed concurrently
with tuple locks of the FOR KEY SHARE variety.
Foreign key triggers now use FOR KEY SHARE instead of FOR SHARE; this
means the concurrency improvement applies to them, which is the whole
point of this patch.
The added tuple lock semantics require some rejiggering of the multixact
module, so that the locking level that each transaction is holding can
be stored alongside its Xid. Also, multixacts now need to persist
across server restarts and crashes, because they can now represent not
only tuple locks, but also tuple updates. This means we need more
careful tracking of lifetime of pg_multixact SLRU files; since they now
persist longer, we require more infrastructure to figure out when they
can be removed. pg_upgrade also needs to be careful to copy
pg_multixact files over from the old server to the new, or at least part
of multixact.c state, depending on the versions of the old and new
servers.
Tuple time qualification rules (HeapTupleSatisfies routines) need to be
careful not to consider tuples with the "is multi" infomask bit set as
being only locked; they might need to look up MultiXact values (i.e.
possibly do pg_multixact I/O) to find out the Xid that updated a tuple,
whereas they previously were assured to only use information readily
available from the tuple header. This is considered acceptable, because
the extra I/O would involve cases that would previously cause some
commands to block waiting for concurrent transactions to finish.
Another important change is the fact that locking tuples that have
previously been updated causes the future versions to be marked as
locked, too; this is essential for correctness of foreign key checks.
This causes additional WAL-logging, also (there was previously a single
WAL record for a locked tuple; now there are as many as updated copies
of the tuple there exist.)
With all this in place, contention related to tuples being checked by
foreign key rules should be much reduced.
As a bonus, the old behavior that a subtransaction grabbing a stronger
tuple lock than the parent (sub)transaction held on a given tuple and
later aborting caused the weaker lock to be lost, has been fixed.
Many new spec files were added for isolation tester framework, to ensure
overall behavior is sane. There's probably room for several more tests.
There were several reviewers of this patch; in particular, Noah Misch
and Andres Freund spent considerable time in it. Original idea for the
patch came from Simon Riggs, after a problem report by Joel Jacobson.
Most code is from me, with contributions from Marti Raudsepp, Alexander
Shulgin, Noah Misch and Andres Freund.
This patch was discussed in several pgsql-hackers threads; the most
important start at the following message-ids:
AANLkTimo9XVcEzfiBR-ut3KVNDkjm2Vxh+t8kAmWjPuv@mail.gmail.com
1290721684-sup-3951@alvh.no-ip.org
1294953201-sup-2099@alvh.no-ip.org
1320343602-sup-2290@alvh.no-ip.org
1339690386-sup-8927@alvh.no-ip.org
4FE5FF020200002500048A3D@gw.wicourts.gov
4FEAB90A0200002500048B7D@gw.wicourts.gov
2013-01-23 16:04:59 +01:00
|
|
|
#include "access/multixact.h"
|
Implement operator class parameters
PostgreSQL provides set of template index access methods, where opclasses have
much freedom in the semantics of indexing. These index AMs are GiST, GIN,
SP-GiST and BRIN. There opclasses define representation of keys, operations on
them and supported search strategies. So, it's natural that opclasses may be
faced some tradeoffs, which require user-side decision. This commit implements
opclass parameters allowing users to set some values, which tell opclass how to
index the particular dataset.
This commit doesn't introduce new storage in system catalog. Instead it uses
pg_attribute.attoptions, which is used for table column storage options but
unused for index attributes.
In order to evade changing signature of each opclass support function, we
implement unified way to pass options to opclass support functions. Options
are set to fn_expr as the constant bytea expression. It's possible due to the
fact that opclass support functions are executed outside of expressions, so
fn_expr is unused for them.
This commit comes with some examples of opclass options usage. We parametrize
signature length in GiST. That applies to multiple opclasses: tsvector_ops,
gist__intbig_ops, gist_ltree_ops, gist__ltree_ops, gist_trgm_ops and
gist_hstore_ops. Also we parametrize maximum number of integer ranges for
gist__int_ops. However, the main future usage of this feature is expected
to be json, where users would be able to specify which way to index particular
json parts.
Catversion is bumped.
Discussion: https://postgr.es/m/d22c3a18-31c7-1879-fc11-4c1ce2f5e5af%40postgrespro.ru
Author: Nikita Glukhov, revised by me
Reviewed-by: Nikolay Shaplov, Robert Haas, Tom Lane, Tomas Vondra, Alvaro Herrera
2020-03-30 18:17:11 +02:00
|
|
|
#include "access/reloptions.h"
|
2008-06-19 02:46:06 +02:00
|
|
|
#include "access/relscan.h"
|
2008-05-12 02:00:54 +02:00
|
|
|
#include "access/sysattr.h"
|
tableam: Add and use scan APIs.
To allow table accesses to be not directly dependent on heap, several
new abstractions are needed. Specifically:
1) Heap scans need to be generalized into table scans. Do this by
introducing TableScanDesc, which will be the "base class" for
individual AMs. This contains the AM independent fields from
HeapScanDesc.
The previous heap_{beginscan,rescan,endscan} et al. have been
replaced with a table_ version.
There's no direct replacement for heap_getnext(), as that returned
a HeapTuple, which is undesirable for other AMs. Instead there's
table_scan_getnextslot(). But note that heap_getnext() lives on,
it's still used widely to access catalog tables.
This is achieved by new scan_begin, scan_end, scan_rescan,
scan_getnextslot callbacks.
2) The portion of parallel scans that's shared between backends need
to be able to do so without the user doing per-AM work. To achieve
that new parallelscan_{estimate, initialize, reinitialize}
callbacks are introduced, which operate on a new
ParallelTableScanDesc, which again can be subclassed by AMs.
As it is likely that several AMs are going to be block oriented,
block oriented callbacks that can be shared between such AMs are
provided and used by heap. table_block_parallelscan_{estimate,
initialize, reinitialize} as callbacks, and
table_block_parallelscan_{nextpage, init} for use in AMs. These
operate on a ParallelBlockTableScanDesc.
3) Index scans need to be able to access tables to return a tuple, and
there needs to be state across individual accesses to the heap to
store state like buffers. That's now handled by introducing a
sort-of-scan IndexFetchTable, which again is intended to be
subclassed by individual AMs (for heap IndexFetchHeap).
The relevant callbacks for an AM are index_fetch_{end, begin,
reset} to create the necessary state, and index_fetch_tuple to
retrieve an indexed tuple. Note that index_fetch_tuple
implementations need to be smarter than just blindly fetching the
tuples for AMs that have optimizations similar to heap's HOT - the
currently alive tuple in the update chain needs to be fetched if
appropriate.
Similar to table_scan_getnextslot(), it's undesirable to continue
to return HeapTuples. Thus index_fetch_heap (might want to rename
that later) now accepts a slot as an argument. Core code doesn't
have a lot of call sites performing index scans without going
through the systable_* API (in contrast to loads of heap_getnext
calls and working directly with HeapTuples).
Index scans now store the result of a search in
IndexScanDesc->xs_heaptid, rather than xs_ctup->t_self. As the
target is not generally a HeapTuple anymore that seems cleaner.
To be able to sensibly adapt code to use the above, two further
callbacks have been introduced:
a) slot_callbacks returns a TupleTableSlotOps* suitable for creating
slots capable of holding a tuple of the AMs
type. table_slot_callbacks() and table_slot_create() are based
upon that, but have additional logic to deal with views, foreign
tables, etc.
While this change could have been done separately, nearly all the
call sites that needed to be adapted for the rest of this commit
also would have been needed to be adapted for
table_slot_callbacks(), making separation not worthwhile.
b) tuple_satisfies_snapshot checks whether the tuple in a slot is
currently visible according to a snapshot. That's required as a few
places now don't have a buffer + HeapTuple around, but a
slot (which in heap's case internally has that information).
Additionally a few infrastructure changes were needed:
I) SysScanDesc, as used by systable_{beginscan, getnext} et al. now
internally uses a slot to keep track of tuples. While
systable_getnext() still returns HeapTuples, and will so for the
foreseeable future, the index API (see 1) above) now only deals with
slots.
The remainder, and largest part, of this commit is then adjusting all
scans in postgres to use the new APIs.
Author: Andres Freund, Haribabu Kommi, Alvaro Herrera
Discussion:
https://postgr.es/m/20180703070645.wchpu5muyto5n647@alap3.anarazel.de
https://postgr.es/m/20160812231527.GA690404@alvherre.pgsql
2019-03-11 20:46:41 +01:00
|
|
|
#include "access/tableam.h"
|
2006-07-13 18:49:20 +02:00
|
|
|
#include "access/transam.h"
|
2011-10-14 23:23:01 +02:00
|
|
|
#include "access/visibilitymap.h"
|
2006-07-13 18:49:20 +02:00
|
|
|
#include "access/xact.h"
|
1998-01-13 05:05:12 +01:00
|
|
|
#include "bootstrap/bootstrap.h"
|
2013-12-19 22:10:01 +01:00
|
|
|
#include "catalog/binary_upgrade.h"
|
2000-11-08 23:10:03 +01:00
|
|
|
#include "catalog/catalog.h"
|
2002-07-12 20:43:19 +02:00
|
|
|
#include "catalog/dependency.h"
|
1998-01-13 05:05:12 +01:00
|
|
|
#include "catalog/heap.h"
|
|
|
|
#include "catalog/index.h"
|
2012-10-23 23:07:26 +02:00
|
|
|
#include "catalog/objectaccess.h"
|
2019-04-12 08:36:05 +02:00
|
|
|
#include "catalog/partition.h"
|
Restructure index access method API to hide most of it at the C level.
This patch reduces pg_am to just two columns, a name and a handler
function. All the data formerly obtained from pg_am is now provided
in a C struct returned by the handler function. This is similar to
the designs we've adopted for FDWs and tablesample methods. There
are multiple advantages. For one, the index AM's support functions
are now simple C functions, making them faster to call and much less
error-prone, since the C compiler can now check function signatures.
For another, this will make it far more practical to define index access
methods in installable extensions.
A disadvantage is that SQL-level code can no longer see attributes
of index AMs; in particular, some of the crosschecks in the opr_sanity
regression test are no longer possible from SQL. We've addressed that
by adding a facility for the index AM to perform such checks instead.
(Much more could be done in that line, but for now we're content if the
amvalidate functions more or less replace what opr_sanity used to do.)
We might also want to expose some sort of reporting functionality, but
this patch doesn't do that.
Alexander Korotkov, reviewed by Petr Jelínek, and rather heavily
editorialized on by me.
2016-01-18 01:36:59 +01:00
|
|
|
#include "catalog/pg_am.h"
|
2011-02-12 14:54:13 +01:00
|
|
|
#include "catalog/pg_collation.h"
|
2002-07-12 20:43:19 +02:00
|
|
|
#include "catalog/pg_constraint.h"
|
Local partitioned indexes
When CREATE INDEX is run on a partitioned table, create catalog entries
for an index on the partitioned table (which is just a placeholder since
the table proper has no data of its own), and recurse to create actual
indexes on the existing partitions; create them in future partitions
also.
As a convenience gadget, if the new index definition matches some
existing index in partitions, these are picked up and used instead of
creating new ones. Whichever way these indexes come about, they become
attached to the index on the parent table and are dropped alongside it,
and cannot be dropped on isolation unless they are detached first.
To support pg_dump'ing these indexes, add commands
CREATE INDEX ON ONLY <table>
(which creates the index on the parent partitioned table, without
recursing) and
ALTER INDEX ATTACH PARTITION
(which is used after the indexes have been created individually on each
partition, to attach them to the parent index). These reconstruct prior
database state exactly.
Reviewed-by: (in alphabetical order) Peter Eisentraut, Robert Haas, Amit
Langote, Jesper Pedersen, Simon Riggs, David Rowley
Discussion: https://postgr.es/m/20171113170646.gzweigyrgg6pwsg4@alvherre.pgsql
2018-01-19 15:49:22 +01:00
|
|
|
#include "catalog/pg_depend.h"
|
2019-11-12 04:00:16 +01:00
|
|
|
#include "catalog/pg_description.h"
|
2018-04-08 20:35:29 +02:00
|
|
|
#include "catalog/pg_inherits.h"
|
2001-08-22 20:24:26 +02:00
|
|
|
#include "catalog/pg_opclass.h"
|
2019-11-12 04:00:16 +01:00
|
|
|
#include "catalog/pg_operator.h"
|
2007-10-12 20:55:12 +02:00
|
|
|
#include "catalog/pg_tablespace.h"
|
2009-07-29 22:56:21 +02:00
|
|
|
#include "catalog/pg_trigger.h"
|
1998-01-13 05:05:12 +01:00
|
|
|
#include "catalog/pg_type.h"
|
2008-11-19 11:34:52 +01:00
|
|
|
#include "catalog/storage.h"
|
2018-10-07 00:17:46 +02:00
|
|
|
#include "commands/event_trigger.h"
|
2019-03-25 15:59:04 +01:00
|
|
|
#include "commands/progress.h"
|
Report progress of CREATE INDEX operations
This uses the progress reporting infrastructure added by c16dc1aca5e0,
adding support for CREATE INDEX and CREATE INDEX CONCURRENTLY.
There are two pieces to this: one is index-AM-agnostic, and the other is
AM-specific. The latter is fairly elaborate for btrees, including
reportage for parallel index builds and the separate phases that btree
index creation uses; other index AMs, which are much simpler in their
building procedures, have simplistic reporting only, but that seems
sufficient, at least for non-concurrent builds.
The index-AM-agnostic part is fairly complete, providing insight into
the CONCURRENTLY wait phases as well as block-based progress during the
index validation table scan. (The index validation index scan requires
patching each AM, which has not been included here.)
Reviewers: Rahila Syed, Pavan Deolasee, Tatsuro Yamada
Discussion: https://postgr.es/m/20181220220022.mg63bhk26zdpvmcj@alvherre.pgsql
2019-04-02 20:18:08 +02:00
|
|
|
#include "commands/tablecmds.h"
|
2009-07-29 22:56:21 +02:00
|
|
|
#include "commands/trigger.h"
|
1998-01-13 05:05:12 +01:00
|
|
|
#include "executor/executor.h"
|
|
|
|
#include "miscadmin.h"
|
2009-07-29 22:56:21 +02:00
|
|
|
#include "nodes/makefuncs.h"
|
2008-08-26 00:42:34 +02:00
|
|
|
#include "nodes/nodeFuncs.h"
|
2019-01-29 21:48:51 +01:00
|
|
|
#include "optimizer/optimizer.h"
|
2009-07-29 22:56:21 +02:00
|
|
|
#include "parser/parser.h"
|
2019-03-25 15:59:04 +01:00
|
|
|
#include "pgstat.h"
|
Local partitioned indexes
When CREATE INDEX is run on a partitioned table, create catalog entries
for an index on the partitioned table (which is just a placeholder since
the table proper has no data of its own), and recurse to create actual
indexes on the existing partitions; create them in future partitions
also.
As a convenience gadget, if the new index definition matches some
existing index in partitions, these are picked up and used instead of
creating new ones. Whichever way these indexes come about, they become
attached to the index on the parent table and are dropped alongside it,
and cannot be dropped on isolation unless they are detached first.
To support pg_dump'ing these indexes, add commands
CREATE INDEX ON ONLY <table>
(which creates the index on the parent partitioned table, without
recursing) and
ALTER INDEX ATTACH PARTITION
(which is used after the indexes have been created individually on each
partition, to attach them to the parent index). These reconstruct prior
database state exactly.
Reviewed-by: (in alphabetical order) Peter Eisentraut, Robert Haas, Amit
Langote, Jesper Pedersen, Simon Riggs, David Rowley
Discussion: https://postgr.es/m/20171113170646.gzweigyrgg6pwsg4@alvherre.pgsql
2018-01-19 15:49:22 +01:00
|
|
|
#include "rewrite/rewriteManip.h"
|
2008-05-12 02:00:54 +02:00
|
|
|
#include "storage/bufmgr.h"
|
|
|
|
#include "storage/lmgr.h"
|
2011-06-08 12:47:21 +02:00
|
|
|
#include "storage/predicate.h"
|
2005-05-19 23:35:48 +02:00
|
|
|
#include "storage/procarray.h"
|
1998-01-13 05:05:12 +01:00
|
|
|
#include "storage/smgr.h"
|
|
|
|
#include "utils/builtins.h"
|
2000-05-28 19:56:29 +02:00
|
|
|
#include "utils/fmgroids.h"
|
2011-09-04 07:13:16 +02:00
|
|
|
#include "utils/guc.h"
|
2000-11-08 23:10:03 +01:00
|
|
|
#include "utils/inval.h"
|
2002-03-26 20:17:02 +01:00
|
|
|
#include "utils/lsyscache.h"
|
2005-05-06 19:24:55 +02:00
|
|
|
#include "utils/memutils.h"
|
2015-05-15 13:09:57 +02:00
|
|
|
#include "utils/pg_rusage.h"
|
2019-11-12 04:00:16 +01:00
|
|
|
#include "utils/snapmgr.h"
|
1998-01-13 05:05:12 +01:00
|
|
|
#include "utils/syscache.h"
|
2006-08-25 06:06:58 +02:00
|
|
|
#include "utils/tuplesort.h"
|
2002-03-26 20:17:02 +01:00
|
|
|
|
2015-03-11 03:33:25 +01:00
|
|
|
/* Potentially set by pg_upgrade_support functions */
|
2011-01-08 03:25:34 +01:00
|
|
|
Oid binary_upgrade_next_index_pg_class_oid = InvalidOid;
|
2010-02-03 02:14:17 +01:00
|
|
|
|
2018-01-19 13:48:44 +01:00
|
|
|
/*
 * Pointer-free representation of variables used when reindexing system
 * catalogs; we use this to propagate those values to parallel workers.
 */
typedef struct
{
	/* OID of the heap relation currently being reindexed */
	Oid			currentlyReindexedHeap;
	/* OID of the index currently being reindexed */
	Oid			currentlyReindexedIndex;
	/* number of entries in pendingReindexedIndexes[] */
	int			numPendingReindexedIndexes;
	/* OIDs of indexes awaiting reindexing; length given above */
	Oid			pendingReindexedIndexes[FLEXIBLE_ARRAY_MEMBER];
} SerializedReindexState;
|
|
|
|
|
1996-07-09 08:22:35 +02:00
|
|
|
/* non-export function prototypes */
|
2011-01-25 21:42:03 +01:00
|
|
|
static bool relationHasPrimaryKey(Relation rel);
|
2001-01-24 01:06:07 +01:00
|
|
|
static TupleDesc ConstructTupleDescriptor(Relation heapRelation,
|
2019-05-22 19:04:48 +02:00
|
|
|
IndexInfo *indexInfo,
|
|
|
|
List *indexColNames,
|
|
|
|
Oid accessMethodObjectId,
|
|
|
|
Oid *collationObjectId,
|
|
|
|
Oid *classObjectId);
|
1998-09-01 06:40:42 +02:00
|
|
|
static void InitializeAttributeOids(Relation indexRelation,
|
2019-05-22 19:04:48 +02:00
|
|
|
int numatts, Oid indexoid);
|
Implement operator class parameters
PostgreSQL provides set of template index access methods, where opclasses have
much freedom in the semantics of indexing. These index AMs are GiST, GIN,
SP-GiST and BRIN. There opclasses define representation of keys, operations on
them and supported search strategies. So, it's natural that opclasses may be
faced some tradeoffs, which require user-side decision. This commit implements
opclass parameters allowing users to set some values, which tell opclass how to
index the particular dataset.
This commit doesn't introduce new storage in system catalog. Instead it uses
pg_attribute.attoptions, which is used for table column storage options but
unused for index attributes.
In order to evade changing signature of each opclass support function, we
implement unified way to pass options to opclass support functions. Options
are set to fn_expr as the constant bytea expression. It's possible due to the
fact that opclass support functions are executed outside of expressions, so
fn_expr is unused for them.
This commit comes with some examples of opclass options usage. We parametrize
signature length in GiST. That applies to multiple opclasses: tsvector_ops,
gist__intbig_ops, gist_ltree_ops, gist__ltree_ops, gist_trgm_ops and
gist_hstore_ops. Also we parametrize maximum number of integer ranges for
gist__int_ops. However, the main future usage of this feature is expected
to be json, where users would be able to specify which way to index particular
json parts.
Catversion is bumped.
Discussion: https://postgr.es/m/d22c3a18-31c7-1879-fc11-4c1ce2f5e5af%40postgrespro.ru
Author: Nikita Glukhov, revised by me
Reviewed-by: Nikolay Shaplov, Robert Haas, Tom Lane, Tomas Vondra, Alvaro Herrera
2020-03-30 18:17:11 +02:00
|
|
|
static void AppendAttributeTuples(Relation indexRelation, int numatts,
|
|
|
|
Datum *attopts);
|
1998-09-01 06:40:42 +02:00
|
|
|
static void UpdateIndexRelation(Oid indexoid, Oid heapoid,
|
2019-05-22 19:04:48 +02:00
|
|
|
Oid parentIndexId,
|
|
|
|
IndexInfo *indexInfo,
|
|
|
|
Oid *collationOids,
|
|
|
|
Oid *classOids,
|
|
|
|
int16 *coloptions,
|
|
|
|
bool primary,
|
|
|
|
bool isexclusion,
|
|
|
|
bool immediate,
|
|
|
|
bool isvalid,
|
|
|
|
bool isready);
|
2009-12-07 06:22:23 +01:00
|
|
|
static void index_update_stats(Relation rel,
|
2019-05-22 19:04:48 +02:00
|
|
|
bool hasindex,
|
|
|
|
double reltuples);
|
2009-12-07 06:22:23 +01:00
|
|
|
static void IndexCheckExclusion(Relation heapRelation,
|
2019-05-22 19:04:48 +02:00
|
|
|
Relation indexRelation,
|
|
|
|
IndexInfo *indexInfo);
|
2006-08-25 06:06:58 +02:00
|
|
|
static bool validate_index_callback(ItemPointer itemptr, void *opaque);
|
2011-06-06 04:30:04 +02:00
|
|
|
static bool ReindexIsCurrentlyProcessingIndex(Oid indexOid);
|
2010-02-07 21:48:13 +01:00
|
|
|
static void SetReindexProcessing(Oid heapOid, Oid indexOid);
|
|
|
|
static void ResetReindexProcessing(void);
|
|
|
|
static void SetReindexPending(List *indexes);
|
|
|
|
static void RemoveReindexPending(Oid indexOid);
|
1996-07-09 08:22:35 +02:00
|
|
|
|
2000-07-15 00:18:02 +02:00
|
|
|
|
2011-01-25 21:42:03 +01:00
|
|
|
/*
|
|
|
|
* relationHasPrimaryKey
|
|
|
|
* See whether an existing relation has a primary key.
|
|
|
|
*
|
|
|
|
* Caller must have suitable lock on the relation.
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
*
|
2018-12-27 10:07:46 +01:00
|
|
|
* Note: we intentionally do not check indisvalid here; that's because this
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
* is used to enforce the rule that there can be only one indisprimary index,
|
|
|
|
* and we want that to be true even if said index is invalid.
|
2011-01-25 21:42:03 +01:00
|
|
|
*/
|
|
|
|
static bool
|
|
|
|
relationHasPrimaryKey(Relation rel)
|
|
|
|
{
|
|
|
|
bool result = false;
|
|
|
|
List *indexoidlist;
|
|
|
|
ListCell *indexoidscan;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Get the list of index OIDs for the table from the relcache, and look up
|
|
|
|
* each one in the pg_index syscache until we find one marked primary key
|
|
|
|
* (hopefully there isn't more than one such).
|
|
|
|
*/
|
|
|
|
indexoidlist = RelationGetIndexList(rel);
|
|
|
|
|
|
|
|
foreach(indexoidscan, indexoidlist)
|
|
|
|
{
|
|
|
|
Oid indexoid = lfirst_oid(indexoidscan);
|
|
|
|
HeapTuple indexTuple;
|
|
|
|
|
|
|
|
indexTuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(indexoid));
|
Phase 2 of pgindent updates.
Change pg_bsd_indent to follow upstream rules for placement of comments
to the right of code, and remove pgindent hack that caused comments
following #endif to not obey the general rule.
Commit e3860ffa4dd0dad0dd9eea4be9cc1412373a8c89 wasn't actually using
the published version of pg_bsd_indent, but a hacked-up version that
tried to minimize the amount of movement of comments to the right of
code. The situation of interest is where such a comment has to be
moved to the right of its default placement at column 33 because there's
code there. BSD indent has always moved right in units of tab stops
in such cases --- but in the previous incarnation, indent was working
in 8-space tab stops, while now it knows we use 4-space tabs. So the
net result is that in about half the cases, such comments are placed
one tab stop left of before. This is better all around: it leaves
more room on the line for comment text, and it means that in such
cases the comment uniformly starts at the next 4-space tab stop after
the code, rather than sometimes one and sometimes two tabs after.
Also, ensure that comments following #endif are indented the same
as comments following other preprocessor commands such as #else.
That inconsistency turns out to have been self-inflicted damage
from a poorly-thought-through post-indent "fixup" in pgindent.
This patch is much less interesting than the first round of indent
changes, but also bulkier, so I thought it best to separate the effects.
Discussion: https://postgr.es/m/E1dAmxK-0006EE-1r@gemulon.postgresql.org
Discussion: https://postgr.es/m/30527.1495162840@sss.pgh.pa.us
2017-06-21 21:18:54 +02:00
|
|
|
if (!HeapTupleIsValid(indexTuple)) /* should not happen */
|
2011-01-25 21:42:03 +01:00
|
|
|
elog(ERROR, "cache lookup failed for index %u", indexoid);
|
|
|
|
result = ((Form_pg_index) GETSTRUCT(indexTuple))->indisprimary;
|
|
|
|
ReleaseSysCache(indexTuple);
|
|
|
|
if (result)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
list_free(indexoidlist);
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* index_check_primary_key
|
|
|
|
* Apply special checks needed before creating a PRIMARY KEY index
|
|
|
|
*
|
|
|
|
* This processing used to be in DefineIndex(), but has been split out
|
|
|
|
* so that it can be applied during ALTER TABLE ADD PRIMARY KEY USING INDEX.
|
|
|
|
*
|
|
|
|
* We check for a pre-existing primary key, and that all columns of the index
|
|
|
|
* are simple column references (not expressions), and that all those
|
Avoid order-of-execution problems with ALTER TABLE ADD PRIMARY KEY.
Up to now, DefineIndex() was responsible for adding attnotnull constraints
to the columns of a primary key, in any case where it hadn't been
convenient for transformIndexConstraint() to mark those columns as
is_not_null. It (or rather its minion index_check_primary_key) did this
by executing an ALTER TABLE SET NOT NULL command for the target table.
The trouble with this solution is that if we're creating the index due
to ALTER TABLE ADD PRIMARY KEY, and the outer ALTER TABLE has additional
sub-commands, the inner ALTER TABLE's operations executed at the wrong
time with respect to the outer ALTER TABLE's operations. In particular,
the inner ALTER would perform a validation scan at a point where the
table's storage might be inconsistent with its catalog entries. (This is
on the hairy edge of being a security problem, but AFAICS it isn't one
because the inner scan would only be interested in the tuples' null
bitmaps.) This can result in unexpected failures, such as the one seen
in bug #15580 from Allison Kaptur.
To fix, let's remove the attempt to do SET NOT NULL from DefineIndex(),
reducing index_check_primary_key's role to verifying that the columns are
already not null. (It shouldn't ever see such a case, but it seems wise
to keep the check for safety.) Instead, make transformIndexConstraint()
generate ALTER TABLE SET NOT NULL subcommands to be executed ahead of
the ADD PRIMARY KEY operation in every case where it can't force the
column to be created already-not-null. This requires only minor surgery
in parse_utilcmd.c, and it makes for a much more satisfying spec for
transformIndexConstraint(): it's no longer having to take it on faith
that someone else will handle addition of NOT NULL constraints.
To make that work, we have to move the execution of AT_SetNotNull into
an ALTER pass that executes ahead of AT_PASS_ADD_INDEX. I moved it to
AT_PASS_COL_ATTRS, and put that after AT_PASS_ADD_COL to avoid failure
when the column is being added in the same command. This incidentally
fixes a bug in the only previous usage of AT_PASS_COL_ATTRS, for
AT_SetIdentity: it didn't work either for a newly-added column.
Playing around with this exposed a separate bug in ALTER TABLE ONLY ...
ADD PRIMARY KEY for partitioned tables. The intent of the ONLY modifier
in that context is to prevent doing anything that would require holding
lock for a long time --- but the implied SET NOT NULL would recurse to
the child partitions, and do an expensive validation scan for any child
where the column(s) were not already NOT NULL. To fix that, invent a
new ALTER subcommand AT_CheckNotNull that just insists that a child
column be already NOT NULL, and apply that, not AT_SetNotNull, when
recursing to children in this scenario. This results in a slightly laxer
definition of ALTER TABLE ONLY ... SET NOT NULL for partitioned tables,
too: that command will now work as long as all children are already NOT
NULL, whereas before it just threw up its hands if there were any
partitions.
In passing, clean up the API of generateClonedIndexStmt(): remove a
useless argument, ensure that the output argument is not left undefined,
update the header comment.
A small side effect of this change is that no-such-column errors in ALTER
TABLE ADD PRIMARY KEY now produce a different message that includes the
table name, because they are now detected by the SET NOT NULL step which
has historically worded its error that way. That seems fine to me, so
I didn't make any effort to avoid the wording change.
The basic bug #15580 is of very long standing, and these other bugs
aren't new in v12 either. However, this is a pretty significant change
in the way ALTER TABLE ADD PRIMARY KEY works. On balance it seems best
not to back-patch, at least not till we get some more confidence that
this patch has no new bugs.
Patch by me, but thanks to Jie Zhang for a preliminary version.
Discussion: https://postgr.es/m/15580-d1a6de5a3d65da51@postgresql.org
Discussion: https://postgr.es/m/1396E95157071C4EBBA51892C5368521017F2E6E63@G08CNEXMBPEKD02.g08.fujitsu.local
2019-04-23 18:25:27 +02:00
|
|
|
* columns are marked NOT NULL. If not, fail.
|
2011-01-25 21:42:03 +01:00
|
|
|
*
|
Avoid order-of-execution problems with ALTER TABLE ADD PRIMARY KEY.
Up to now, DefineIndex() was responsible for adding attnotnull constraints
to the columns of a primary key, in any case where it hadn't been
convenient for transformIndexConstraint() to mark those columns as
is_not_null. It (or rather its minion index_check_primary_key) did this
by executing an ALTER TABLE SET NOT NULL command for the target table.
The trouble with this solution is that if we're creating the index due
to ALTER TABLE ADD PRIMARY KEY, and the outer ALTER TABLE has additional
sub-commands, the inner ALTER TABLE's operations executed at the wrong
time with respect to the outer ALTER TABLE's operations. In particular,
the inner ALTER would perform a validation scan at a point where the
table's storage might be inconsistent with its catalog entries. (This is
on the hairy edge of being a security problem, but AFAICS it isn't one
because the inner scan would only be interested in the tuples' null
bitmaps.) This can result in unexpected failures, such as the one seen
in bug #15580 from Allison Kaptur.
To fix, let's remove the attempt to do SET NOT NULL from DefineIndex(),
reducing index_check_primary_key's role to verifying that the columns are
already not null. (It shouldn't ever see such a case, but it seems wise
to keep the check for safety.) Instead, make transformIndexConstraint()
generate ALTER TABLE SET NOT NULL subcommands to be executed ahead of
the ADD PRIMARY KEY operation in every case where it can't force the
column to be created already-not-null. This requires only minor surgery
in parse_utilcmd.c, and it makes for a much more satisfying spec for
transformIndexConstraint(): it's no longer having to take it on faith
that someone else will handle addition of NOT NULL constraints.
To make that work, we have to move the execution of AT_SetNotNull into
an ALTER pass that executes ahead of AT_PASS_ADD_INDEX. I moved it to
AT_PASS_COL_ATTRS, and put that after AT_PASS_ADD_COL to avoid failure
when the column is being added in the same command. This incidentally
fixes a bug in the only previous usage of AT_PASS_COL_ATTRS, for
AT_SetIdentity: it didn't work either for a newly-added column.
Playing around with this exposed a separate bug in ALTER TABLE ONLY ...
ADD PRIMARY KEY for partitioned tables. The intent of the ONLY modifier
in that context is to prevent doing anything that would require holding
lock for a long time --- but the implied SET NOT NULL would recurse to
the child partitions, and do an expensive validation scan for any child
where the column(s) were not already NOT NULL. To fix that, invent a
new ALTER subcommand AT_CheckNotNull that just insists that a child
column be already NOT NULL, and apply that, not AT_SetNotNull, when
recursing to children in this scenario. This results in a slightly laxer
definition of ALTER TABLE ONLY ... SET NOT NULL for partitioned tables,
too: that command will now work as long as all children are already NOT
NULL, whereas before it just threw up its hands if there were any
partitions.
In passing, clean up the API of generateClonedIndexStmt(): remove a
useless argument, ensure that the output argument is not left undefined,
update the header comment.
A small side effect of this change is that no-such-column errors in ALTER
TABLE ADD PRIMARY KEY now produce a different message that includes the
table name, because they are now detected by the SET NOT NULL step which
has historically worded its error that way. That seems fine to me, so
I didn't make any effort to avoid the wording change.
The basic bug #15580 is of very long standing, and these other bugs
aren't new in v12 either. However, this is a pretty significant change
in the way ALTER TABLE ADD PRIMARY KEY works. On balance it seems best
not to back-patch, at least not till we get some more confidence that
this patch has no new bugs.
Patch by me, but thanks to Jie Zhang for a preliminary version.
Discussion: https://postgr.es/m/15580-d1a6de5a3d65da51@postgresql.org
Discussion: https://postgr.es/m/1396E95157071C4EBBA51892C5368521017F2E6E63@G08CNEXMBPEKD02.g08.fujitsu.local
2019-04-23 18:25:27 +02:00
|
|
|
* We used to automatically change unmarked columns to NOT NULL here by doing
|
|
|
|
* our own local ALTER TABLE command. But that doesn't work well if we're
|
|
|
|
* executing one subcommand of an ALTER TABLE: the operations may not get
|
|
|
|
* performed in the right order overall. Now we expect that the parser
|
|
|
|
* inserted any required ALTER TABLE SET NOT NULL operations before trying
|
|
|
|
* to create a primary-key index.
|
2017-08-04 17:45:18 +02:00
|
|
|
*
|
2011-01-25 21:42:03 +01:00
|
|
|
* Caller had better have at least ShareLock on the table, else the not-null
|
|
|
|
* checking isn't trustworthy.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
index_check_primary_key(Relation heapRel,
|
|
|
|
IndexInfo *indexInfo,
|
2018-10-07 00:17:46 +02:00
|
|
|
bool is_alter_table,
|
|
|
|
IndexStmt *stmt)
|
2011-01-25 21:42:03 +01:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
/*
|
Avoid order-of-execution problems with ALTER TABLE ADD PRIMARY KEY.
Up to now, DefineIndex() was responsible for adding attnotnull constraints
to the columns of a primary key, in any case where it hadn't been
convenient for transformIndexConstraint() to mark those columns as
is_not_null. It (or rather its minion index_check_primary_key) did this
by executing an ALTER TABLE SET NOT NULL command for the target table.
The trouble with this solution is that if we're creating the index due
to ALTER TABLE ADD PRIMARY KEY, and the outer ALTER TABLE has additional
sub-commands, the inner ALTER TABLE's operations executed at the wrong
time with respect to the outer ALTER TABLE's operations. In particular,
the inner ALTER would perform a validation scan at a point where the
table's storage might be inconsistent with its catalog entries. (This is
on the hairy edge of being a security problem, but AFAICS it isn't one
because the inner scan would only be interested in the tuples' null
bitmaps.) This can result in unexpected failures, such as the one seen
in bug #15580 from Allison Kaptur.
To fix, let's remove the attempt to do SET NOT NULL from DefineIndex(),
reducing index_check_primary_key's role to verifying that the columns are
already not null. (It shouldn't ever see such a case, but it seems wise
to keep the check for safety.) Instead, make transformIndexConstraint()
generate ALTER TABLE SET NOT NULL subcommands to be executed ahead of
the ADD PRIMARY KEY operation in every case where it can't force the
column to be created already-not-null. This requires only minor surgery
in parse_utilcmd.c, and it makes for a much more satisfying spec for
transformIndexConstraint(): it's no longer having to take it on faith
that someone else will handle addition of NOT NULL constraints.
To make that work, we have to move the execution of AT_SetNotNull into
an ALTER pass that executes ahead of AT_PASS_ADD_INDEX. I moved it to
AT_PASS_COL_ATTRS, and put that after AT_PASS_ADD_COL to avoid failure
when the column is being added in the same command. This incidentally
fixes a bug in the only previous usage of AT_PASS_COL_ATTRS, for
AT_SetIdentity: it didn't work either for a newly-added column.
Playing around with this exposed a separate bug in ALTER TABLE ONLY ...
ADD PRIMARY KEY for partitioned tables. The intent of the ONLY modifier
in that context is to prevent doing anything that would require holding
lock for a long time --- but the implied SET NOT NULL would recurse to
the child partitions, and do an expensive validation scan for any child
where the column(s) were not already NOT NULL. To fix that, invent a
new ALTER subcommand AT_CheckNotNull that just insists that a child
column be already NOT NULL, and apply that, not AT_SetNotNull, when
recursing to children in this scenario. This results in a slightly laxer
definition of ALTER TABLE ONLY ... SET NOT NULL for partitioned tables,
too: that command will now work as long as all children are already NOT
NULL, whereas before it just threw up its hands if there were any
partitions.
In passing, clean up the API of generateClonedIndexStmt(): remove a
useless argument, ensure that the output argument is not left undefined,
update the header comment.
A small side effect of this change is that no-such-column errors in ALTER
TABLE ADD PRIMARY KEY now produce a different message that includes the
table name, because they are now detected by the SET NOT NULL step which
has historically worded its error that way. That seems fine to me, so
I didn't make any effort to avoid the wording change.
The basic bug #15580 is of very long standing, and these other bugs
aren't new in v12 either. However, this is a pretty significant change
in the way ALTER TABLE ADD PRIMARY KEY works. On balance it seems best
not to back-patch, at least not till we get some more confidence that
this patch has no new bugs.
Patch by me, but thanks to Jie Zhang for a preliminary version.
Discussion: https://postgr.es/m/15580-d1a6de5a3d65da51@postgresql.org
Discussion: https://postgr.es/m/1396E95157071C4EBBA51892C5368521017F2E6E63@G08CNEXMBPEKD02.g08.fujitsu.local
2019-04-23 18:25:27 +02:00
|
|
|
* If ALTER TABLE or CREATE TABLE .. PARTITION OF, check that there isn't
|
|
|
|
* already a PRIMARY KEY. In CREATE TABLE for an ordinary relation, we
|
2018-10-04 16:37:20 +02:00
|
|
|
* have faith that the parser rejected multiple pkey clauses; and CREATE
|
|
|
|
* INDEX doesn't have a way to say PRIMARY KEY, so it's no problem either.
|
2011-01-25 21:42:03 +01:00
|
|
|
*/
|
2018-10-04 16:37:20 +02:00
|
|
|
if ((is_alter_table || heapRel->rd_rel->relispartition) &&
|
2011-01-25 21:42:03 +01:00
|
|
|
relationHasPrimaryKey(heapRel))
|
|
|
|
{
|
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
|
Phase 3 of pgindent updates.
Don't move parenthesized lines to the left, even if that means they
flow past the right margin.
By default, BSD indent lines up statement continuation lines that are
within parentheses so that they start just to the right of the preceding
left parenthesis. However, traditionally, if that resulted in the
continuation line extending to the right of the desired right margin,
then indent would push it left just far enough to not overrun the margin,
if it could do so without making the continuation line start to the left of
the current statement indent. That makes for a weird mix of indentations
unless one has been completely rigid about never violating the 80-column
limit.
This behavior has been pretty universally panned by Postgres developers.
Hence, disable it with indent's new -lpl switch, so that parenthesized
lines are always lined up with the preceding left paren.
This patch is much less interesting than the first round of indent
changes, but also bulkier, so I thought it best to separate the effects.
Discussion: https://postgr.es/m/E1dAmxK-0006EE-1r@gemulon.postgresql.org
Discussion: https://postgr.es/m/30527.1495162840@sss.pgh.pa.us
2017-06-21 21:35:54 +02:00
|
|
|
errmsg("multiple primary keys for table \"%s\" are not allowed",
|
|
|
|
RelationGetRelationName(heapRel))));
|
2011-01-25 21:42:03 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check that all of the attributes in a primary key are marked as not
|
Avoid order-of-execution problems with ALTER TABLE ADD PRIMARY KEY.
Up to now, DefineIndex() was responsible for adding attnotnull constraints
to the columns of a primary key, in any case where it hadn't been
convenient for transformIndexConstraint() to mark those columns as
is_not_null. It (or rather its minion index_check_primary_key) did this
by executing an ALTER TABLE SET NOT NULL command for the target table.
The trouble with this solution is that if we're creating the index due
to ALTER TABLE ADD PRIMARY KEY, and the outer ALTER TABLE has additional
sub-commands, the inner ALTER TABLE's operations executed at the wrong
time with respect to the outer ALTER TABLE's operations. In particular,
the inner ALTER would perform a validation scan at a point where the
table's storage might be inconsistent with its catalog entries. (This is
on the hairy edge of being a security problem, but AFAICS it isn't one
because the inner scan would only be interested in the tuples' null
bitmaps.) This can result in unexpected failures, such as the one seen
in bug #15580 from Allison Kaptur.
To fix, let's remove the attempt to do SET NOT NULL from DefineIndex(),
reducing index_check_primary_key's role to verifying that the columns are
already not null. (It shouldn't ever see such a case, but it seems wise
to keep the check for safety.) Instead, make transformIndexConstraint()
generate ALTER TABLE SET NOT NULL subcommands to be executed ahead of
the ADD PRIMARY KEY operation in every case where it can't force the
column to be created already-not-null. This requires only minor surgery
in parse_utilcmd.c, and it makes for a much more satisfying spec for
transformIndexConstraint(): it's no longer having to take it on faith
that someone else will handle addition of NOT NULL constraints.
To make that work, we have to move the execution of AT_SetNotNull into
an ALTER pass that executes ahead of AT_PASS_ADD_INDEX. I moved it to
AT_PASS_COL_ATTRS, and put that after AT_PASS_ADD_COL to avoid failure
when the column is being added in the same command. This incidentally
fixes a bug in the only previous usage of AT_PASS_COL_ATTRS, for
AT_SetIdentity: it didn't work either for a newly-added column.
Playing around with this exposed a separate bug in ALTER TABLE ONLY ...
ADD PRIMARY KEY for partitioned tables. The intent of the ONLY modifier
in that context is to prevent doing anything that would require holding
lock for a long time --- but the implied SET NOT NULL would recurse to
the child partitions, and do an expensive validation scan for any child
where the column(s) were not already NOT NULL. To fix that, invent a
new ALTER subcommand AT_CheckNotNull that just insists that a child
column be already NOT NULL, and apply that, not AT_SetNotNull, when
recursing to children in this scenario. This results in a slightly laxer
definition of ALTER TABLE ONLY ... SET NOT NULL for partitioned tables,
too: that command will now work as long as all children are already NOT
NULL, whereas before it just threw up its hands if there were any
partitions.
In passing, clean up the API of generateClonedIndexStmt(): remove a
useless argument, ensure that the output argument is not left undefined,
update the header comment.
A small side effect of this change is that no-such-column errors in ALTER
TABLE ADD PRIMARY KEY now produce a different message that includes the
table name, because they are now detected by the SET NOT NULL step which
has historically worded its error that way. That seems fine to me, so
I didn't make any effort to avoid the wording change.
The basic bug #15580 is of very long standing, and these other bugs
aren't new in v12 either. However, this is a pretty significant change
in the way ALTER TABLE ADD PRIMARY KEY works. On balance it seems best
not to back-patch, at least not till we get some more confidence that
this patch has no new bugs.
Patch by me, but thanks to Jie Zhang for a preliminary version.
Discussion: https://postgr.es/m/15580-d1a6de5a3d65da51@postgresql.org
Discussion: https://postgr.es/m/1396E95157071C4EBBA51892C5368521017F2E6E63@G08CNEXMBPEKD02.g08.fujitsu.local
2019-04-23 18:25:27 +02:00
|
|
|
* null. (We don't really expect to see that; it'd mean the parser messed
|
|
|
|
* up. But it seems wise to check anyway.)
|
2011-01-25 21:42:03 +01:00
|
|
|
*/
|
2018-04-07 22:00:39 +02:00
|
|
|
for (i = 0; i < indexInfo->ii_NumIndexKeyAttrs; i++)
|
2011-01-25 21:42:03 +01:00
|
|
|
{
|
2018-04-12 12:02:45 +02:00
|
|
|
AttrNumber attnum = indexInfo->ii_IndexAttrNumbers[i];
|
2011-01-25 21:42:03 +01:00
|
|
|
HeapTuple atttuple;
|
|
|
|
Form_pg_attribute attform;
|
|
|
|
|
|
|
|
if (attnum == 0)
|
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
|
|
|
errmsg("primary keys cannot be expressions")));
|
|
|
|
|
|
|
|
/* System attributes are never null, so no need to check */
|
|
|
|
if (attnum < 0)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
atttuple = SearchSysCache2(ATTNUM,
|
Phase 3 of pgindent updates.
Don't move parenthesized lines to the left, even if that means they
flow past the right margin.
By default, BSD indent lines up statement continuation lines that are
within parentheses so that they start just to the right of the preceding
left parenthesis. However, traditionally, if that resulted in the
continuation line extending to the right of the desired right margin,
then indent would push it left just far enough to not overrun the margin,
if it could do so without making the continuation line start to the left of
the current statement indent. That makes for a weird mix of indentations
unless one has been completely rigid about never violating the 80-column
limit.
This behavior has been pretty universally panned by Postgres developers.
Hence, disable it with indent's new -lpl switch, so that parenthesized
lines are always lined up with the preceding left paren.
This patch is much less interesting than the first round of indent
changes, but also bulkier, so I thought it best to separate the effects.
Discussion: https://postgr.es/m/E1dAmxK-0006EE-1r@gemulon.postgresql.org
Discussion: https://postgr.es/m/30527.1495162840@sss.pgh.pa.us
2017-06-21 21:35:54 +02:00
|
|
|
ObjectIdGetDatum(RelationGetRelid(heapRel)),
|
2011-01-25 21:42:03 +01:00
|
|
|
Int16GetDatum(attnum));
|
|
|
|
if (!HeapTupleIsValid(atttuple))
|
|
|
|
elog(ERROR, "cache lookup failed for attribute %d of relation %u",
|
|
|
|
attnum, RelationGetRelid(heapRel));
|
|
|
|
attform = (Form_pg_attribute) GETSTRUCT(atttuple);
|
|
|
|
|
|
|
|
if (!attform->attnotnull)
|
Avoid order-of-execution problems with ALTER TABLE ADD PRIMARY KEY.
Up to now, DefineIndex() was responsible for adding attnotnull constraints
to the columns of a primary key, in any case where it hadn't been
convenient for transformIndexConstraint() to mark those columns as
is_not_null. It (or rather its minion index_check_primary_key) did this
by executing an ALTER TABLE SET NOT NULL command for the target table.
The trouble with this solution is that if we're creating the index due
to ALTER TABLE ADD PRIMARY KEY, and the outer ALTER TABLE has additional
sub-commands, the inner ALTER TABLE's operations executed at the wrong
time with respect to the outer ALTER TABLE's operations. In particular,
the inner ALTER would perform a validation scan at a point where the
table's storage might be inconsistent with its catalog entries. (This is
on the hairy edge of being a security problem, but AFAICS it isn't one
because the inner scan would only be interested in the tuples' null
bitmaps.) This can result in unexpected failures, such as the one seen
in bug #15580 from Allison Kaptur.
To fix, let's remove the attempt to do SET NOT NULL from DefineIndex(),
reducing index_check_primary_key's role to verifying that the columns are
already not null. (It shouldn't ever see such a case, but it seems wise
to keep the check for safety.) Instead, make transformIndexConstraint()
generate ALTER TABLE SET NOT NULL subcommands to be executed ahead of
the ADD PRIMARY KEY operation in every case where it can't force the
column to be created already-not-null. This requires only minor surgery
in parse_utilcmd.c, and it makes for a much more satisfying spec for
transformIndexConstraint(): it's no longer having to take it on faith
that someone else will handle addition of NOT NULL constraints.
To make that work, we have to move the execution of AT_SetNotNull into
an ALTER pass that executes ahead of AT_PASS_ADD_INDEX. I moved it to
AT_PASS_COL_ATTRS, and put that after AT_PASS_ADD_COL to avoid failure
when the column is being added in the same command. This incidentally
fixes a bug in the only previous usage of AT_PASS_COL_ATTRS, for
AT_SetIdentity: it didn't work either for a newly-added column.
Playing around with this exposed a separate bug in ALTER TABLE ONLY ...
ADD PRIMARY KEY for partitioned tables. The intent of the ONLY modifier
in that context is to prevent doing anything that would require holding
lock for a long time --- but the implied SET NOT NULL would recurse to
the child partitions, and do an expensive validation scan for any child
where the column(s) were not already NOT NULL. To fix that, invent a
new ALTER subcommand AT_CheckNotNull that just insists that a child
column be already NOT NULL, and apply that, not AT_SetNotNull, when
recursing to children in this scenario. This results in a slightly laxer
definition of ALTER TABLE ONLY ... SET NOT NULL for partitioned tables,
too: that command will now work as long as all children are already NOT
NULL, whereas before it just threw up its hands if there were any
partitions.
In passing, clean up the API of generateClonedIndexStmt(): remove a
useless argument, ensure that the output argument is not left undefined,
update the header comment.
A small side effect of this change is that no-such-column errors in ALTER
TABLE ADD PRIMARY KEY now produce a different message that includes the
table name, because they are now detected by the SET NOT NULL step which
has historically worded its error that way. That seems fine to me, so
I didn't make any effort to avoid the wording change.
The basic bug #15580 is of very long standing, and these other bugs
aren't new in v12 either. However, this is a pretty significant change
in the way ALTER TABLE ADD PRIMARY KEY works. On balance it seems best
not to back-patch, at least not till we get some more confidence that
this patch has no new bugs.
Patch by me, but thanks to Jie Zhang for a preliminary version.
Discussion: https://postgr.es/m/15580-d1a6de5a3d65da51@postgresql.org
Discussion: https://postgr.es/m/1396E95157071C4EBBA51892C5368521017F2E6E63@G08CNEXMBPEKD02.g08.fujitsu.local
2019-04-23 18:25:27 +02:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
|
|
|
|
errmsg("primary key column \"%s\" is not marked NOT NULL",
|
|
|
|
NameStr(attform->attname))));
|
2011-01-25 21:42:03 +01:00
|
|
|
|
|
|
|
ReleaseSysCache(atttuple);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2003-05-28 18:04:02 +02:00
|
|
|
/*
|
1997-09-07 07:04:48 +02:00
|
|
|
* ConstructTupleDescriptor
|
2000-07-15 00:18:02 +02:00
|
|
|
*
|
2003-05-28 18:04:02 +02:00
|
|
|
* Build an index tuple descriptor for a new index
|
1996-07-09 08:22:35 +02:00
|
|
|
*/
|
1997-09-08 04:41:22 +02:00
|
|
|
static TupleDesc
|
2001-01-24 01:06:07 +01:00
|
|
|
ConstructTupleDescriptor(Relation heapRelation,
|
2003-05-28 18:04:02 +02:00
|
|
|
IndexInfo *indexInfo,
|
Adjust naming of indexes and their columns per recent discussion.
Index expression columns are now named after the FigureColname result for
their expressions, rather than always being "pg_expression_N". Digits are
appended to this name if needed to make the column name unique within the
index. (That happens for regular columns too, thus fixing the old problem
that CREATE INDEX fooi ON foo (f1, f1) fails. Before exclusion indexes
there was no real reason to do such a thing, but now maybe there is.)
Default names for indexes and associated constraints now include the column
names of all their columns, not only the first one as in previous practice.
(Of course, this will be truncated as needed to fit in NAMEDATALEN. Also,
pkey indexes retain the historical behavior of not naming specific columns
at all.)
An example of the results:
regression=# create table foo (f1 int, f2 text,
regression(# exclude (f1 with =, lower(f2) with =));
NOTICE: CREATE TABLE / EXCLUDE will create implicit index "foo_f1_lower_exclusion" for table "foo"
CREATE TABLE
regression=# \d foo_f1_lower_exclusion
Index "public.foo_f1_lower_exclusion"
Column | Type | Definition
--------+---------+------------
f1 | integer | f1
lower | text | lower(f2)
btree, for table "public.foo"
2009-12-23 03:35:25 +01:00
|
|
|
List *indexColNames,
|
2008-09-15 20:43:41 +02:00
|
|
|
Oid accessMethodObjectId,
|
2011-02-08 22:04:18 +01:00
|
|
|
Oid *collationObjectId,
|
2001-08-22 20:24:26 +02:00
|
|
|
Oid *classObjectId)
|
1997-09-07 07:04:48 +02:00
|
|
|
{
|
2003-05-28 18:04:02 +02:00
|
|
|
int numatts = indexInfo->ii_NumIndexAttrs;
|
2018-04-12 15:37:22 +02:00
|
|
|
int numkeyatts = indexInfo->ii_NumIndexKeyAttrs;
|
Adjust naming of indexes and their columns per recent discussion.
Index expression columns are now named after the FigureColname result for
their expressions, rather than always being "pg_expression_N". Digits are
appended to this name if needed to make the column name unique within the
index. (That happens for regular columns too, thus fixing the old problem
that CREATE INDEX fooi ON foo (f1, f1) fails. Before exclusion indexes
there was no real reason to do such a thing, but now maybe there is.)
Default names for indexes and associated constraints now include the column
names of all their columns, not only the first one as in previous practice.
(Of course, this will be truncated as needed to fit in NAMEDATALEN. Also,
pkey indexes retain the historical behavior of not naming specific columns
at all.)
An example of the results:
regression=# create table foo (f1 int, f2 text,
regression(# exclude (f1 with =, lower(f2) with =));
NOTICE: CREATE TABLE / EXCLUDE will create implicit index "foo_f1_lower_exclusion" for table "foo"
CREATE TABLE
regression=# \d foo_f1_lower_exclusion
Index "public.foo_f1_lower_exclusion"
Column | Type | Definition
--------+---------+------------
f1 | integer | f1
lower | text | lower(f2)
btree, for table "public.foo"
2009-12-23 03:35:25 +01:00
|
|
|
ListCell *colnames_item = list_head(indexColNames);
|
2004-05-26 06:41:50 +02:00
|
|
|
ListCell *indexpr_item = list_head(indexInfo->ii_Expressions);
|
Restructure index access method API to hide most of it at the C level.
This patch reduces pg_am to just two columns, a name and a handler
function. All the data formerly obtained from pg_am is now provided
in a C struct returned by the handler function. This is similar to
the designs we've adopted for FDWs and tablesample methods. There
are multiple advantages. For one, the index AM's support functions
are now simple C functions, making them faster to call and much less
error-prone, since the C compiler can now check function signatures.
For another, this will make it far more practical to define index access
methods in installable extensions.
A disadvantage is that SQL-level code can no longer see attributes
of index AMs; in particular, some of the crosschecks in the opr_sanity
regression test are no longer possible from SQL. We've addressed that
by adding a facility for the index AM to perform such checks instead.
(Much more could be done in that line, but for now we're content if the
amvalidate functions more or less replace what opr_sanity used to do.)
We might also want to expose some sort of reporting functionality, but
this patch doesn't do that.
Alexander Korotkov, reviewed by Petr Jelínek, and rather heavily
editorialized on by me.
2016-01-18 01:36:59 +01:00
|
|
|
IndexAmRoutine *amroutine;
|
1997-09-08 04:41:22 +02:00
|
|
|
TupleDesc heapTupDesc;
|
|
|
|
TupleDesc indexTupDesc;
|
2000-07-15 00:18:02 +02:00
|
|
|
int natts; /* #atts in heap rel --- for error checks */
|
1997-09-08 04:41:22 +02:00
|
|
|
int i;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
Restructure index access method API to hide most of it at the C level.
This patch reduces pg_am to just two columns, a name and a handler
function. All the data formerly obtained from pg_am is now provided
in a C struct returned by the handler function. This is similar to
the designs we've adopted for FDWs and tablesample methods. There
are multiple advantages. For one, the index AM's support functions
are now simple C functions, making them faster to call and much less
error-prone, since the C compiler can now check function signatures.
For another, this will make it far more practical to define index access
methods in installable extensions.
A disadvantage is that SQL-level code can no longer see attributes
of index AMs; in particular, some of the crosschecks in the opr_sanity
regression test are no longer possible from SQL. We've addressed that
by adding a facility for the index AM to perform such checks instead.
(Much more could be done in that line, but for now we're content if the
amvalidate functions more or less replace what opr_sanity used to do.)
We might also want to expose some sort of reporting functionality, but
this patch doesn't do that.
Alexander Korotkov, reviewed by Petr Jelínek, and rather heavily
editorialized on by me.
2016-01-18 01:36:59 +01:00
|
|
|
/* We need access to the index AM's API struct */
|
2016-08-14 00:31:14 +02:00
|
|
|
amroutine = GetIndexAmRoutineByAmId(accessMethodObjectId, false);
|
2008-09-15 20:43:41 +02:00
|
|
|
|
|
|
|
/* ... and to the table's tuple descriptor */
|
2000-07-15 00:18:02 +02:00
|
|
|
heapTupDesc = RelationGetDescr(heapRelation);
|
|
|
|
natts = RelationGetForm(heapRelation)->relnatts;
|
|
|
|
|
2001-03-22 07:16:21 +01:00
|
|
|
/*
|
|
|
|
* allocate the new tuple descriptor
|
1997-09-07 07:04:48 +02:00
|
|
|
*/
|
Remove WITH OIDS support, change oid catalog column visibility.
Previously tables declared WITH OIDS, including a significant fraction
of the catalog tables, stored the oid column not as a normal column,
but as part of the tuple header.
This special column was not shown by default, which was somewhat odd,
as it's often (consider e.g. pg_class.oid) one of the more important
parts of a row. Neither pg_dump nor COPY included the contents of the
oid column by default.
The fact that the oid column was not an ordinary column necessitated a
significant amount of special case code to support oid columns. That
already was painful for the existing, but upcoming work aiming to make
table storage pluggable, would have required expanding and duplicating
that "specialness" significantly.
WITH OIDS has been deprecated since 2005 (commit ff02d0a05280e0).
Remove it.
Removing includes:
- CREATE TABLE and ALTER TABLE syntax for declaring the table to be
WITH OIDS has been removed (WITH (oids[ = true]) will error out)
- pg_dump does not support dumping tables declared WITH OIDS and will
issue a warning when dumping one (and ignore the oid column).
- restoring a pg_dump archive with pg_restore will warn when
restoring a table with oid contents (and ignore the oid column)
- COPY will refuse to load binary dump that includes oids.
- pg_upgrade will error out when encountering tables declared WITH
OIDS, they have to be altered to remove the oid column first.
- Functionality to access the oid of the last inserted row (like
plpgsql's RESULT_OID, spi's SPI_lastoid, ...) has been removed.
The syntax for declaring a table WITHOUT OIDS (or WITH (oids = false)
for CREATE TABLE) is still supported. While that requires a bit of
support code, it seems unnecessary to break applications / dumps that
do not use oids, and are explicit about not using them.
The biggest user of WITH OID columns was postgres' catalog. This
commit changes all 'magic' oid columns to be columns that are normally
declared and stored. To reduce unnecessary query breakage all the
newly added columns are still named 'oid', even if a table's column
naming scheme would indicate 'reloid' or such. This obviously
requires adapting a lot of code, mostly replacing oid access via
HeapTupleGetOid() with access to the underlying Form_pg_*->oid column.
The bootstrap process now assigns oids for all oid columns in
genbki.pl that do not have an explicit value (starting at the largest
oid previously used), only oids assigned later by oids will be above
FirstBootstrapObjectId. As the oid column now is a normal column the
special bootstrap syntax for oids has been removed.
Oids are not automatically assigned during insertion anymore, all
backend code explicitly assigns oids with GetNewOidWithIndex(). For
the rare case that insertions into the catalog via SQL are called for
the new pg_nextoid() function can be used (which only works on catalog
tables).
The fact that oid columns on system tables are now normal columns
means that they will be included in the set of columns expanded
by * (i.e. SELECT * FROM pg_class will now include the table's oid,
previously it did not). It'd not technically be hard to hide oid
column by default, but that'd mean confusing behavior would either
have to be carried forward forever, or it'd cause breakage down the
line.
While it's not unlikely that further adjustments are needed, the
scope/invasiveness of the patch makes it worthwhile to merge this
now. It's painful to maintain externally, too complicated to commit
after the code freeze, and a dependency of a number of other
patches.
Catversion bump, for obvious reasons.
Author: Andres Freund, with contributions by John Naylor
Discussion: https://postgr.es/m/20180930034810.ywp2c7awz7opzcfr@alap3.anarazel.de
2018-11-21 00:36:57 +01:00
|
|
|
indexTupDesc = CreateTemplateTupleDesc(numatts);
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2003-05-28 18:04:02 +02:00
|
|
|
/*
|
2018-08-27 15:50:50 +02:00
|
|
|
* Fill in the pg_attribute row.
|
1997-09-07 07:04:48 +02:00
|
|
|
*/
|
2000-07-15 00:18:02 +02:00
|
|
|
for (i = 0; i < numatts; i++)
|
1997-09-07 07:04:48 +02:00
|
|
|
{
|
2018-04-12 12:02:45 +02:00
|
|
|
AttrNumber atnum = indexInfo->ii_IndexAttrNumbers[i];
|
2017-08-20 20:19:07 +02:00
|
|
|
Form_pg_attribute to = TupleDescAttr(indexTupDesc, i);
|
2001-08-22 20:24:26 +02:00
|
|
|
HeapTuple tuple;
|
2003-05-28 18:04:02 +02:00
|
|
|
Form_pg_type typeTup;
|
2008-09-15 20:43:41 +02:00
|
|
|
Form_pg_opclass opclassTup;
|
2001-08-22 20:24:26 +02:00
|
|
|
Oid keyType;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2018-08-27 15:50:50 +02:00
|
|
|
MemSet(to, 0, ATTRIBUTE_FIXED_PART_SIZE);
|
|
|
|
to->attnum = i + 1;
|
|
|
|
to->attstattarget = -1;
|
|
|
|
to->attcacheoff = -1;
|
|
|
|
to->attislocal = true;
|
|
|
|
to->attcollation = (i < numkeyatts) ?
|
|
|
|
collationObjectId[i] : InvalidOid;
|
|
|
|
|
2019-12-17 23:44:27 +01:00
|
|
|
/*
|
|
|
|
* Set the attribute name as specified by caller.
|
|
|
|
*/
|
|
|
|
if (colnames_item == NULL) /* shouldn't happen */
|
|
|
|
elog(ERROR, "too few entries in colnames list");
|
|
|
|
namestrcpy(&to->attname, (const char *) lfirst(colnames_item));
|
|
|
|
colnames_item = lnext(indexColNames, colnames_item);
|
|
|
|
|
2018-08-27 15:50:50 +02:00
|
|
|
/*
|
|
|
|
* For simple index columns, we copy some pg_attribute fields from the
|
|
|
|
* parent relation. For expressions we have to look at the expression
|
|
|
|
* result.
|
|
|
|
*/
|
2003-05-28 18:04:02 +02:00
|
|
|
if (atnum != 0)
|
1997-09-07 07:04:48 +02:00
|
|
|
{
|
2003-05-28 18:04:02 +02:00
|
|
|
/* Simple index column */
|
2018-10-16 18:44:43 +02:00
|
|
|
const FormData_pg_attribute *from;
|
2003-05-28 18:04:02 +02:00
|
|
|
|
2019-01-15 18:07:10 +01:00
|
|
|
Assert(atnum > 0); /* should've been caught above */
|
Remove WITH OIDS support, change oid catalog column visibility.
Previously tables declared WITH OIDS, including a significant fraction
of the catalog tables, stored the oid column not as a normal column,
but as part of the tuple header.
This special column was not shown by default, which was somewhat odd,
as it's often (consider e.g. pg_class.oid) one of the more important
parts of a row. Neither pg_dump nor COPY included the contents of the
oid column by default.
The fact that the oid column was not an ordinary column necessitated a
significant amount of special case code to support oid columns. That
already was painful for the existing, but upcoming work aiming to make
table storage pluggable, would have required expanding and duplicating
that "specialness" significantly.
WITH OIDS has been deprecated since 2005 (commit ff02d0a05280e0).
Remove it.
Removing includes:
- CREATE TABLE and ALTER TABLE syntax for declaring the table to be
WITH OIDS has been removed (WITH (oids[ = true]) will error out)
- pg_dump does not support dumping tables declared WITH OIDS and will
issue a warning when dumping one (and ignore the oid column).
- restoring a pg_dump archive with pg_restore will warn when
restoring a table with oid contents (and ignore the oid column)
- COPY will refuse to load binary dump that includes oids.
- pg_upgrade will error out when encountering tables declared WITH
OIDS, they have to be altered to remove the oid column first.
- Functionality to access the oid of the last inserted row (like
plpgsql's RESULT_OID, spi's SPI_lastoid, ...) has been removed.
The syntax for declaring a table WITHOUT OIDS (or WITH (oids = false)
for CREATE TABLE) is still supported. While that requires a bit of
support code, it seems unnecessary to break applications / dumps that
do not use oids, and are explicit about not using them.
The biggest user of WITH OID columns was postgres' catalog. This
commit changes all 'magic' oid columns to be columns that are normally
declared and stored. To reduce unnecessary query breakage all the
newly added columns are still named 'oid', even if a table's column
naming scheme would indicate 'reloid' or such. This obviously
requires adapting a lot of code, mostly replacing oid access via
HeapTupleGetOid() with access to the underlying Form_pg_*->oid column.
The bootstrap process now assigns oids for all oid columns in
genbki.pl that do not have an explicit value (starting at the largest
oid previously used), only oids assigned later by oids will be above
FirstBootstrapObjectId. As the oid column now is a normal column the
special bootstrap syntax for oids has been removed.
Oids are not automatically assigned during insertion anymore, all
backend code explicitly assigns oids with GetNewOidWithIndex(). For
the rare case that insertions into the catalog via SQL are called for
the new pg_nextoid() function can be used (which only works on catalog
tables).
The fact that oid columns on system tables are now normal columns
means that they will be included in the set of columns expanded
by * (i.e. SELECT * FROM pg_class will now include the table's oid,
previously it did not). It'd not technically be hard to hide oid
column by default, but that'd mean confusing behavior would either
have to be carried forward forever, or it'd cause breakage down the
line.
While it's not unlikely that further adjustments are needed, the
scope/invasiveness of the patch makes it worthwhile to merge this
now. It's painful to maintain externally, too complicated to commit
after the code freeze, and a dependency of a number of other
patches.
Catversion bump, for obvious reasons.
Author: Andres Freund, with contributions by John Naylor
Discussion: https://postgr.es/m/20180930034810.ywp2c7awz7opzcfr@alap3.anarazel.de
2018-11-21 00:36:57 +01:00
|
|
|
|
|
|
|
if (atnum > natts) /* safety check */
|
|
|
|
elog(ERROR, "invalid column number %d", atnum);
|
|
|
|
from = TupleDescAttr(heapTupDesc,
|
|
|
|
AttrNumberGetAttrOffset(atnum));
|
2003-05-28 18:04:02 +02:00
|
|
|
|
2018-08-27 15:50:50 +02:00
|
|
|
to->atttypid = from->atttypid;
|
|
|
|
to->attlen = from->attlen;
|
|
|
|
to->attndims = from->attndims;
|
|
|
|
to->atttypmod = from->atttypmod;
|
|
|
|
to->attbyval = from->attbyval;
|
|
|
|
to->attstorage = from->attstorage;
|
|
|
|
to->attalign = from->attalign;
|
1997-09-07 07:04:48 +02:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2003-05-28 18:04:02 +02:00
|
|
|
/* Expressional index */
|
|
|
|
Node *indexkey;
|
|
|
|
|
2004-05-26 06:41:50 +02:00
|
|
|
if (indexpr_item == NULL) /* shouldn't happen */
|
2003-05-28 18:04:02 +02:00
|
|
|
elog(ERROR, "too few entries in indexprs list");
|
2004-05-26 06:41:50 +02:00
|
|
|
indexkey = (Node *) lfirst(indexpr_item);
|
Represent Lists as expansible arrays, not chains of cons-cells.
Originally, Postgres Lists were a more or less exact reimplementation of
Lisp lists, which consist of chains of separately-allocated cons cells,
each having a value and a next-cell link. We'd hacked that once before
(commit d0b4399d8) to add a separate List header, but the data was still
in cons cells. That makes some operations -- notably list_nth() -- O(N),
and it's bulky because of the next-cell pointers and per-cell palloc
overhead, and it's very cache-unfriendly if the cons cells end up
scattered around rather than being adjacent.
In this rewrite, we still have List headers, but the data is in a
resizable array of values, with no next-cell links. Now we need at
most two palloc's per List, and often only one, since we can allocate
some values in the same palloc call as the List header. (Of course,
extending an existing List may require repalloc's to enlarge the array.
But this involves just O(log N) allocations not O(N).)
Of course this is not without downsides. The key difficulty is that
addition or deletion of a list entry may now cause other entries to
move, which it did not before.
For example, that breaks foreach() and sister macros, which historically
used a pointer to the current cons-cell as loop state. We can repair
those macros transparently by making their actual loop state be an
integer list index; the exposed "ListCell *" pointer is no longer state
carried across loop iterations, but is just a derived value. (In
practice, modern compilers can optimize things back to having just one
loop state value, at least for simple cases with inline loop bodies.)
In principle, this is a semantics change for cases where the loop body
inserts or deletes list entries ahead of the current loop index; but
I found no such cases in the Postgres code.
The change is not at all transparent for code that doesn't use foreach()
but chases lists "by hand" using lnext(). The largest share of such
code in the backend is in loops that were maintaining "prev" and "next"
variables in addition to the current-cell pointer, in order to delete
list cells efficiently using list_delete_cell(). However, we no longer
need a previous-cell pointer to delete a list cell efficiently. Keeping
a next-cell pointer doesn't work, as explained above, but we can improve
matters by changing such code to use a regular foreach() loop and then
using the new macro foreach_delete_current() to delete the current cell.
(This macro knows how to update the associated foreach loop's state so
that no cells will be missed in the traversal.)
There remains a nontrivial risk of code assuming that a ListCell *
pointer will remain good over an operation that could now move the list
contents. To help catch such errors, list.c can be compiled with a new
define symbol DEBUG_LIST_MEMORY_USAGE that forcibly moves list contents
whenever that could possibly happen. This makes list operations
significantly more expensive so it's not normally turned on (though it
is on by default if USE_VALGRIND is on).
There are two notable API differences from the previous code:
* lnext() now requires the List's header pointer in addition to the
current cell's address.
* list_delete_cell() no longer requires a previous-cell argument.
These changes are somewhat unfortunate, but on the other hand code using
either function needs inspection to see if it is assuming anything
it shouldn't, so it's not all bad.
Programmers should be aware of these significant performance changes:
* list_nth() and related functions are now O(1); so there's no
major access-speed difference between a list and an array.
* Inserting or deleting a list element now takes time proportional to
the distance to the end of the list, due to moving the array elements.
(However, it typically *doesn't* require palloc or pfree, so except in
long lists it's probably still faster than before.) Notably, lcons()
used to be about the same cost as lappend(), but that's no longer true
if the list is long. Code that uses lcons() and list_delete_first()
to maintain a stack might usefully be rewritten to push and pop at the
end of the list rather than the beginning.
* There are now list_insert_nth...() and list_delete_nth...() functions
that add or remove a list cell identified by index. These have the
data-movement penalty explained above, but there's no search penalty.
* list_concat() and variants now copy the second list's data into
storage belonging to the first list, so there is no longer any
sharing of cells between the input lists. The second argument is
now declared "const List *" to reflect that it isn't changed.
This patch just does the minimum needed to get the new implementation
in place and fix bugs exposed by the regression tests. As suggested
by the foregoing, there's a fair amount of followup work remaining to
do.
Also, the ENABLE_LIST_COMPAT macros are finally removed in this
commit. Code using those should have been gone a dozen years ago.
Patch by me; thanks to David Rowley, Jesper Pedersen, and others
for review.
Discussion: https://postgr.es/m/11587.1550975080@sss.pgh.pa.us
2019-07-15 19:41:58 +02:00
|
|
|
indexpr_item = lnext(indexInfo->ii_Expressions, indexpr_item);
|
2003-05-28 18:04:02 +02:00
|
|
|
|
|
|
|
/*
|
2005-10-15 04:49:52 +02:00
|
|
|
* Lookup the expression type in pg_type for the type length etc.
|
2003-05-28 18:04:02 +02:00
|
|
|
*/
|
|
|
|
keyType = exprType(indexkey);
|
2010-02-14 19:42:19 +01:00
|
|
|
tuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(keyType));
|
2003-05-28 18:04:02 +02:00
|
|
|
if (!HeapTupleIsValid(tuple))
|
2003-07-21 03:59:11 +02:00
|
|
|
elog(ERROR, "cache lookup failed for type %u", keyType);
|
2003-05-28 18:04:02 +02:00
|
|
|
typeTup = (Form_pg_type) GETSTRUCT(tuple);
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2003-05-28 18:04:02 +02:00
|
|
|
/*
|
2018-08-27 15:50:50 +02:00
|
|
|
* Assign some of the attributes values. Leave the rest.
|
2003-05-28 18:04:02 +02:00
|
|
|
*/
|
|
|
|
to->atttypid = keyType;
|
|
|
|
to->attlen = typeTup->typlen;
|
|
|
|
to->attbyval = typeTup->typbyval;
|
|
|
|
to->attstorage = typeTup->typstorage;
|
|
|
|
to->attalign = typeTup->typalign;
|
2014-04-26 18:22:09 +02:00
|
|
|
to->atttypmod = exprTypmod(indexkey);
|
2011-03-04 22:39:44 +01:00
|
|
|
|
2003-05-28 18:04:02 +02:00
|
|
|
ReleaseSysCache(tuple);
|
2008-10-14 23:47:39 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Make sure the expression yields a type that's safe to store in
|
|
|
|
* an index. We need this defense because we have index opclasses
|
|
|
|
* for pseudo-types such as "record", and the actually stored type
|
|
|
|
* had better be safe; eg, a named composite type is okay, an
|
|
|
|
* anonymous record type is not. The test is the same as for
|
|
|
|
* whether a table column is of a safe type (which is why we
|
|
|
|
* needn't check for the non-expression case).
|
|
|
|
*/
|
2011-03-28 21:44:54 +02:00
|
|
|
CheckAttributeType(NameStr(to->attname),
|
|
|
|
to->atttypid, to->attcollation,
|
2019-01-31 01:25:33 +01:00
|
|
|
NIL, 0);
|
2003-05-28 18:04:02 +02:00
|
|
|
}
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2001-01-24 01:06:07 +01:00
|
|
|
/*
|
2005-10-15 04:49:52 +02:00
|
|
|
* We do not yet have the correct relation OID for the index, so just
|
2014-05-06 18:12:18 +02:00
|
|
|
* set it invalid for now. InitializeAttributeOids() will fix it
|
2005-10-15 04:49:52 +02:00
|
|
|
* later.
|
1997-09-07 07:04:48 +02:00
|
|
|
*/
|
2001-01-24 01:06:07 +01:00
|
|
|
to->attrelid = InvalidOid;
|
2001-08-22 20:24:26 +02:00
|
|
|
|
|
|
|
/*
|
2008-09-15 20:43:41 +02:00
|
|
|
* Check the opclass and index AM to see if either provides a keytype
|
2018-04-07 22:00:39 +02:00
|
|
|
* (overriding the attribute type). Opclass (if exists) takes
|
|
|
|
* precedence.
|
2001-08-22 20:24:26 +02:00
|
|
|
*/
|
2018-04-07 22:00:39 +02:00
|
|
|
keyType = amroutine->amkeytype;
|
Replace the built-in GIN array opclasses with a single polymorphic opclass.
We had thirty different GIN array opclasses sharing the same operators and
support functions. That still didn't cover all the built-in types, nor
did it cover arrays of extension-added types. What we want is a single
polymorphic opclass for "anyarray". There were two missing features needed
to make this possible:
1. We have to be able to declare the index storage type as ANYELEMENT
when the opclass is declared to index ANYARRAY. This just takes a few
more lines in index_create(). Although this currently seems of use only
for GIN, there's no reason to make index_create() restrict it to that.
2. We have to be able to identify the proper GIN compare function for
the index storage type. This patch proceeds by making the compare function
optional in GIN opclass definitions, and specifying that the default btree
comparison function for the index storage type will be looked up when the
opclass omits it. Again, that seems pretty generically useful.
Since the comparison function lookup is done in initGinState(), making
use of the second feature adds an additional cache lookup to GIN index
access setup. It seems unlikely that that would be very noticeable given
the other costs involved, but maybe at some point we should consider
making GinState data persist longer than it now does --- we could keep it
in the index relcache entry, perhaps.
Rather fortuitously, we don't seem to need to do anything to get this
change to play nice with dump/reload or pg_upgrade scenarios: the new
opclass definition is automatically selected to replace existing index
definitions, and the on-disk data remains compatible. Also, if a user has
created a custom opclass definition for a non-builtin type, this doesn't
break that, since CREATE INDEX will prefer an exact match to opcintype
over a match to ANYARRAY. However, if there's anyone out there with
handwritten DDL that explicitly specifies _bool_ops or one of the other
replaced opclass names, they'll need to adjust that.
Tom Lane, reviewed by Enrique Meneses
Discussion: <14436.1470940379@sss.pgh.pa.us>
2016-09-26 20:52:44 +02:00
|
|
|
|
2018-04-07 22:00:39 +02:00
|
|
|
if (i < indexInfo->ii_NumIndexKeyAttrs)
|
Replace the built-in GIN array opclasses with a single polymorphic opclass.
We had thirty different GIN array opclasses sharing the same operators and
support functions. That still didn't cover all the built-in types, nor
did it cover arrays of extension-added types. What we want is a single
polymorphic opclass for "anyarray". There were two missing features needed
to make this possible:
1. We have to be able to declare the index storage type as ANYELEMENT
when the opclass is declared to index ANYARRAY. This just takes a few
more lines in index_create(). Although this currently seems of use only
for GIN, there's no reason to make index_create() restrict it to that.
2. We have to be able to identify the proper GIN compare function for
the index storage type. This patch proceeds by making the compare function
optional in GIN opclass definitions, and specifying that the default btree
comparison function for the index storage type will be looked up when the
opclass omits it. Again, that seems pretty generically useful.
Since the comparison function lookup is done in initGinState(), making
use of the second feature adds an additional cache lookup to GIN index
access setup. It seems unlikely that that would be very noticeable given
the other costs involved, but maybe at some point we should consider
making GinState data persist longer than it now does --- we could keep it
in the index relcache entry, perhaps.
Rather fortuitously, we don't seem to need to do anything to get this
change to play nice with dump/reload or pg_upgrade scenarios: the new
opclass definition is automatically selected to replace existing index
definitions, and the on-disk data remains compatible. Also, if a user has
created a custom opclass definition for a non-builtin type, this doesn't
break that, since CREATE INDEX will prefer an exact match to opcintype
over a match to ANYARRAY. However, if there's anyone out there with
handwritten DDL that explicitly specifies _bool_ops or one of the other
replaced opclass names, they'll need to adjust that.
Tom Lane, reviewed by Enrique Meneses
Discussion: <14436.1470940379@sss.pgh.pa.us>
2016-09-26 20:52:44 +02:00
|
|
|
{
|
2018-04-07 22:00:39 +02:00
|
|
|
tuple = SearchSysCache1(CLAOID, ObjectIdGetDatum(classObjectId[i]));
|
|
|
|
if (!HeapTupleIsValid(tuple))
|
|
|
|
elog(ERROR, "cache lookup failed for opclass %u",
|
|
|
|
classObjectId[i]);
|
|
|
|
opclassTup = (Form_pg_opclass) GETSTRUCT(tuple);
|
|
|
|
if (OidIsValid(opclassTup->opckeytype))
|
|
|
|
keyType = opclassTup->opckeytype;
|
Replace the built-in GIN array opclasses with a single polymorphic opclass.
We had thirty different GIN array opclasses sharing the same operators and
support functions. That still didn't cover all the built-in types, nor
did it cover arrays of extension-added types. What we want is a single
polymorphic opclass for "anyarray". There were two missing features needed
to make this possible:
1. We have to be able to declare the index storage type as ANYELEMENT
when the opclass is declared to index ANYARRAY. This just takes a few
more lines in index_create(). Although this currently seems of use only
for GIN, there's no reason to make index_create() restrict it to that.
2. We have to be able to identify the proper GIN compare function for
the index storage type. This patch proceeds by making the compare function
optional in GIN opclass definitions, and specifying that the default btree
comparison function for the index storage type will be looked up when the
opclass omits it. Again, that seems pretty generically useful.
Since the comparison function lookup is done in initGinState(), making
use of the second feature adds an additional cache lookup to GIN index
access setup. It seems unlikely that that would be very noticeable given
the other costs involved, but maybe at some point we should consider
making GinState data persist longer than it now does --- we could keep it
in the index relcache entry, perhaps.
Rather fortuitously, we don't seem to need to do anything to get this
change to play nice with dump/reload or pg_upgrade scenarios: the new
opclass definition is automatically selected to replace existing index
definitions, and the on-disk data remains compatible. Also, if a user has
created a custom opclass definition for a non-builtin type, this doesn't
break that, since CREATE INDEX will prefer an exact match to opcintype
over a match to ANYARRAY. However, if there's anyone out there with
handwritten DDL that explicitly specifies _bool_ops or one of the other
replaced opclass names, they'll need to adjust that.
Tom Lane, reviewed by Enrique Meneses
Discussion: <14436.1470940379@sss.pgh.pa.us>
2016-09-26 20:52:44 +02:00
|
|
|
|
2018-04-07 22:00:39 +02:00
|
|
|
/*
|
|
|
|
* If keytype is specified as ANYELEMENT, and opcintype is
|
|
|
|
* ANYARRAY, then the attribute type must be an array (else it'd
|
|
|
|
* not have matched this opclass); use its element type.
|
Introduce "anycompatible" family of polymorphic types.
This patch adds the pseudo-types anycompatible, anycompatiblearray,
anycompatiblenonarray, and anycompatiblerange. They work much like
anyelement, anyarray, anynonarray, and anyrange respectively, except
that the actual input values need not match precisely in type.
Instead, if we can find a common supertype (using the same rules
as for UNION/CASE type resolution), then the parser automatically
promotes the input values to that type. For example,
"myfunc(anycompatible, anycompatible)" can match a call with one
integer and one bigint argument, with the integer automatically
promoted to bigint. With anyelement in the definition, the user
would have had to cast the integer explicitly.
The new types also provide a second, independent set of type variables
for function matching; thus with "myfunc(anyelement, anyelement,
anycompatible) returns anycompatible" the first two arguments are
constrained to be the same type, but the third can be some other
type, and the result has the type of the third argument. The need
for more than one set of type variables was foreseen back when we
first invented the polymorphic types, but we never did anything
about it.
Pavel Stehule, revised a bit by me
Discussion: https://postgr.es/m/CAFj8pRDna7VqNi8gR+Tt2Ktmz0cq5G93guc3Sbn_NVPLdXAkqA@mail.gmail.com
2020-03-19 16:43:11 +01:00
|
|
|
*
|
|
|
|
* We could also allow ANYCOMPATIBLE/ANYCOMPATIBLEARRAY here, but
|
|
|
|
* there seems no need to do so; there's no reason to declare an
|
|
|
|
* opclass as taking ANYCOMPATIBLEARRAY rather than ANYARRAY.
|
2018-04-07 22:00:39 +02:00
|
|
|
*/
|
|
|
|
if (keyType == ANYELEMENTOID && opclassTup->opcintype == ANYARRAYOID)
|
|
|
|
{
|
|
|
|
keyType = get_base_element_type(to->atttypid);
|
|
|
|
if (!OidIsValid(keyType))
|
|
|
|
elog(ERROR, "could not get element type of array type %u",
|
|
|
|
to->atttypid);
|
|
|
|
}
|
|
|
|
|
|
|
|
ReleaseSysCache(tuple);
|
|
|
|
}
|
2001-08-22 20:24:26 +02:00
|
|
|
|
Replace the built-in GIN array opclasses with a single polymorphic opclass.
We had thirty different GIN array opclasses sharing the same operators and
support functions. That still didn't cover all the built-in types, nor
did it cover arrays of extension-added types. What we want is a single
polymorphic opclass for "anyarray". There were two missing features needed
to make this possible:
1. We have to be able to declare the index storage type as ANYELEMENT
when the opclass is declared to index ANYARRAY. This just takes a few
more lines in index_create(). Although this currently seems of use only
for GIN, there's no reason to make index_create() restrict it to that.
2. We have to be able to identify the proper GIN compare function for
the index storage type. This patch proceeds by making the compare function
optional in GIN opclass definitions, and specifying that the default btree
comparison function for the index storage type will be looked up when the
opclass omits it. Again, that seems pretty generically useful.
Since the comparison function lookup is done in initGinState(), making
use of the second feature adds an additional cache lookup to GIN index
access setup. It seems unlikely that that would be very noticeable given
the other costs involved, but maybe at some point we should consider
making GinState data persist longer than it now does --- we could keep it
in the index relcache entry, perhaps.
Rather fortuitously, we don't seem to need to do anything to get this
change to play nice with dump/reload or pg_upgrade scenarios: the new
opclass definition is automatically selected to replace existing index
definitions, and the on-disk data remains compatible. Also, if a user has
created a custom opclass definition for a non-builtin type, this doesn't
break that, since CREATE INDEX will prefer an exact match to opcintype
over a match to ANYARRAY. However, if there's anyone out there with
handwritten DDL that explicitly specifies _bool_ops or one of the other
replaced opclass names, they'll need to adjust that.
Tom Lane, reviewed by Enrique Meneses
Discussion: <14436.1470940379@sss.pgh.pa.us>
2016-09-26 20:52:44 +02:00
|
|
|
/*
|
|
|
|
* If a key type different from the heap value is specified, update
|
|
|
|
* the type-related fields in the index tupdesc.
|
|
|
|
*/
|
2001-08-22 20:24:26 +02:00
|
|
|
if (OidIsValid(keyType) && keyType != to->atttypid)
|
|
|
|
{
|
2010-02-14 19:42:19 +01:00
|
|
|
tuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(keyType));
|
2001-08-22 20:24:26 +02:00
|
|
|
if (!HeapTupleIsValid(tuple))
|
2003-07-21 03:59:11 +02:00
|
|
|
elog(ERROR, "cache lookup failed for type %u", keyType);
|
2001-08-22 20:24:26 +02:00
|
|
|
typeTup = (Form_pg_type) GETSTRUCT(tuple);
|
|
|
|
|
2001-10-25 07:50:21 +02:00
|
|
|
to->atttypid = keyType;
|
|
|
|
to->atttypmod = -1;
|
|
|
|
to->attlen = typeTup->typlen;
|
|
|
|
to->attbyval = typeTup->typbyval;
|
|
|
|
to->attalign = typeTup->typalign;
|
2001-08-22 20:24:26 +02:00
|
|
|
to->attstorage = typeTup->typstorage;
|
|
|
|
|
|
|
|
ReleaseSysCache(tuple);
|
|
|
|
}
|
1997-09-07 07:04:48 +02:00
|
|
|
}
|
|
|
|
|
Restructure index access method API to hide most of it at the C level.
This patch reduces pg_am to just two columns, a name and a handler
function. All the data formerly obtained from pg_am is now provided
in a C struct returned by the handler function. This is similar to
the designs we've adopted for FDWs and tablesample methods. There
are multiple advantages. For one, the index AM's support functions
are now simple C functions, making them faster to call and much less
error-prone, since the C compiler can now check function signatures.
For another, this will make it far more practical to define index access
methods in installable extensions.
A disadvantage is that SQL-level code can no longer see attributes
of index AMs; in particular, some of the crosschecks in the opr_sanity
regression test are no longer possible from SQL. We've addressed that
by adding a facility for the index AM to perform such checks instead.
(Much more could be done in that line, but for now we're content if the
amvalidate functions more or less replace what opr_sanity used to do.)
We might also want to expose some sort of reporting functionality, but
this patch doesn't do that.
Alexander Korotkov, reviewed by Petr Jelínek, and rather heavily
editorialized on by me.
2016-01-18 01:36:59 +01:00
|
|
|
pfree(amroutine);
|
2008-09-15 20:43:41 +02:00
|
|
|
|
1997-09-07 07:04:48 +02:00
|
|
|
return indexTupDesc;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* ----------------------------------------------------------------
|
|
|
|
* InitializeAttributeOids
|
|
|
|
* ----------------------------------------------------------------
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
InitializeAttributeOids(Relation indexRelation,
|
|
|
|
int numatts,
|
|
|
|
Oid indexoid)
|
|
|
|
{
|
1997-09-08 04:41:22 +02:00
|
|
|
TupleDesc tupleDescriptor;
|
|
|
|
int i;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
1998-09-01 05:29:17 +02:00
|
|
|
tupleDescriptor = RelationGetDescr(indexRelation);
|
1997-09-07 07:04:48 +02:00
|
|
|
|
|
|
|
for (i = 0; i < numatts; i += 1)
|
2017-08-20 20:19:07 +02:00
|
|
|
TupleDescAttr(tupleDescriptor, i)->attrelid = indexoid;
|
1997-09-07 07:04:48 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* ----------------------------------------------------------------
|
|
|
|
* AppendAttributeTuples
|
|
|
|
* ----------------------------------------------------------------
|
|
|
|
*/
|
|
|
|
static void
|
Implement operator class parameters
PostgreSQL provides set of template index access methods, where opclasses have
much freedom in the semantics of indexing. These index AMs are GiST, GIN,
SP-GiST and BRIN. There opclasses define representation of keys, operations on
them and supported search strategies. So, it's natural that opclasses may be
faced some tradeoffs, which require user-side decision. This commit implements
opclass parameters allowing users to set some values, which tell opclass how to
index the particular dataset.
This commit doesn't introduce new storage in system catalog. Instead it uses
pg_attribute.attoptions, which is used for table column storage options but
unused for index attributes.
In order to evade changing signature of each opclass support function, we
implement unified way to pass options to opclass support functions. Options
are set to fn_expr as the constant bytea expression. It's possible due to the
fact that opclass support functions are executed outside of expressions, so
fn_expr is unused for them.
This commit comes with some examples of opclass options usage. We parametrize
signature length in GiST. That applies to multiple opclasses: tsvector_ops,
gist__intbig_ops, gist_ltree_ops, gist__ltree_ops, gist_trgm_ops and
gist_hstore_ops. Also we parametrize maximum number of integer ranges for
gist__int_ops. However, the main future usage of this feature is expected
to be json, where users would be able to specify which way to index particular
json parts.
Catversion is bumped.
Discussion: https://postgr.es/m/d22c3a18-31c7-1879-fc11-4c1ce2f5e5af%40postgrespro.ru
Author: Nikita Glukhov, revised by me
Reviwed-by: Nikolay Shaplov, Robert Haas, Tom Lane, Tomas Vondra, Alvaro Herrera
2020-03-30 18:17:11 +02:00
|
|
|
AppendAttributeTuples(Relation indexRelation, int numatts, Datum *attopts)
|
1997-09-07 07:04:48 +02:00
|
|
|
{
|
1997-09-08 04:41:22 +02:00
|
|
|
Relation pg_attribute;
|
2002-08-05 05:29:17 +02:00
|
|
|
CatalogIndexState indstate;
|
1997-09-08 04:41:22 +02:00
|
|
|
TupleDesc indexTupDesc;
|
|
|
|
int i;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2001-03-22 07:16:21 +01:00
|
|
|
/*
|
2002-08-05 05:29:17 +02:00
|
|
|
* open the attribute relation and its indexes
|
1997-09-07 07:04:48 +02:00
|
|
|
*/
|
2019-01-21 19:32:19 +01:00
|
|
|
pg_attribute = table_open(AttributeRelationId, RowExclusiveLock);
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2002-08-05 05:29:17 +02:00
|
|
|
indstate = CatalogOpenIndexes(pg_attribute);
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2001-03-22 07:16:21 +01:00
|
|
|
/*
|
2001-06-12 07:55:50 +02:00
|
|
|
* insert data from new index's tupdesc into pg_attribute
|
1997-09-07 07:04:48 +02:00
|
|
|
*/
|
1998-09-01 05:29:17 +02:00
|
|
|
indexTupDesc = RelationGetDescr(indexRelation);
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2001-06-12 07:55:50 +02:00
|
|
|
for (i = 0; i < numatts; i++)
|
1997-09-07 07:04:48 +02:00
|
|
|
{
|
2017-08-20 20:19:07 +02:00
|
|
|
Form_pg_attribute attr = TupleDescAttr(indexTupDesc, i);
|
Implement operator class parameters
PostgreSQL provides set of template index access methods, where opclasses have
much freedom in the semantics of indexing. These index AMs are GiST, GIN,
SP-GiST and BRIN. There opclasses define representation of keys, operations on
them and supported search strategies. So, it's natural that opclasses may be
faced some tradeoffs, which require user-side decision. This commit implements
opclass parameters allowing users to set some values, which tell opclass how to
index the particular dataset.
This commit doesn't introduce new storage in system catalog. Instead it uses
pg_attribute.attoptions, which is used for table column storage options but
unused for index attributes.
In order to evade changing signature of each opclass support function, we
implement unified way to pass options to opclass support functions. Options
are set to fn_expr as the constant bytea expression. It's possible due to the
fact that opclass support functions are executed outside of expressions, so
fn_expr is unused for them.
This commit comes with some examples of opclass options usage. We parametrize
signature length in GiST. That applies to multiple opclasses: tsvector_ops,
gist__intbig_ops, gist_ltree_ops, gist__ltree_ops, gist_trgm_ops and
gist_hstore_ops. Also we parametrize maximum number of integer ranges for
gist__int_ops. However, the main future usage of this feature is expected
to be json, where users would be able to specify which way to index particular
json parts.
Catversion is bumped.
Discussion: https://postgr.es/m/d22c3a18-31c7-1879-fc11-4c1ce2f5e5af%40postgrespro.ru
Author: Nikita Glukhov, revised by me
Reviwed-by: Nikolay Shaplov, Robert Haas, Tom Lane, Tomas Vondra, Alvaro Herrera
2020-03-30 18:17:11 +02:00
|
|
|
Datum attoptions = attopts ? attopts[i] : (Datum) 0;
|
2017-08-20 20:19:07 +02:00
|
|
|
|
|
|
|
Assert(attr->attnum == i + 1);
|
1997-09-07 07:04:48 +02:00
|
|
|
|
Implement operator class parameters
PostgreSQL provides set of template index access methods, where opclasses have
much freedom in the semantics of indexing. These index AMs are GiST, GIN,
SP-GiST and BRIN. There opclasses define representation of keys, operations on
them and supported search strategies. So, it's natural that opclasses may be
faced some tradeoffs, which require user-side decision. This commit implements
opclass parameters allowing users to set some values, which tell opclass how to
index the particular dataset.
This commit doesn't introduce new storage in system catalog. Instead it uses
pg_attribute.attoptions, which is used for table column storage options but
unused for index attributes.
In order to evade changing signature of each opclass support function, we
implement unified way to pass options to opclass support functions. Options
are set to fn_expr as the constant bytea expression. It's possible due to the
fact that opclass support functions are executed outside of expressions, so
fn_expr is unused for them.
This commit comes with some examples of opclass options usage. We parametrize
signature length in GiST. That applies to multiple opclasses: tsvector_ops,
gist__intbig_ops, gist_ltree_ops, gist__ltree_ops, gist_trgm_ops and
gist_hstore_ops. Also we parametrize maximum number of integer ranges for
gist__int_ops. However, the main future usage of this feature is expected
to be json, where users would be able to specify which way to index particular
json parts.
Catversion is bumped.
Discussion: https://postgr.es/m/d22c3a18-31c7-1879-fc11-4c1ce2f5e5af%40postgrespro.ru
Author: Nikita Glukhov, revised by me
Reviwed-by: Nikolay Shaplov, Robert Haas, Tom Lane, Tomas Vondra, Alvaro Herrera
2020-03-30 18:17:11 +02:00
|
|
|
InsertPgAttributeTuple(pg_attribute, attr, attoptions, indstate);
|
1997-09-07 07:04:48 +02:00
|
|
|
}
|
|
|
|
|
2002-08-05 05:29:17 +02:00
|
|
|
CatalogCloseIndexes(indstate);
|
2001-06-12 07:55:50 +02:00
|
|
|
|
2019-01-21 19:32:19 +01:00
|
|
|
table_close(pg_attribute, RowExclusiveLock);
|
1997-09-07 07:04:48 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* ----------------------------------------------------------------
 *		UpdateIndexRelation
 *
 *		Construct and insert a new entry in the pg_index catalog
 *
 * indexoid/heapoid: OIDs of the index and the table it indexes.
 * indexInfo: key column numbers, expressions, and predicate for the index.
 * collationOids/classOids/coloptions: per-key-column collation, opclass,
 *		and indoption values (arrays of length ii_NumIndexKeyAttrs).
 * primary/isexclusion/immediate/isvalid/isready: initial values for the
 *		corresponding pg_index boolean columns.
 *
 * NOTE(review): parentIndexId is accepted but not referenced in this
 * function's visible body — presumably kept for symmetry with callers;
 * confirm against index_create().
 * ----------------------------------------------------------------
 */
static void
UpdateIndexRelation(Oid indexoid,
					Oid heapoid,
					Oid parentIndexId,
					IndexInfo *indexInfo,
					Oid *collationOids,
					Oid *classOids,
					int16 *coloptions,
					bool primary,
					bool isexclusion,
					bool immediate,
					bool isvalid,
					bool isready)
{
	int2vector *indkey;
	oidvector  *indcollation;
	oidvector  *indclass;
	int2vector *indoption;
	Datum		exprsDatum;
	Datum		predDatum;
	Datum		values[Natts_pg_index];
	bool		nulls[Natts_pg_index];
	Relation	pg_index;
	HeapTuple	tuple;
	int			i;

	/*
	 * Copy the index key, opclass, and indoption info into arrays (should we
	 * make the caller pass them like this to start with?)
	 *
	 * Note: indkey covers all ii_NumIndexAttrs columns (key plus included),
	 * while the collation/opclass/indoption vectors cover only the
	 * ii_NumIndexKeyAttrs key columns.
	 */
	indkey = buildint2vector(NULL, indexInfo->ii_NumIndexAttrs);
	for (i = 0; i < indexInfo->ii_NumIndexAttrs; i++)
		indkey->values[i] = indexInfo->ii_IndexAttrNumbers[i];
	indcollation = buildoidvector(collationOids, indexInfo->ii_NumIndexKeyAttrs);
	indclass = buildoidvector(classOids, indexInfo->ii_NumIndexKeyAttrs);
	indoption = buildint2vector(coloptions, indexInfo->ii_NumIndexKeyAttrs);

	/*
	 * Convert the index expressions (if any) to a text datum
	 */
	if (indexInfo->ii_Expressions != NIL)
	{
		char	   *exprsString;

		exprsString = nodeToString(indexInfo->ii_Expressions);
		exprsDatum = CStringGetTextDatum(exprsString);
		pfree(exprsString);
	}
	else
		exprsDatum = (Datum) 0;

	/*
	 * Convert the index predicate (if any) to a text datum.  Note we convert
	 * implicit-AND format to normal explicit-AND for storage.
	 */
	if (indexInfo->ii_Predicate != NIL)
	{
		char	   *predString;

		predString = nodeToString(make_ands_explicit(indexInfo->ii_Predicate));
		predDatum = CStringGetTextDatum(predString);
		pfree(predString);
	}
	else
		predDatum = (Datum) 0;


	/*
	 * open the system catalog index relation
	 */
	pg_index = table_open(IndexRelationId, RowExclusiveLock);

	/*
	 * Build a pg_index tuple
	 */
	MemSet(nulls, false, sizeof(nulls));

	values[Anum_pg_index_indexrelid - 1] = ObjectIdGetDatum(indexoid);
	values[Anum_pg_index_indrelid - 1] = ObjectIdGetDatum(heapoid);
	values[Anum_pg_index_indnatts - 1] = Int16GetDatum(indexInfo->ii_NumIndexAttrs);
	values[Anum_pg_index_indnkeyatts - 1] = Int16GetDatum(indexInfo->ii_NumIndexKeyAttrs);
	values[Anum_pg_index_indisunique - 1] = BoolGetDatum(indexInfo->ii_Unique);
	values[Anum_pg_index_indisprimary - 1] = BoolGetDatum(primary);
	values[Anum_pg_index_indisexclusion - 1] = BoolGetDatum(isexclusion);
	values[Anum_pg_index_indimmediate - 1] = BoolGetDatum(immediate);
	/* new indexes are never clustered-on, checkxmin, or replica identity */
	values[Anum_pg_index_indisclustered - 1] = BoolGetDatum(false);
	values[Anum_pg_index_indisvalid - 1] = BoolGetDatum(isvalid);
	values[Anum_pg_index_indcheckxmin - 1] = BoolGetDatum(false);
	values[Anum_pg_index_indisready - 1] = BoolGetDatum(isready);
	/* a freshly created index is always "live" (not about to be dropped) */
	values[Anum_pg_index_indislive - 1] = BoolGetDatum(true);
	values[Anum_pg_index_indisreplident - 1] = BoolGetDatum(false);
	values[Anum_pg_index_indkey - 1] = PointerGetDatum(indkey);
	values[Anum_pg_index_indcollation - 1] = PointerGetDatum(indcollation);
	values[Anum_pg_index_indclass - 1] = PointerGetDatum(indclass);
	values[Anum_pg_index_indoption - 1] = PointerGetDatum(indoption);
	/* expression and predicate columns are NULL when absent */
	values[Anum_pg_index_indexprs - 1] = exprsDatum;
	if (exprsDatum == (Datum) 0)
		nulls[Anum_pg_index_indexprs - 1] = true;
	values[Anum_pg_index_indpred - 1] = predDatum;
	if (predDatum == (Datum) 0)
		nulls[Anum_pg_index_indpred - 1] = true;

	tuple = heap_form_tuple(RelationGetDescr(pg_index), values, nulls);

	/*
	 * insert the tuple into the pg_index catalog
	 */
	CatalogTupleInsert(pg_index, tuple);

	/*
	 * close the relation and free the tuple
	 */
	table_close(pg_index, RowExclusiveLock);
	heap_freetuple(tuple);
}
|
|
|
|
|
|
|
|
|
2006-05-11 01:18:39 +02:00
|
|
|
/*
|
|
|
|
* index_create
|
2001-08-10 20:57:42 +02:00
|
|
|
*
|
2011-01-25 21:42:03 +01:00
|
|
|
* heapRelation: table to build index on (suitably locked by caller)
|
2006-05-11 01:18:39 +02:00
|
|
|
 * indexRelationName: what it says
|
|
|
|
* indexRelationId: normally, pass InvalidOid to let this routine
|
2014-05-06 18:12:18 +02:00
|
|
|
* generate an OID for the index. During bootstrap this may be
|
2005-04-14 03:38:22 +02:00
|
|
|
* nonzero to specify a preselected OID.
|
Local partitioned indexes
When CREATE INDEX is run on a partitioned table, create catalog entries
for an index on the partitioned table (which is just a placeholder since
the table proper has no data of its own), and recurse to create actual
indexes on the existing partitions; create them in future partitions
also.
As a convenience gadget, if the new index definition matches some
existing index in partitions, these are picked up and used instead of
creating new ones. Whichever way these indexes come about, they become
attached to the index on the parent table and are dropped alongside it,
and cannot be dropped on isolation unless they are detached first.
To support pg_dump'ing these indexes, add commands
CREATE INDEX ON ONLY <table>
(which creates the index on the parent partitioned table, without
recursing) and
ALTER INDEX ATTACH PARTITION
(which is used after the indexes have been created individually on each
partition, to attach them to the parent index). These reconstruct prior
database state exactly.
Reviewed-by: (in alphabetical order) Peter Eisentraut, Robert Haas, Amit
Langote, Jesper Pedersen, Simon Riggs, David Rowley
Discussion: https://postgr.es/m/20171113170646.gzweigyrgg6pwsg4@alvherre.pgsql
2018-01-19 15:49:22 +01:00
|
|
|
* parentIndexRelid: if creating an index partition, the OID of the
|
|
|
|
* parent index; otherwise InvalidOid.
|
2018-02-19 20:59:37 +01:00
|
|
|
* parentConstraintId: if creating a constraint on a partition, the OID
|
|
|
|
* of the constraint in the parent; otherwise InvalidOid.
|
2011-07-18 17:02:48 +02:00
|
|
|
* relFileNode: normally, pass InvalidOid to get new storage. May be
|
|
|
|
* nonzero to attach an existing valid build.
|
2006-05-11 01:18:39 +02:00
|
|
|
* indexInfo: same info executor uses to insert into the index
|
Adjust naming of indexes and their columns per recent discussion.
Index expression columns are now named after the FigureColname result for
their expressions, rather than always being "pg_expression_N". Digits are
appended to this name if needed to make the column name unique within the
index. (That happens for regular columns too, thus fixing the old problem
that CREATE INDEX fooi ON foo (f1, f1) fails. Before exclusion indexes
there was no real reason to do such a thing, but now maybe there is.)
Default names for indexes and associated constraints now include the column
names of all their columns, not only the first one as in previous practice.
(Of course, this will be truncated as needed to fit in NAMEDATALEN. Also,
pkey indexes retain the historical behavior of not naming specific columns
at all.)
An example of the results:
regression=# create table foo (f1 int, f2 text,
regression(# exclude (f1 with =, lower(f2) with =));
NOTICE: CREATE TABLE / EXCLUDE will create implicit index "foo_f1_lower_exclusion" for table "foo"
CREATE TABLE
regression=# \d foo_f1_lower_exclusion
Index "public.foo_f1_lower_exclusion"
Column | Type | Definition
--------+---------+------------
f1 | integer | f1
lower | text | lower(f2)
btree, for table "public.foo"
2009-12-23 03:35:25 +01:00
|
|
|
* indexColNames: column names to use for index (List of char *)
|
2006-05-11 01:18:39 +02:00
|
|
|
* accessMethodObjectId: OID of index AM to use
|
|
|
|
* tableSpaceId: OID of tablespace to use
|
2011-04-22 23:43:18 +02:00
|
|
|
* collationObjectId: array of collation OIDs, one per index column
|
2006-05-11 01:18:39 +02:00
|
|
|
* classObjectId: array of index opclass OIDs, one per index column
|
2007-01-09 03:14:16 +01:00
|
|
|
* coloptions: array of per-index-column indoption settings
|
2006-07-04 00:45:41 +02:00
|
|
|
* reloptions: AM-specific options
|
2017-11-14 15:19:05 +01:00
|
|
|
* flags: bitmask that can include any combination of these bits:
|
|
|
|
* INDEX_CREATE_IS_PRIMARY
|
|
|
|
* the index is a primary key
|
|
|
|
* INDEX_CREATE_ADD_CONSTRAINT:
|
|
|
|
* invoke index_constraint_create also
|
|
|
|
* INDEX_CREATE_SKIP_BUILD:
|
|
|
|
* skip the index_build() step for the moment; caller must do it
|
|
|
|
* later (typically via reindex_index())
|
|
|
|
* INDEX_CREATE_CONCURRENT:
|
|
|
|
* do not lock the table against writers. The index will be
|
|
|
|
* marked "invalid" and the caller must take additional steps
|
|
|
|
* to fix it up.
|
|
|
|
* INDEX_CREATE_IF_NOT_EXISTS:
|
|
|
|
* do not throw an error if a relation with the same name
|
|
|
|
* already exists.
|
Local partitioned indexes
When CREATE INDEX is run on a partitioned table, create catalog entries
for an index on the partitioned table (which is just a placeholder since
the table proper has no data of its own), and recurse to create actual
indexes on the existing partitions; create them in future partitions
also.
As a convenience gadget, if the new index definition matches some
existing index in partitions, these are picked up and used instead of
creating new ones. Whichever way these indexes come about, they become
attached to the index on the parent table and are dropped alongside it,
and cannot be dropped on isolation unless they are detached first.
To support pg_dump'ing these indexes, add commands
CREATE INDEX ON ONLY <table>
(which creates the index on the parent partitioned table, without
recursing) and
ALTER INDEX ATTACH PARTITION
(which is used after the indexes have been created individually on each
partition, to attach them to the parent index). These reconstruct prior
database state exactly.
Reviewed-by: (in alphabetical order) Peter Eisentraut, Robert Haas, Amit
Langote, Jesper Pedersen, Simon Riggs, David Rowley
Discussion: https://postgr.es/m/20171113170646.gzweigyrgg6pwsg4@alvherre.pgsql
2018-01-19 15:49:22 +01:00
|
|
|
* INDEX_CREATE_PARTITIONED:
|
|
|
|
* create a partitioned index (table must be partitioned)
|
2017-11-14 15:19:05 +01:00
|
|
|
* constr_flags: flags passed to index_constraint_create
|
|
|
|
* (only if INDEX_CREATE_ADD_CONSTRAINT is set)
|
2006-05-11 01:18:39 +02:00
|
|
|
* allow_system_table_mods: allow table to be a system catalog
|
2012-12-06 03:09:46 +01:00
|
|
|
* is_internal: if true, post creation hook for new index
|
2018-02-19 20:59:37 +01:00
|
|
|
* constraintId: if not NULL, receives OID of created constraint
|
2005-04-14 03:38:22 +02:00
|
|
|
*
|
2009-12-07 06:22:23 +01:00
|
|
|
* Returns the OID of the created index.
|
1997-09-07 07:04:48 +02:00
|
|
|
*/
|
2001-08-10 20:57:42 +02:00
|
|
|
Oid
|
2011-01-25 21:42:03 +01:00
|
|
|
index_create(Relation heapRelation,
|
2002-03-31 08:26:32 +02:00
|
|
|
const char *indexRelationName,
|
2005-04-14 03:38:22 +02:00
|
|
|
Oid indexRelationId,
|
Local partitioned indexes
When CREATE INDEX is run on a partitioned table, create catalog entries
for an index on the partitioned table (which is just a placeholder since
the table proper has no data of its own), and recurse to create actual
indexes on the existing partitions; create them in future partitions
also.
As a convenience gadget, if the new index definition matches some
existing index in partitions, these are picked up and used instead of
creating new ones. Whichever way these indexes come about, they become
attached to the index on the parent table and are dropped alongside it,
and cannot be dropped on isolation unless they are detached first.
To support pg_dump'ing these indexes, add commands
CREATE INDEX ON ONLY <table>
(which creates the index on the parent partitioned table, without
recursing) and
ALTER INDEX ATTACH PARTITION
(which is used after the indexes have been created individually on each
partition, to attach them to the parent index). These reconstruct prior
database state exactly.
Reviewed-by: (in alphabetical order) Peter Eisentraut, Robert Haas, Amit
Langote, Jesper Pedersen, Simon Riggs, David Rowley
Discussion: https://postgr.es/m/20171113170646.gzweigyrgg6pwsg4@alvherre.pgsql
2018-01-19 15:49:22 +01:00
|
|
|
Oid parentIndexRelid,
|
2018-02-19 20:59:37 +01:00
|
|
|
Oid parentConstraintId,
|
2011-07-18 17:02:48 +02:00
|
|
|
Oid relFileNode,
|
2000-07-15 00:18:02 +02:00
|
|
|
IndexInfo *indexInfo,
|
Adjust naming of indexes and their columns per recent discussion.
Index expression columns are now named after the FigureColname result for
their expressions, rather than always being "pg_expression_N". Digits are
appended to this name if needed to make the column name unique within the
index. (That happens for regular columns too, thus fixing the old problem
that CREATE INDEX fooi ON foo (f1, f1) fails. Before exclusion indexes
there was no real reason to do such a thing, but now maybe there is.)
Default names for indexes and associated constraints now include the column
names of all their columns, not only the first one as in previous practice.
(Of course, this will be truncated as needed to fit in NAMEDATALEN. Also,
pkey indexes retain the historical behavior of not naming specific columns
at all.)
An example of the results:
regression=# create table foo (f1 int, f2 text,
regression(# exclude (f1 with =, lower(f2) with =));
NOTICE: CREATE TABLE / EXCLUDE will create implicit index "foo_f1_lower_exclusion" for table "foo"
CREATE TABLE
regression=# \d foo_f1_lower_exclusion
Index "public.foo_f1_lower_exclusion"
Column | Type | Definition
--------+---------+------------
f1 | integer | f1
lower | text | lower(f2)
btree, for table "public.foo"
2009-12-23 03:35:25 +01:00
|
|
|
List *indexColNames,
|
1997-09-07 07:04:48 +02:00
|
|
|
Oid accessMethodObjectId,
|
2004-06-18 08:14:31 +02:00
|
|
|
Oid tableSpaceId,
|
2011-02-08 22:04:18 +01:00
|
|
|
Oid *collationObjectId,
|
1998-09-01 05:29:17 +02:00
|
|
|
Oid *classObjectId,
|
2007-01-09 03:14:16 +01:00
|
|
|
int16 *coloptions,
|
2006-07-04 00:45:41 +02:00
|
|
|
Datum reloptions,
|
2017-11-14 15:19:05 +01:00
|
|
|
bits16 flags,
|
|
|
|
bits16 constr_flags,
|
2004-05-05 06:48:48 +02:00
|
|
|
bool allow_system_table_mods,
|
2018-02-19 20:59:37 +01:00
|
|
|
bool is_internal,
|
|
|
|
Oid *constraintId)
|
1996-07-09 08:22:35 +02:00
|
|
|
{
|
2011-01-25 21:42:03 +01:00
|
|
|
Oid heapRelationId = RelationGetRelid(heapRelation);
|
2005-08-12 03:36:05 +02:00
|
|
|
Relation pg_class;
|
1997-09-08 04:41:22 +02:00
|
|
|
Relation indexRelation;
|
|
|
|
TupleDesc indexTupDesc;
|
2002-04-27 23:24:34 +02:00
|
|
|
bool shared_relation;
|
2010-02-07 21:48:13 +01:00
|
|
|
bool mapped_relation;
|
2009-12-07 06:22:23 +01:00
|
|
|
bool is_exclusion;
|
2002-03-26 20:17:02 +01:00
|
|
|
Oid namespaceId;
|
2002-07-12 20:43:19 +02:00
|
|
|
int i;
|
2010-12-13 18:34:26 +01:00
|
|
|
char relpersistence;
|
2017-11-14 15:19:05 +01:00
|
|
|
bool isprimary = (flags & INDEX_CREATE_IS_PRIMARY) != 0;
|
Local partitioned indexes
When CREATE INDEX is run on a partitioned table, create catalog entries
for an index on the partitioned table (which is just a placeholder since
the table proper has no data of its own), and recurse to create actual
indexes on the existing partitions; create them in future partitions
also.
As a convenience gadget, if the new index definition matches some
existing index in partitions, these are picked up and used instead of
creating new ones. Whichever way these indexes come about, they become
attached to the index on the parent table and are dropped alongside it,
and cannot be dropped on isolation unless they are detached first.
To support pg_dump'ing these indexes, add commands
CREATE INDEX ON ONLY <table>
(which creates the index on the parent partitioned table, without
recursing) and
ALTER INDEX ATTACH PARTITION
(which is used after the indexes have been created individually on each
partition, to attach them to the parent index). These reconstruct prior
database state exactly.
Reviewed-by: (in alphabetical order) Peter Eisentraut, Robert Haas, Amit
Langote, Jesper Pedersen, Simon Riggs, David Rowley
Discussion: https://postgr.es/m/20171113170646.gzweigyrgg6pwsg4@alvherre.pgsql
2018-01-19 15:49:22 +01:00
|
|
|
bool invalid = (flags & INDEX_CREATE_INVALID) != 0;
|
2017-11-14 15:19:05 +01:00
|
|
|
bool concurrent = (flags & INDEX_CREATE_CONCURRENT) != 0;
|
Local partitioned indexes
When CREATE INDEX is run on a partitioned table, create catalog entries
for an index on the partitioned table (which is just a placeholder since
the table proper has no data of its own), and recurse to create actual
indexes on the existing partitions; create them in future partitions
also.
As a convenience gadget, if the new index definition matches some
existing index in partitions, these are picked up and used instead of
creating new ones. Whichever way these indexes come about, they become
attached to the index on the parent table and are dropped alongside it,
and cannot be dropped on isolation unless they are detached first.
To support pg_dump'ing these indexes, add commands
CREATE INDEX ON ONLY <table>
(which creates the index on the parent partitioned table, without
recursing) and
ALTER INDEX ATTACH PARTITION
(which is used after the indexes have been created individually on each
partition, to attach them to the parent index). These reconstruct prior
database state exactly.
Reviewed-by: (in alphabetical order) Peter Eisentraut, Robert Haas, Amit
Langote, Jesper Pedersen, Simon Riggs, David Rowley
Discussion: https://postgr.es/m/20171113170646.gzweigyrgg6pwsg4@alvherre.pgsql
2018-01-19 15:49:22 +01:00
|
|
|
bool partitioned = (flags & INDEX_CREATE_PARTITIONED) != 0;
|
|
|
|
char relkind;
|
2019-03-29 04:01:14 +01:00
|
|
|
TransactionId relfrozenxid;
|
|
|
|
MultiXactId relminmxid;
|
2017-11-14 15:19:05 +01:00
|
|
|
|
|
|
|
/* constraint flags can only be set when a constraint is requested */
|
|
|
|
Assert((constr_flags == 0) ||
|
|
|
|
((flags & INDEX_CREATE_ADD_CONSTRAINT) != 0));
|
Local partitioned indexes
When CREATE INDEX is run on a partitioned table, create catalog entries
for an index on the partitioned table (which is just a placeholder since
the table proper has no data of its own), and recurse to create actual
indexes on the existing partitions; create them in future partitions
also.
As a convenience gadget, if the new index definition matches some
existing index in partitions, these are picked up and used instead of
creating new ones. Whichever way these indexes come about, they become
attached to the index on the parent table and are dropped alongside it,
and cannot be dropped on isolation unless they are detached first.
To support pg_dump'ing these indexes, add commands
CREATE INDEX ON ONLY <table>
(which creates the index on the parent partitioned table, without
recursing) and
ALTER INDEX ATTACH PARTITION
(which is used after the indexes have been created individually on each
partition, to attach them to the parent index). These reconstruct prior
database state exactly.
Reviewed-by: (in alphabetical order) Peter Eisentraut, Robert Haas, Amit
Langote, Jesper Pedersen, Simon Riggs, David Rowley
Discussion: https://postgr.es/m/20171113170646.gzweigyrgg6pwsg4@alvherre.pgsql
2018-01-19 15:49:22 +01:00
|
|
|
/* partitioned indexes must never be "built" by themselves */
|
|
|
|
Assert(!partitioned || (flags & INDEX_CREATE_SKIP_BUILD));
|
1999-05-25 18:15:34 +02:00
|
|
|
|
Local partitioned indexes
When CREATE INDEX is run on a partitioned table, create catalog entries
for an index on the partitioned table (which is just a placeholder since
the table proper has no data of its own), and recurse to create actual
indexes on the existing partitions; create them in future partitions
also.
As a convenience gadget, if the new index definition matches some
existing index in partitions, these are picked up and used instead of
creating new ones. Whichever way these indexes come about, they become
attached to the index on the parent table and are dropped alongside it,
and cannot be dropped on isolation unless they are detached first.
To support pg_dump'ing these indexes, add commands
CREATE INDEX ON ONLY <table>
(which creates the index on the parent partitioned table, without
recursing) and
ALTER INDEX ATTACH PARTITION
(which is used after the indexes have been created individually on each
partition, to attach them to the parent index). These reconstruct prior
database state exactly.
Reviewed-by: (in alphabetical order) Peter Eisentraut, Robert Haas, Amit
Langote, Jesper Pedersen, Simon Riggs, David Rowley
Discussion: https://postgr.es/m/20171113170646.gzweigyrgg6pwsg4@alvherre.pgsql
2018-01-19 15:49:22 +01:00
|
|
|
relkind = partitioned ? RELKIND_PARTITIONED_INDEX : RELKIND_INDEX;
|
2009-12-07 06:22:23 +01:00
|
|
|
is_exclusion = (indexInfo->ii_ExclusionOps != NULL);
|
|
|
|
|
2019-01-21 19:32:19 +01:00
|
|
|
pg_class = table_open(RelationRelationId, RowExclusiveLock);
|
2005-08-12 03:36:05 +02:00
|
|
|
|
2002-04-27 23:24:34 +02:00
|
|
|
/*
|
2002-09-04 22:31:48 +02:00
|
|
|
* The index will be in the same namespace as its parent table, and is
|
2010-02-26 03:01:40 +01:00
|
|
|
* shared across databases if and only if the parent is. Likewise, it
|
2010-12-13 18:34:26 +01:00
|
|
|
* will use the relfilenode map if and only if the parent does; and it
|
|
|
|
* inherits the parent's relpersistence.
|
2002-04-27 23:24:34 +02:00
|
|
|
*/
|
2002-03-26 20:17:02 +01:00
|
|
|
namespaceId = RelationGetNamespace(heapRelation);
|
2002-04-27 23:24:34 +02:00
|
|
|
shared_relation = heapRelation->rd_rel->relisshared;
|
2010-02-07 21:48:13 +01:00
|
|
|
mapped_relation = RelationIsMapped(heapRelation);
|
2010-12-13 18:34:26 +01:00
|
|
|
relpersistence = heapRelation->rd_rel->relpersistence;
|
2002-03-26 20:17:02 +01:00
|
|
|
|
2001-03-22 07:16:21 +01:00
|
|
|
/*
|
|
|
|
* check parameters
|
1997-09-07 07:04:48 +02:00
|
|
|
*/
|
2003-05-28 18:04:02 +02:00
|
|
|
if (indexInfo->ii_NumIndexAttrs < 1)
|
2001-08-10 17:49:39 +02:00
|
|
|
elog(ERROR, "must index at least one column");
|
1999-05-25 18:15:34 +02:00
|
|
|
|
2002-03-26 20:17:02 +01:00
|
|
|
if (!allow_system_table_mods &&
|
2002-04-12 22:38:31 +02:00
|
|
|
IsSystemRelation(heapRelation) &&
|
2002-03-26 20:17:02 +01:00
|
|
|
IsNormalProcessingMode())
|
2003-07-21 03:59:11 +02:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
2003-09-25 08:58:07 +02:00
|
|
|
errmsg("user-defined indexes on system catalog tables are not supported")));
|
2001-04-02 16:34:25 +02:00
|
|
|
|
Fix up handling of nondeterministic collations with pattern_ops opclasses.
text_pattern_ops and its siblings can't be used with nondeterministic
collations, because they use the text_eq operator which will not behave
as bitwise equality if applied with a nondeterministic collation. The
initial implementation of that restriction was to insert a run-time test
in the related comparison functions, but that is inefficient, may throw
misleading errors, and will throw errors in some cases that would work.
It seems sufficient to just prevent the combination during CREATE INDEX,
so do that instead.
Lacking any better way to identify the opclasses involved, we need to
hard-wire tests for them, which requires hand-assigned values for their
OIDs, which forces a catversion bump because they previously had OIDs
that would be assigned automatically. That's slightly annoying in the
v12 branch, but fortunately we're not at rc1 yet, so just do it.
Back-patch to v12 where nondeterministic collations were added.
In passing, run make reformat-dat-files, which found some unrelated
whitespace issues (slightly different ones in HEAD and v12).
Peter Eisentraut, with small corrections by me
Discussion: https://postgr.es/m/22566.1568675619@sss.pgh.pa.us
2019-09-21 22:29:17 +02:00
|
|
|
/*
|
|
|
|
* Btree text_pattern_ops uses text_eq as the equality operator, which is
|
|
|
|
* fine as long as the collation is deterministic; text_eq then reduces to
|
|
|
|
* bitwise equality and so it is semantically compatible with the other
|
|
|
|
* operators and functions in that opclass. But with a nondeterministic
|
|
|
|
* collation, text_eq could yield results that are incompatible with the
|
|
|
|
* actual behavior of the index (which is determined by the opclass's
|
|
|
|
* comparison function). We prevent such problems by refusing creation of
|
|
|
|
* an index with that opclass and a nondeterministic collation.
|
|
|
|
*
|
|
|
|
* The same applies to varchar_pattern_ops and bpchar_pattern_ops. If we
|
|
|
|
* find more cases, we might decide to create a real mechanism for marking
|
|
|
|
* opclasses as incompatible with nondeterminism; but for now, this small
|
|
|
|
* hack suffices.
|
|
|
|
*
|
|
|
|
* Another solution is to use a special operator, not text_eq, as the
|
|
|
|
* equality opclass member; but that is undesirable because it would
|
|
|
|
* prevent index usage in many queries that work fine today.
|
|
|
|
*/
|
|
|
|
for (i = 0; i < indexInfo->ii_NumIndexKeyAttrs; i++)
|
|
|
|
{
|
|
|
|
Oid collation = collationObjectId[i];
|
|
|
|
Oid opclass = classObjectId[i];
|
|
|
|
|
|
|
|
if (collation)
|
|
|
|
{
|
|
|
|
if ((opclass == TEXT_BTREE_PATTERN_OPS_OID ||
|
|
|
|
opclass == VARCHAR_BTREE_PATTERN_OPS_OID ||
|
|
|
|
opclass == BPCHAR_BTREE_PATTERN_OPS_OID) &&
|
|
|
|
!get_collation_isdeterministic(collation))
|
|
|
|
{
|
|
|
|
HeapTuple classtup;
|
|
|
|
|
|
|
|
classtup = SearchSysCache1(CLAOID, ObjectIdGetDatum(opclass));
|
|
|
|
if (!HeapTupleIsValid(classtup))
|
|
|
|
elog(ERROR, "cache lookup failed for operator class %u", opclass);
|
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
|
|
|
errmsg("nondeterministic collations are not supported for operator class \"%s\"",
|
|
|
|
NameStr(((Form_pg_opclass) GETSTRUCT(classtup))->opcname))));
|
|
|
|
ReleaseSysCache(classtup);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2006-08-25 06:06:58 +02:00
|
|
|
/*
|
2019-03-29 08:25:20 +01:00
|
|
|
* Concurrent index build on a system catalog is unsafe because we tend to
|
|
|
|
* release locks before committing in catalogs.
|
2006-08-25 06:06:58 +02:00
|
|
|
*/
|
|
|
|
if (concurrent &&
|
2019-03-29 08:25:20 +01:00
|
|
|
IsCatalogRelation(heapRelation))
|
2006-08-25 06:06:58 +02:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
|
|
|
errmsg("concurrent index creation on system catalog tables is not supported")));
|
|
|
|
|
2009-12-07 06:22:23 +01:00
|
|
|
/*
|
2019-06-24 10:39:12 +02:00
|
|
|
* This case is currently not supported. There's no way to ask for it in
|
|
|
|
* the grammar with CREATE INDEX, but it can happen with REINDEX.
|
2009-12-07 06:22:23 +01:00
|
|
|
*/
|
|
|
|
if (concurrent && is_exclusion)
|
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
2019-06-24 10:39:12 +02:00
|
|
|
errmsg("concurrent index creation for exclusion constraints is not supported")));
|
2009-12-07 06:22:23 +01:00
|
|
|
|
2002-04-27 23:24:34 +02:00
|
|
|
/*
|
|
|
|
* We cannot allow indexing a shared relation after initdb (because
|
|
|
|
* there's no way to make the entry in other databases' pg_class).
|
|
|
|
*/
|
2006-07-31 22:09:10 +02:00
|
|
|
if (shared_relation && !IsBootstrapProcessingMode())
|
2003-07-21 03:59:11 +02:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
|
2005-10-15 04:49:52 +02:00
|
|
|
errmsg("shared indexes cannot be created after initdb")));
|
2002-04-27 23:24:34 +02:00
|
|
|
|
2007-10-12 20:55:12 +02:00
|
|
|
/*
|
2010-02-07 21:48:13 +01:00
|
|
|
* Shared relations must be in pg_global, too (last-ditch check)
|
2007-10-12 20:55:12 +02:00
|
|
|
*/
|
2010-02-07 21:48:13 +01:00
|
|
|
if (shared_relation && tableSpaceId != GLOBALTABLESPACE_OID)
|
|
|
|
elog(ERROR, "shared relations must be placed in pg_global tablespace");
|
2007-10-12 20:55:12 +02:00
|
|
|
|
Fully enforce uniqueness of constraint names.
It's been true for a long time that we expect names of table and domain
constraints to be unique among the constraints of that table or domain.
However, the enforcement of that has been pretty haphazard, and it missed
some corner cases such as creating a CHECK constraint and then an index
constraint of the same name (as per recent report from André Hänsel).
Also, due to the lack of an actual unique index enforcing this, duplicates
could be created through race conditions.
Moreover, the code that searches pg_constraint has been quite inconsistent
about how to handle duplicate names if one did occur: some places checked
and threw errors if there was more than one match, while others just
processed the first match they came to.
To fix, create a unique index on (conrelid, contypid, conname). Since
either conrelid or contypid is zero, this will separately enforce
uniqueness of constraint names among constraints of any one table and any
one domain. (If we ever implement SQL assertions, and put them into this
catalog, more thought might be needed. But it'd be at least as reasonable
to put them into a new catalog; having overloaded this one catalog with
two kinds of constraints was a mistake already IMO.) This index can replace
the existing non-unique index on conrelid, though we need to keep the one
on contypid for query performance reasons.
Having done that, we can simplify the logic in various places that either
coped with duplicates or neglected to, as well as potentially improve
lookup performance when searching for a constraint by name.
Also, as per our usual practice, install a preliminary check so that you
get something more friendly than a unique-index violation report in the
case complained of by André. And teach ChooseIndexName to avoid choosing
autogenerated names that would draw such a failure.
While it's not possible to make such a change in the back branches,
it doesn't seem quite too late to put this into v11, so do so.
Discussion: https://postgr.es/m/0c1001d4428f$0942b430$1bc81c90$@webkr.de
2018-09-04 19:45:35 +02:00
|
|
|
/*
|
|
|
|
* Check for duplicate name (both as to the index, and as to the
|
|
|
|
* associated constraint if any). Such cases would fail on the relevant
|
|
|
|
* catalogs' unique indexes anyway, but we prefer to give a friendlier
|
|
|
|
* error message.
|
|
|
|
*/
|
2002-03-31 08:26:32 +02:00
|
|
|
if (get_relname_relid(indexRelationName, namespaceId))
|
2014-11-06 10:48:33 +01:00
|
|
|
{
|
2017-11-14 15:19:05 +01:00
|
|
|
if ((flags & INDEX_CREATE_IF_NOT_EXISTS) != 0)
|
2014-11-06 10:48:33 +01:00
|
|
|
{
|
|
|
|
ereport(NOTICE,
|
|
|
|
(errcode(ERRCODE_DUPLICATE_TABLE),
|
|
|
|
errmsg("relation \"%s\" already exists, skipping",
|
|
|
|
indexRelationName)));
|
2019-01-21 19:32:19 +01:00
|
|
|
table_close(pg_class, RowExclusiveLock);
|
2014-11-06 10:48:33 +01:00
|
|
|
return InvalidOid;
|
|
|
|
}
|
|
|
|
|
2003-07-21 03:59:11 +02:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_DUPLICATE_TABLE),
|
|
|
|
errmsg("relation \"%s\" already exists",
|
|
|
|
indexRelationName)));
|
2014-11-06 10:48:33 +01:00
|
|
|
}
|
1997-09-07 07:04:48 +02:00
|
|
|
|
Fully enforce uniqueness of constraint names.
It's been true for a long time that we expect names of table and domain
constraints to be unique among the constraints of that table or domain.
However, the enforcement of that has been pretty haphazard, and it missed
some corner cases such as creating a CHECK constraint and then an index
constraint of the same name (as per recent report from André Hänsel).
Also, due to the lack of an actual unique index enforcing this, duplicates
could be created through race conditions.
Moreover, the code that searches pg_constraint has been quite inconsistent
about how to handle duplicate names if one did occur: some places checked
and threw errors if there was more than one match, while others just
processed the first match they came to.
To fix, create a unique index on (conrelid, contypid, conname). Since
either conrelid or contypid is zero, this will separately enforce
uniqueness of constraint names among constraints of any one table and any
one domain. (If we ever implement SQL assertions, and put them into this
catalog, more thought might be needed. But it'd be at least as reasonable
to put them into a new catalog; having overloaded this one catalog with
two kinds of constraints was a mistake already IMO.) This index can replace
the existing non-unique index on conrelid, though we need to keep the one
on contypid for query performance reasons.
Having done that, we can simplify the logic in various places that either
coped with duplicates or neglected to, as well as potentially improve
lookup performance when searching for a constraint by name.
Also, as per our usual practice, install a preliminary check so that you
get something more friendly than a unique-index violation report in the
case complained of by André. And teach ChooseIndexName to avoid choosing
autogenerated names that would draw such a failure.
While it's not possible to make such a change in the back branches,
it doesn't seem quite too late to put this into v11, so do so.
Discussion: https://postgr.es/m/0c1001d4428f$0942b430$1bc81c90$@webkr.de
2018-09-04 19:45:35 +02:00
|
|
|
if ((flags & INDEX_CREATE_ADD_CONSTRAINT) != 0 &&
|
|
|
|
ConstraintNameIsUsed(CONSTRAINT_RELATION, heapRelationId,
|
|
|
|
indexRelationName))
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* INDEX_CREATE_IF_NOT_EXISTS does not apply here, since the
|
|
|
|
* conflicting constraint is not an index.
|
|
|
|
*/
|
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_DUPLICATE_OBJECT),
|
|
|
|
errmsg("constraint \"%s\" for relation \"%s\" already exists",
|
|
|
|
indexRelationName, RelationGetRelationName(heapRelation))));
|
|
|
|
}
|
|
|
|
|
2001-03-22 07:16:21 +01:00
|
|
|
/*
|
2001-08-22 20:24:26 +02:00
|
|
|
* construct tuple descriptor for index tuples
|
1997-09-07 07:04:48 +02:00
|
|
|
*/
|
2003-05-28 18:04:02 +02:00
|
|
|
indexTupDesc = ConstructTupleDescriptor(heapRelation,
|
|
|
|
indexInfo,
|
Adjust naming of indexes and their columns per recent discussion.
Index expression columns are now named after the FigureColname result for
their expressions, rather than always being "pg_expression_N". Digits are
appended to this name if needed to make the column name unique within the
index. (That happens for regular columns too, thus fixing the old problem
that CREATE INDEX fooi ON foo (f1, f1) fails. Before exclusion indexes
there was no real reason to do such a thing, but now maybe there is.)
Default names for indexes and associated constraints now include the column
names of all their columns, not only the first one as in previous practice.
(Of course, this will be truncated as needed to fit in NAMEDATALEN. Also,
pkey indexes retain the historical behavior of not naming specific columns
at all.)
An example of the results:
regression=# create table foo (f1 int, f2 text,
regression(# exclude (f1 with =, lower(f2) with =));
NOTICE: CREATE TABLE / EXCLUDE will create implicit index "foo_f1_lower_exclusion" for table "foo"
CREATE TABLE
regression=# \d foo_f1_lower_exclusion
Index "public.foo_f1_lower_exclusion"
Column | Type | Definition
--------+---------+------------
f1 | integer | f1
lower | text | lower(f2)
btree, for table "public.foo"
2009-12-23 03:35:25 +01:00
|
|
|
indexColNames,
|
2008-09-15 20:43:41 +02:00
|
|
|
accessMethodObjectId,
|
2011-02-08 22:04:18 +01:00
|
|
|
collationObjectId,
|
2003-05-28 18:04:02 +02:00
|
|
|
classObjectId);
|
2002-09-04 22:31:48 +02:00
|
|
|
|
2010-02-03 02:14:17 +01:00
|
|
|
/*
|
|
|
|
* Allocate an OID for the index, unless we were told what to use.
|
|
|
|
*
|
|
|
|
* The OID will be the relfilenode as well, so make sure it doesn't
|
|
|
|
* collide with either pg_class OIDs or existing physical files.
|
|
|
|
*/
|
|
|
|
if (!OidIsValid(indexRelationId))
|
2010-01-06 04:04:03 +01:00
|
|
|
{
|
2014-08-26 04:19:05 +02:00
|
|
|
/* Use binary-upgrade override for pg_class.oid/relfilenode? */
|
|
|
|
if (IsBinaryUpgrade)
|
2010-02-03 02:14:17 +01:00
|
|
|
{
|
2014-08-26 04:19:05 +02:00
|
|
|
if (!OidIsValid(binary_upgrade_next_index_pg_class_oid))
|
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
|
|
|
|
errmsg("pg_class index OID value not set when in binary upgrade mode")));
|
|
|
|
|
2011-01-08 03:25:34 +01:00
|
|
|
indexRelationId = binary_upgrade_next_index_pg_class_oid;
|
|
|
|
binary_upgrade_next_index_pg_class_oid = InvalidOid;
|
2010-02-03 02:14:17 +01:00
|
|
|
}
|
|
|
|
else
|
2010-08-13 22:10:54 +02:00
|
|
|
{
|
|
|
|
indexRelationId =
|
2010-12-13 18:34:26 +01:00
|
|
|
GetNewRelFileNode(tableSpaceId, pg_class, relpersistence);
|
2010-08-13 22:10:54 +02:00
|
|
|
}
|
2010-01-06 04:04:03 +01:00
|
|
|
}
|
2005-08-12 03:36:05 +02:00
|
|
|
|
2001-03-22 07:16:21 +01:00
|
|
|
/*
|
Local partitioned indexes
When CREATE INDEX is run on a partitioned table, create catalog entries
for an index on the partitioned table (which is just a placeholder since
the table proper has no data of its own), and recurse to create actual
indexes on the existing partitions; create them in future partitions
also.
As a convenience gadget, if the new index definition matches some
existing index in partitions, these are picked up and used instead of
creating new ones. Whichever way these indexes come about, they become
attached to the index on the parent table and are dropped alongside it,
and cannot be dropped on isolation unless they are detached first.
To support pg_dump'ing these indexes, add commands
CREATE INDEX ON ONLY <table>
(which creates the index on the parent partitioned table, without
recursing) and
ALTER INDEX ATTACH PARTITION
(which is used after the indexes have been created individually on each
partition, to attach them to the parent index). These reconstruct prior
database state exactly.
Reviewed-by: (in alphabetical order) Peter Eisentraut, Robert Haas, Amit
Langote, Jesper Pedersen, Simon Riggs, David Rowley
Discussion: https://postgr.es/m/20171113170646.gzweigyrgg6pwsg4@alvherre.pgsql
2018-01-19 15:49:22 +01:00
|
|
|
* create the index relation's relcache entry and, if necessary, the
|
|
|
|
* physical disk file. (If we fail further down, it's the smgr's
|
|
|
|
* responsibility to remove the disk file again, if any.)
|
1996-07-09 08:22:35 +02:00
|
|
|
*/
|
2002-03-26 20:17:02 +01:00
|
|
|
indexRelation = heap_create(indexRelationName,
|
|
|
|
namespaceId,
|
2004-06-18 08:14:31 +02:00
|
|
|
tableSpaceId,
|
2005-04-14 03:38:22 +02:00
|
|
|
indexRelationId,
|
2011-07-18 17:02:48 +02:00
|
|
|
relFileNode,
|
tableam: introduce table AM infrastructure.
This introduces the concept of table access methods, i.e. CREATE
ACCESS METHOD ... TYPE TABLE and
CREATE TABLE ... USING (storage-engine).
No table access functionality is delegated to table AMs as of this
commit, that'll be done in following commits.
Subsequent commits will incrementally abstract table access
functionality to be routed through table access methods. That change
is too large to be reviewed & committed at once, so it'll be done
incrementally.
Docs will be updated at the end, as adding them incrementally would
likely make them less coherent, and definitely is a lot more work,
without a lot of benefit.
Table access methods are specified similar to index access methods,
i.e. pg_am.amhandler returns, as INTERNAL, a pointer to a struct with
callbacks. In contrast to index AMs that struct needs to live as long
as a backend, typically that's achieved by just returning a pointer to
a constant struct.
Psql's \d+ now displays a table's access method. That can be disabled
with HIDE_TABLEAM=true, which is mainly useful so regression tests can
be run against different AMs. It's quite possible that this behaviour
still needs to be fine tuned.
For now it's not allowed to set a table AM for a partitioned table, as
we've not resolved how partitions would inherit that. Disallowing
allows us to introduce, if we decide that's the way forward, such a
behaviour without a compatibility break.
Catversion bumped, to add the heap table AM and references to it.
Author: Haribabu Kommi, Andres Freund, Alvaro Herrera, Dimitri Golgov and others
Discussion:
https://postgr.es/m/20180703070645.wchpu5muyto5n647@alap3.anarazel.de
https://postgr.es/m/20160812231527.GA690404@alvherre.pgsql
https://postgr.es/m/20190107235616.6lur25ph22u5u5av@alap3.anarazel.de
https://postgr.es/m/20190304234700.w5tmhducs5wxgzls@alap3.anarazel.de
2019-03-06 18:54:38 +01:00
|
|
|
accessMethodObjectId,
|
2002-03-26 20:17:02 +01:00
|
|
|
indexTupDesc,
|
Local partitioned indexes
When CREATE INDEX is run on a partitioned table, create catalog entries
for an index on the partitioned table (which is just a placeholder since
the table proper has no data of its own), and recurse to create actual
indexes on the existing partitions; create them in future partitions
also.
As a convenience gadget, if the new index definition matches some
existing index in partitions, these are picked up and used instead of
creating new ones. Whichever way these indexes come about, they become
attached to the index on the parent table and are dropped alongside it,
and cannot be dropped on isolation unless they are detached first.
To support pg_dump'ing these indexes, add commands
CREATE INDEX ON ONLY <table>
(which creates the index on the parent partitioned table, without
recursing) and
ALTER INDEX ATTACH PARTITION
(which is used after the indexes have been created individually on each
partition, to attach them to the parent index). These reconstruct prior
database state exactly.
Reviewed-by: (in alphabetical order) Peter Eisentraut, Robert Haas, Amit
Langote, Jesper Pedersen, Simon Riggs, David Rowley
Discussion: https://postgr.es/m/20171113170646.gzweigyrgg6pwsg4@alvherre.pgsql
2018-01-19 15:49:22 +01:00
|
|
|
relkind,
|
2010-12-13 18:34:26 +01:00
|
|
|
relpersistence,
|
2002-04-27 23:24:34 +02:00
|
|
|
shared_relation,
|
2013-06-03 16:22:31 +02:00
|
|
|
mapped_relation,
|
2019-03-29 04:01:14 +01:00
|
|
|
allow_system_table_mods,
|
|
|
|
&relfrozenxid,
|
|
|
|
&relminmxid);
|
2002-08-11 23:17:35 +02:00
|
|
|
|
2019-03-29 04:01:14 +01:00
|
|
|
Assert(relfrozenxid == InvalidTransactionId);
|
|
|
|
Assert(relminmxid == InvalidMultiXactId);
|
2005-08-12 03:36:05 +02:00
|
|
|
Assert(indexRelationId == RelationGetRelid(indexRelation));
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2000-11-08 23:10:03 +01:00
|
|
|
/*
|
Support parallel btree index builds.
To make this work, tuplesort.c and logtape.c must also support
parallelism, so this patch adds that infrastructure and then applies
it to the particular case of parallel btree index builds. Testing
to date shows that this can often be 2-3x faster than a serial
index build.
The model for deciding how many workers to use is fairly primitive
at present, but it's better than not having the feature. We can
refine it as we get more experience.
Peter Geoghegan with some help from Rushabh Lathia. While Heikki
Linnakangas is not an author of this patch, he wrote other patches
without which this feature would not have been possible, and
therefore the release notes should possibly credit him as an author
of this feature. Reviewed by Claudio Freire, Heikki Linnakangas,
Thomas Munro, Tels, Amit Kapila, me.
Discussion: http://postgr.es/m/CAM3SWZQKM=Pzc=CAHzRixKjp2eO5Q0Jg1SoFQqeXFQ647JiwqQ@mail.gmail.com
Discussion: http://postgr.es/m/CAH2-Wz=AxWqDoVvGU7dq856S4r6sJAj6DBn7VMtigkB33N5eyg@mail.gmail.com
2018-02-02 19:25:55 +01:00
|
|
|
* Obtain exclusive lock on it. Although no other transactions can see it
|
2000-11-08 23:10:03 +01:00
|
|
|
* until we commit, this prevents deadlock-risk complaints from lock
|
|
|
|
* manager in cases such as CLUSTER.
|
|
|
|
*/
|
|
|
|
LockRelation(indexRelation, AccessExclusiveLock);
|
|
|
|
|
2001-03-22 07:16:21 +01:00
|
|
|
/*
|
2005-10-15 04:49:52 +02:00
|
|
|
* Fill in fields of the index's pg_class entry that are not set correctly
|
|
|
|
* by heap_create.
|
1997-09-07 07:04:48 +02:00
|
|
|
*
|
2002-04-27 23:24:34 +02:00
|
|
|
* XXX should have a cleaner way to create cataloged indexes
|
1996-07-09 08:22:35 +02:00
|
|
|
*/
|
2005-08-26 05:08:15 +02:00
|
|
|
indexRelation->rd_rel->relowner = heapRelation->rd_rel->relowner;
|
2002-04-27 23:24:34 +02:00
|
|
|
indexRelation->rd_rel->relam = accessMethodObjectId;
|
2018-04-12 02:27:12 +02:00
|
|
|
indexRelation->rd_rel->relispartition = OidIsValid(parentIndexRelid);
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2002-04-27 23:24:34 +02:00
|
|
|
/*
|
|
|
|
* store index's pg_class entry
|
1996-07-09 08:22:35 +02:00
|
|
|
*/
|
2006-07-04 00:45:41 +02:00
|
|
|
InsertPgClassTuple(pg_class, indexRelation,
|
|
|
|
RelationGetRelid(indexRelation),
|
2009-10-05 21:24:49 +02:00
|
|
|
(Datum) 0,
|
2006-07-04 00:45:41 +02:00
|
|
|
reloptions);
|
2005-08-12 03:36:05 +02:00
|
|
|
|
|
|
|
/* done with pg_class */
|
2019-01-21 19:32:19 +01:00
|
|
|
table_close(pg_class, RowExclusiveLock);
|
1996-08-26 08:32:06 +02:00
|
|
|
|
2001-03-22 07:16:21 +01:00
|
|
|
/*
|
|
|
|
* now update the object id's of all the attribute tuple forms in the
|
|
|
|
* index relation's tuple descriptor
|
1997-09-07 07:04:48 +02:00
|
|
|
*/
|
2000-07-15 00:18:02 +02:00
|
|
|
InitializeAttributeOids(indexRelation,
|
|
|
|
indexInfo->ii_NumIndexAttrs,
|
2005-08-12 03:36:05 +02:00
|
|
|
indexRelationId);
|
1996-07-09 08:22:35 +02:00
|
|
|
|
2001-03-22 07:16:21 +01:00
|
|
|
/*
|
|
|
|
* append ATTRIBUTE tuples for the index
|
1996-07-09 08:22:35 +02:00
|
|
|
*/
|
Implement operator class parameters
PostgreSQL provides set of template index access methods, where opclasses have
much freedom in the semantics of indexing. These index AMs are GiST, GIN,
SP-GiST and BRIN. There opclasses define representation of keys, operations on
them and supported search strategies. So, it's natural that opclasses may be
faced with some tradeoffs, which require a user-side decision. This commit implements
opclass parameters allowing users to set some values, which tell opclass how to
index the particular dataset.
This commit doesn't introduce new storage in system catalog. Instead it uses
pg_attribute.attoptions, which is used for table column storage options but
unused for index attributes.
In order to avoid changing the signature of each opclass support function, we
implement a unified way to pass options to opclass support functions. Options
are set to fn_expr as the constant bytea expression. It's possible due to the
fact that opclass support functions are executed outside of expressions, so
fn_expr is unused for them.
This commit comes with some examples of opclass options usage. We parametrize
signature length in GiST. That applies to multiple opclasses: tsvector_ops,
gist__intbig_ops, gist_ltree_ops, gist__ltree_ops, gist_trgm_ops and
gist_hstore_ops. Also we parametrize maximum number of integer ranges for
gist__int_ops. However, the main future usage of this feature is expected
to be json, where users would be able to specify which way to index particular
json parts.
Catversion is bumped.
Discussion: https://postgr.es/m/d22c3a18-31c7-1879-fc11-4c1ce2f5e5af%40postgrespro.ru
Author: Nikita Glukhov, revised by me
Reviewed-by: Nikolay Shaplov, Robert Haas, Tom Lane, Tomas Vondra, Alvaro Herrera
2020-03-30 18:17:11 +02:00
|
|
|
AppendAttributeTuples(indexRelation, indexInfo->ii_NumIndexAttrs,
|
|
|
|
indexInfo->ii_OpclassOptions);
|
1997-09-07 07:04:48 +02:00
|
|
|
|
1996-07-09 08:22:35 +02:00
|
|
|
/* ----------------
|
1997-09-07 07:04:48 +02:00
|
|
|
* update pg_index
|
|
|
|
* (append INDEX tuple)
|
|
|
|
*
|
|
|
|
* Note that this stows away a representation of "predicate".
|
|
|
|
* (Or, could define a rule to maintain the predicate) --Nels, Feb '92
|
1996-07-09 08:22:35 +02:00
|
|
|
* ----------------
|
|
|
|
*/
|
Local partitioned indexes
When CREATE INDEX is run on a partitioned table, create catalog entries
for an index on the partitioned table (which is just a placeholder since
the table proper has no data of its own), and recurse to create actual
indexes on the existing partitions; create them in future partitions
also.
As a convenience gadget, if the new index definition matches some
existing index in partitions, these are picked up and used instead of
creating new ones. Whichever way these indexes come about, they become
attached to the index on the parent table and are dropped alongside it,
and cannot be dropped on isolation unless they are detached first.
To support pg_dump'ing these indexes, add commands
CREATE INDEX ON ONLY <table>
(which creates the index on the parent partitioned table, without
recursing) and
ALTER INDEX ATTACH PARTITION
(which is used after the indexes have been created individually on each
partition, to attach them to the parent index). These reconstruct prior
database state exactly.
Reviewed-by: (in alphabetical order) Peter Eisentraut, Robert Haas, Amit
Langote, Jesper Pedersen, Simon Riggs, David Rowley
Discussion: https://postgr.es/m/20171113170646.gzweigyrgg6pwsg4@alvherre.pgsql
2018-01-19 15:49:22 +01:00
|
|
|
UpdateIndexRelation(indexRelationId, heapRelationId, parentIndexRelid,
|
|
|
|
indexInfo,
|
2011-04-22 23:43:18 +02:00
|
|
|
collationObjectId, classObjectId, coloptions,
|
|
|
|
isprimary, is_exclusion,
|
2017-11-14 15:19:05 +01:00
|
|
|
(constr_flags & INDEX_CONSTR_CREATE_DEFERRABLE) == 0,
|
Local partitioned indexes
When CREATE INDEX is run on a partitioned table, create catalog entries
for an index on the partitioned table (which is just a placeholder since
the table proper has no data of its own), and recurse to create actual
indexes on the existing partitions; create them in future partitions
also.
As a convenience gadget, if the new index definition matches some
existing index in partitions, these are picked up and used instead of
creating new ones. Whichever way these indexes come about, they become
attached to the index on the parent table and are dropped alongside it,
and cannot be dropped on isolation unless they are detached first.
To support pg_dump'ing these indexes, add commands
CREATE INDEX ON ONLY <table>
(which creates the index on the parent partitioned table, without
recursing) and
ALTER INDEX ATTACH PARTITION
(which is used after the indexes have been created individually on each
partition, to attach them to the parent index). These reconstruct prior
database state exactly.
Reviewed-by: (in alphabetical order) Peter Eisentraut, Robert Haas, Amit
Langote, Jesper Pedersen, Simon Riggs, David Rowley
Discussion: https://postgr.es/m/20171113170646.gzweigyrgg6pwsg4@alvherre.pgsql
2018-01-19 15:49:22 +01:00
|
|
|
!concurrent && !invalid,
|
2009-07-29 22:56:21 +02:00
|
|
|
!concurrent);
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2018-08-03 23:45:11 +02:00
|
|
|
/*
|
|
|
|
* Register relcache invalidation on the indexes' heap relation, to
|
|
|
|
* maintain consistency of its index list
|
|
|
|
*/
|
|
|
|
CacheInvalidateRelcache(heapRelation);
|
|
|
|
|
2018-10-22 04:04:48 +02:00
|
|
|
/* update pg_inherits and the parent's relhassubclass, if needed */
|
Local partitioned indexes
When CREATE INDEX is run on a partitioned table, create catalog entries
for an index on the partitioned table (which is just a placeholder since
the table proper has no data of its own), and recurse to create actual
indexes on the existing partitions; create them in future partitions
also.
As a convenience gadget, if the new index definition matches some
existing index in partitions, these are picked up and used instead of
creating new ones. Whichever way these indexes come about, they become
attached to the index on the parent table and are dropped alongside it,
and cannot be dropped on isolation unless they are detached first.
To support pg_dump'ing these indexes, add commands
CREATE INDEX ON ONLY <table>
(which creates the index on the parent partitioned table, without
recursing) and
ALTER INDEX ATTACH PARTITION
(which is used after the indexes have been created individually on each
partition, to attach them to the parent index). These reconstruct prior
database state exactly.
Reviewed-by: (in alphabetical order) Peter Eisentraut, Robert Haas, Amit
Langote, Jesper Pedersen, Simon Riggs, David Rowley
Discussion: https://postgr.es/m/20171113170646.gzweigyrgg6pwsg4@alvherre.pgsql
2018-01-19 15:49:22 +01:00
|
|
|
if (OidIsValid(parentIndexRelid))
|
2018-10-22 04:04:48 +02:00
|
|
|
{
|
Local partitioned indexes
When CREATE INDEX is run on a partitioned table, create catalog entries
for an index on the partitioned table (which is just a placeholder since
the table proper has no data of its own), and recurse to create actual
indexes on the existing partitions; create them in future partitions
also.
As a convenience gadget, if the new index definition matches some
existing index in partitions, these are picked up and used instead of
creating new ones. Whichever way these indexes come about, they become
attached to the index on the parent table and are dropped alongside it,
and cannot be dropped on isolation unless they are detached first.
To support pg_dump'ing these indexes, add commands
CREATE INDEX ON ONLY <table>
(which creates the index on the parent partitioned table, without
recursing) and
ALTER INDEX ATTACH PARTITION
(which is used after the indexes have been created individually on each
partition, to attach them to the parent index). These reconstruct prior
database state exactly.
Reviewed-by: (in alphabetical order) Peter Eisentraut, Robert Haas, Amit
Langote, Jesper Pedersen, Simon Riggs, David Rowley
Discussion: https://postgr.es/m/20171113170646.gzweigyrgg6pwsg4@alvherre.pgsql
2018-01-19 15:49:22 +01:00
|
|
|
StoreSingleInheritance(indexRelationId, parentIndexRelid, 1);
|
2018-10-22 04:04:48 +02:00
|
|
|
SetRelationHasSubclass(parentIndexRelid, true);
|
|
|
|
}
|
Local partitioned indexes
When CREATE INDEX is run on a partitioned table, create catalog entries
for an index on the partitioned table (which is just a placeholder since
the table proper has no data of its own), and recurse to create actual
indexes on the existing partitions; create them in future partitions
also.
As a convenience gadget, if the new index definition matches some
existing index in partitions, these are picked up and used instead of
creating new ones. Whichever way these indexes come about, they become
attached to the index on the parent table and are dropped alongside it,
and cannot be dropped on isolation unless they are detached first.
To support pg_dump'ing these indexes, add commands
CREATE INDEX ON ONLY <table>
(which creates the index on the parent partitioned table, without
recursing) and
ALTER INDEX ATTACH PARTITION
(which is used after the indexes have been created individually on each
partition, to attach them to the parent index). These reconstruct prior
database state exactly.
Reviewed-by: (in alphabetical order) Peter Eisentraut, Robert Haas, Amit
Langote, Jesper Pedersen, Simon Riggs, David Rowley
Discussion: https://postgr.es/m/20171113170646.gzweigyrgg6pwsg4@alvherre.pgsql
2018-01-19 15:49:22 +01:00
|
|
|
|
2001-03-22 07:16:21 +01:00
|
|
|
/*
|
2002-07-12 20:43:19 +02:00
|
|
|
* Register constraint and dependencies for the index.
|
|
|
|
*
|
2005-11-22 19:17:34 +01:00
|
|
|
* If the index is from a CONSTRAINT clause, construct a pg_constraint
|
2011-01-25 21:42:03 +01:00
|
|
|
* entry. The index will be linked to the constraint, which in turn is
|
|
|
|
* linked to the table. If it's not a CONSTRAINT, we need to make a
|
|
|
|
* dependency directly on the table.
|
2002-07-12 20:43:19 +02:00
|
|
|
*
|
2002-09-04 22:31:48 +02:00
|
|
|
* We don't need a dependency on the namespace, because there'll be an
|
|
|
|
* indirect dependency via our parent table.
|
2002-07-18 18:47:26 +02:00
|
|
|
*
|
2005-11-22 19:17:34 +01:00
|
|
|
* During bootstrap we can't register any dependencies, and we don't try
|
|
|
|
* to make a constraint either.
|
2002-07-12 20:43:19 +02:00
|
|
|
*/
|
|
|
|
if (!IsBootstrapProcessingMode())
|
|
|
|
{
|
2002-09-04 22:31:48 +02:00
|
|
|
ObjectAddress myself,
|
|
|
|
referenced;
|
2002-07-12 20:43:19 +02:00
|
|
|
|
2020-07-01 10:03:50 +02:00
|
|
|
ObjectAddressSet(myself, RelationRelationId, indexRelationId);
|
2002-07-12 20:43:19 +02:00
|
|
|
|
2017-11-14 15:19:05 +01:00
|
|
|
if ((flags & INDEX_CREATE_ADD_CONSTRAINT) != 0)
|
2002-07-12 20:43:19 +02:00
|
|
|
{
|
|
|
|
char constraintType;
|
2018-02-19 20:59:37 +01:00
|
|
|
ObjectAddress localaddr;
|
2002-07-12 20:43:19 +02:00
|
|
|
|
2006-05-11 01:18:39 +02:00
|
|
|
if (isprimary)
|
2002-07-12 20:43:19 +02:00
|
|
|
constraintType = CONSTRAINT_PRIMARY;
|
|
|
|
else if (indexInfo->ii_Unique)
|
|
|
|
constraintType = CONSTRAINT_UNIQUE;
|
2009-12-07 06:22:23 +01:00
|
|
|
else if (is_exclusion)
|
|
|
|
constraintType = CONSTRAINT_EXCLUSION;
|
2002-07-12 20:43:19 +02:00
|
|
|
else
|
|
|
|
{
|
2009-12-07 06:22:23 +01:00
|
|
|
elog(ERROR, "constraint must be PRIMARY, UNIQUE or EXCLUDE");
|
Phase 2 of pgindent updates.
Change pg_bsd_indent to follow upstream rules for placement of comments
to the right of code, and remove pgindent hack that caused comments
following #endif to not obey the general rule.
Commit e3860ffa4dd0dad0dd9eea4be9cc1412373a8c89 wasn't actually using
the published version of pg_bsd_indent, but a hacked-up version that
tried to minimize the amount of movement of comments to the right of
code. The situation of interest is where such a comment has to be
moved to the right of its default placement at column 33 because there's
code there. BSD indent has always moved right in units of tab stops
in such cases --- but in the previous incarnation, indent was working
in 8-space tab stops, while now it knows we use 4-space tabs. So the
net result is that in about half the cases, such comments are placed
one tab stop left of before. This is better all around: it leaves
more room on the line for comment text, and it means that in such
cases the comment uniformly starts at the next 4-space tab stop after
the code, rather than sometimes one and sometimes two tabs after.
Also, ensure that comments following #endif are indented the same
as comments following other preprocessor commands such as #else.
That inconsistency turns out to have been self-inflicted damage
from a poorly-thought-through post-indent "fixup" in pgindent.
This patch is much less interesting than the first round of indent
changes, but also bulkier, so I thought it best to separate the effects.
Discussion: https://postgr.es/m/E1dAmxK-0006EE-1r@gemulon.postgresql.org
Discussion: https://postgr.es/m/30527.1495162840@sss.pgh.pa.us
2017-06-21 21:18:54 +02:00
|
|
|
constraintType = 0; /* keep compiler quiet */
|
2002-07-12 20:43:19 +02:00
|
|
|
}
|
|
|
|
|
2018-02-19 20:59:37 +01:00
|
|
|
localaddr = index_constraint_create(heapRelation,
|
2018-04-26 20:47:16 +02:00
|
|
|
indexRelationId,
|
|
|
|
parentConstraintId,
|
|
|
|
indexInfo,
|
|
|
|
indexRelationName,
|
|
|
|
constraintType,
|
|
|
|
constr_flags,
|
|
|
|
allow_system_table_mods,
|
|
|
|
is_internal);
|
2018-02-19 20:59:37 +01:00
|
|
|
if (constraintId)
|
|
|
|
*constraintId = localaddr.objectId;
|
2002-07-12 20:43:19 +02:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2007-11-15 22:14:46 +01:00
|
|
|
bool have_simple_col = false;
|
2007-11-09 00:22:54 +01:00
|
|
|
|
2003-05-28 18:04:02 +02:00
|
|
|
/* Create auto dependencies on simply-referenced columns */
|
|
|
|
for (i = 0; i < indexInfo->ii_NumIndexAttrs; i++)
|
2002-07-12 20:43:19 +02:00
|
|
|
{
|
2018-04-12 12:02:45 +02:00
|
|
|
if (indexInfo->ii_IndexAttrNumbers[i] != 0)
|
2003-05-28 18:04:02 +02:00
|
|
|
{
|
2020-07-01 10:03:50 +02:00
|
|
|
ObjectAddressSubSet(referenced, RelationRelationId,
|
|
|
|
heapRelationId,
|
|
|
|
indexInfo->ii_IndexAttrNumbers[i]);
|
Redesign the partition dependency mechanism.
The original setup for dependencies of partitioned objects had
serious problems:
1. It did not verify that a drop cascading to a partition-child object
also cascaded to at least one of the object's partition parents. Now,
normally a child object would share all its dependencies with one or
another parent (e.g. a child index's opclass dependencies would be shared
with the parent index), so that this oversight is usually harmless.
But if some dependency failed to fit this pattern, the child could be
dropped while all its parents remain, creating a logically broken
situation. (It's easy to construct artificial cases that break it,
such as attaching an unrelated extension dependency to the child object
and then dropping the extension. I'm not sure if any less-artificial
cases exist.)
2. Management of partition dependencies during ATTACH/DETACH PARTITION
was complicated and buggy; for example, after detaching a partition
table it was possible to create cases where a formerly-child index
should be dropped and was not, because the correct set of dependencies
had not been reconstructed.
Less seriously, because multiple partition relationships were
represented identically in pg_depend, there was an order-of-traversal
dependency on which partition parent was cited in error messages.
We also had some pre-existing order-of-traversal hazards for error
messages related to internal and extension dependencies. This is
cosmetic to users but causes testing problems.
To fix #1, add a check at the end of the partition tree traversal
to ensure that at least one partition parent got deleted. To fix #2,
establish a new policy that partition dependencies are in addition to,
not instead of, a child object's usual dependencies; in this way
ATTACH/DETACH PARTITION need not cope with adding or removing the
usual dependencies.
To fix the cosmetic problem, distinguish between primary and secondary
partition dependency entries in pg_depend, by giving them different
deptypes. (They behave identically except for having different
priorities for being cited in error messages.) This means that the
former 'I' dependency type is replaced with new 'P' and 'S' types.
This also fixes a longstanding bug that after handling an internal
dependency by recursing to the owning object, findDependentObjects
did not verify that the current target was now scheduled for deletion,
and did not apply the current recursion level's objflags to it.
Perhaps that should be back-patched; but in the back branches it
would only matter if some concurrent transaction had removed the
internal-linkage pg_depend entry before the recursive call found it,
or the recursive call somehow failed to find it, both of which seem
unlikely.
Catversion bump because the contents of pg_depend change for
partitioning relationships.
Patch HEAD only. It's annoying that we're not fixing #2 in v11,
but there seems no practical way to do so given that the problem
is exactly a poor choice of what entries to put in pg_depend.
We can't really fix that while staying compatible with what's
in pg_depend in existing v11 installations.
Discussion: https://postgr.es/m/CAH2-Wzkypv1R+teZrr71U23J578NnTBt2X8+Y=Odr4pOdW1rXg@mail.gmail.com
2019-02-11 20:41:13 +01:00
|
|
|
recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
|
2007-11-09 00:22:54 +01:00
|
|
|
have_simple_col = true;
|
2003-05-28 18:04:02 +02:00
|
|
|
}
|
2002-07-12 20:43:19 +02:00
|
|
|
}
|
2007-11-09 00:22:54 +01:00
|
|
|
|
|
|
|
/*
|
2010-11-02 22:15:07 +01:00
|
|
|
* If there are no simply-referenced columns, give the index an
|
2014-05-06 18:12:18 +02:00
|
|
|
* auto dependency on the whole table. In most cases, this will
|
2010-11-02 22:15:07 +01:00
|
|
|
* be redundant, but it might not be if the index expressions and
|
|
|
|
* predicate contain no Vars or only whole-row Vars.
|
2007-11-09 00:22:54 +01:00
|
|
|
*/
|
2010-11-02 22:15:07 +01:00
|
|
|
if (!have_simple_col)
|
2007-11-09 00:22:54 +01:00
|
|
|
{
|
2020-07-01 10:03:50 +02:00
|
|
|
ObjectAddressSet(referenced, RelationRelationId,
|
|
|
|
heapRelationId);
|
Redesign the partition dependency mechanism.
The original setup for dependencies of partitioned objects had
serious problems:
1. It did not verify that a drop cascading to a partition-child object
also cascaded to at least one of the object's partition parents. Now,
normally a child object would share all its dependencies with one or
another parent (e.g. a child index's opclass dependencies would be shared
with the parent index), so that this oversight is usually harmless.
But if some dependency failed to fit this pattern, the child could be
dropped while all its parents remain, creating a logically broken
situation. (It's easy to construct artificial cases that break it,
such as attaching an unrelated extension dependency to the child object
and then dropping the extension. I'm not sure if any less-artificial
cases exist.)
2. Management of partition dependencies during ATTACH/DETACH PARTITION
was complicated and buggy; for example, after detaching a partition
table it was possible to create cases where a formerly-child index
should be dropped and was not, because the correct set of dependencies
had not been reconstructed.
Less seriously, because multiple partition relationships were
represented identically in pg_depend, there was an order-of-traversal
dependency on which partition parent was cited in error messages.
We also had some pre-existing order-of-traversal hazards for error
messages related to internal and extension dependencies. This is
cosmetic to users but causes testing problems.
To fix #1, add a check at the end of the partition tree traversal
to ensure that at least one partition parent got deleted. To fix #2,
establish a new policy that partition dependencies are in addition to,
not instead of, a child object's usual dependencies; in this way
ATTACH/DETACH PARTITION need not cope with adding or removing the
usual dependencies.
To fix the cosmetic problem, distinguish between primary and secondary
partition dependency entries in pg_depend, by giving them different
deptypes. (They behave identically except for having different
priorities for being cited in error messages.) This means that the
former 'I' dependency type is replaced with new 'P' and 'S' types.
This also fixes a longstanding bug that after handling an internal
dependency by recursing to the owning object, findDependentObjects
did not verify that the current target was now scheduled for deletion,
and did not apply the current recursion level's objflags to it.
Perhaps that should be back-patched; but in the back branches it
would only matter if some concurrent transaction had removed the
internal-linkage pg_depend entry before the recursive call found it,
or the recursive call somehow failed to find it, both of which seem
unlikely.
Catversion bump because the contents of pg_depend change for
partitioning relationships.
Patch HEAD only. It's annoying that we're not fixing #2 in v11,
but there seems no practical way to do so given that the problem
is exactly a poor choice of what entries to put in pg_depend.
We can't really fix that while staying compatible with what's
in pg_depend in existing v11 installations.
Discussion: https://postgr.es/m/CAH2-Wzkypv1R+teZrr71U23J578NnTBt2X8+Y=Odr4pOdW1rXg@mail.gmail.com
2019-02-11 20:41:13 +01:00
|
|
|
recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
|
2007-11-09 00:22:54 +01:00
|
|
|
}
|
2002-07-12 20:43:19 +02:00
|
|
|
}
|
|
|
|
|
Redesign the partition dependency mechanism.
The original setup for dependencies of partitioned objects had
serious problems:
1. It did not verify that a drop cascading to a partition-child object
also cascaded to at least one of the object's partition parents. Now,
normally a child object would share all its dependencies with one or
another parent (e.g. a child index's opclass dependencies would be shared
with the parent index), so that this oversight is usually harmless.
But if some dependency failed to fit this pattern, the child could be
dropped while all its parents remain, creating a logically broken
situation. (It's easy to construct artificial cases that break it,
such as attaching an unrelated extension dependency to the child object
and then dropping the extension. I'm not sure if any less-artificial
cases exist.)
2. Management of partition dependencies during ATTACH/DETACH PARTITION
was complicated and buggy; for example, after detaching a partition
table it was possible to create cases where a formerly-child index
should be dropped and was not, because the correct set of dependencies
had not been reconstructed.
Less seriously, because multiple partition relationships were
represented identically in pg_depend, there was an order-of-traversal
dependency on which partition parent was cited in error messages.
We also had some pre-existing order-of-traversal hazards for error
messages related to internal and extension dependencies. This is
cosmetic to users but causes testing problems.
To fix #1, add a check at the end of the partition tree traversal
to ensure that at least one partition parent got deleted. To fix #2,
establish a new policy that partition dependencies are in addition to,
not instead of, a child object's usual dependencies; in this way
ATTACH/DETACH PARTITION need not cope with adding or removing the
usual dependencies.
To fix the cosmetic problem, distinguish between primary and secondary
partition dependency entries in pg_depend, by giving them different
deptypes. (They behave identically except for having different
priorities for being cited in error messages.) This means that the
former 'I' dependency type is replaced with new 'P' and 'S' types.
This also fixes a longstanding bug that after handling an internal
dependency by recursing to the owning object, findDependentObjects
did not verify that the current target was now scheduled for deletion,
and did not apply the current recursion level's objflags to it.
Perhaps that should be back-patched; but in the back branches it
would only matter if some concurrent transaction had removed the
internal-linkage pg_depend entry before the recursive call found it,
or the recursive call somehow failed to find it, both of which seem
unlikely.
Catversion bump because the contents of pg_depend change for
partitioning relationships.
Patch HEAD only. It's annoying that we're not fixing #2 in v11,
but there seems no practical way to do so given that the problem
is exactly a poor choice of what entries to put in pg_depend.
We can't really fix that while staying compatible with what's
in pg_depend in existing v11 installations.
Discussion: https://postgr.es/m/CAH2-Wzkypv1R+teZrr71U23J578NnTBt2X8+Y=Odr4pOdW1rXg@mail.gmail.com
2019-02-11 20:41:13 +01:00
|
|
|
/*
|
|
|
|
* If this is an index partition, create partition dependencies on
|
|
|
|
* both the parent index and the table. (Note: these must be *in
|
|
|
|
* addition to*, not instead of, all other dependencies. Otherwise
|
|
|
|
* we'll be short some dependencies after DETACH PARTITION.)
|
|
|
|
*/
|
Local partitioned indexes
When CREATE INDEX is run on a partitioned table, create catalog entries
for an index on the partitioned table (which is just a placeholder since
the table proper has no data of its own), and recurse to create actual
indexes on the existing partitions; create them in future partitions
also.
As a convenience gadget, if the new index definition matches some
existing index in partitions, these are picked up and used instead of
creating new ones. Whichever way these indexes come about, they become
attached to the index on the parent table and are dropped alongside it,
and cannot be dropped on isolation unless they are detached first.
To support pg_dump'ing these indexes, add commands
CREATE INDEX ON ONLY <table>
(which creates the index on the parent partitioned table, without
recursing) and
ALTER INDEX ATTACH PARTITION
(which is used after the indexes have been created individually on each
partition, to attach them to the parent index). These reconstruct prior
database state exactly.
Reviewed-by: (in alphabetical order) Peter Eisentraut, Robert Haas, Amit
Langote, Jesper Pedersen, Simon Riggs, David Rowley
Discussion: https://postgr.es/m/20171113170646.gzweigyrgg6pwsg4@alvherre.pgsql
2018-01-19 15:49:22 +01:00
|
|
|
if (OidIsValid(parentIndexRelid))
|
|
|
|
{
|
2020-07-01 10:03:50 +02:00
|
|
|
ObjectAddressSet(referenced, RelationRelationId, parentIndexRelid);
|
Redesign the partition dependency mechanism.
The original setup for dependencies of partitioned objects had
serious problems:
1. It did not verify that a drop cascading to a partition-child object
also cascaded to at least one of the object's partition parents. Now,
normally a child object would share all its dependencies with one or
another parent (e.g. a child index's opclass dependencies would be shared
with the parent index), so that this oversight is usually harmless.
But if some dependency failed to fit this pattern, the child could be
dropped while all its parents remain, creating a logically broken
situation. (It's easy to construct artificial cases that break it,
such as attaching an unrelated extension dependency to the child object
and then dropping the extension. I'm not sure if any less-artificial
cases exist.)
2. Management of partition dependencies during ATTACH/DETACH PARTITION
was complicated and buggy; for example, after detaching a partition
table it was possible to create cases where a formerly-child index
should be dropped and was not, because the correct set of dependencies
had not been reconstructed.
Less seriously, because multiple partition relationships were
represented identically in pg_depend, there was an order-of-traversal
dependency on which partition parent was cited in error messages.
We also had some pre-existing order-of-traversal hazards for error
messages related to internal and extension dependencies. This is
cosmetic to users but causes testing problems.
To fix #1, add a check at the end of the partition tree traversal
to ensure that at least one partition parent got deleted. To fix #2,
establish a new policy that partition dependencies are in addition to,
not instead of, a child object's usual dependencies; in this way
ATTACH/DETACH PARTITION need not cope with adding or removing the
usual dependencies.
To fix the cosmetic problem, distinguish between primary and secondary
partition dependency entries in pg_depend, by giving them different
deptypes. (They behave identically except for having different
priorities for being cited in error messages.) This means that the
former 'I' dependency type is replaced with new 'P' and 'S' types.
This also fixes a longstanding bug that after handling an internal
dependency by recursing to the owning object, findDependentObjects
did not verify that the current target was now scheduled for deletion,
and did not apply the current recursion level's objflags to it.
Perhaps that should be back-patched; but in the back branches it
would only matter if some concurrent transaction had removed the
internal-linkage pg_depend entry before the recursive call found it,
or the recursive call somehow failed to find it, both of which seem
unlikely.
Catversion bump because the contents of pg_depend change for
partitioning relationships.
Patch HEAD only. It's annoying that we're not fixing #2 in v11,
but there seems no practical way to do so given that the problem
is exactly a poor choice of what entries to put in pg_depend.
We can't really fix that while staying compatible with what's
in pg_depend in existing v11 installations.
Discussion: https://postgr.es/m/CAH2-Wzkypv1R+teZrr71U23J578NnTBt2X8+Y=Odr4pOdW1rXg@mail.gmail.com
2019-02-11 20:41:13 +01:00
|
|
|
recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_PRI);
|
|
|
|
|
2020-07-01 10:03:50 +02:00
|
|
|
ObjectAddressSet(referenced, RelationRelationId, heapRelationId);
|
Redesign the partition dependency mechanism.
The original setup for dependencies of partitioned objects had
serious problems:
1. It did not verify that a drop cascading to a partition-child object
also cascaded to at least one of the object's partition parents. Now,
normally a child object would share all its dependencies with one or
another parent (e.g. a child index's opclass dependencies would be shared
with the parent index), so that this oversight is usually harmless.
But if some dependency failed to fit this pattern, the child could be
dropped while all its parents remain, creating a logically broken
situation. (It's easy to construct artificial cases that break it,
such as attaching an unrelated extension dependency to the child object
and then dropping the extension. I'm not sure if any less-artificial
cases exist.)
2. Management of partition dependencies during ATTACH/DETACH PARTITION
was complicated and buggy; for example, after detaching a partition
table it was possible to create cases where a formerly-child index
should be dropped and was not, because the correct set of dependencies
had not been reconstructed.
Less seriously, because multiple partition relationships were
represented identically in pg_depend, there was an order-of-traversal
dependency on which partition parent was cited in error messages.
We also had some pre-existing order-of-traversal hazards for error
messages related to internal and extension dependencies. This is
cosmetic to users but causes testing problems.
To fix #1, add a check at the end of the partition tree traversal
to ensure that at least one partition parent got deleted. To fix #2,
establish a new policy that partition dependencies are in addition to,
not instead of, a child object's usual dependencies; in this way
ATTACH/DETACH PARTITION need not cope with adding or removing the
usual dependencies.
To fix the cosmetic problem, distinguish between primary and secondary
partition dependency entries in pg_depend, by giving them different
deptypes. (They behave identically except for having different
priorities for being cited in error messages.) This means that the
former 'I' dependency type is replaced with new 'P' and 'S' types.
This also fixes a longstanding bug that after handling an internal
dependency by recursing to the owning object, findDependentObjects
did not verify that the current target was now scheduled for deletion,
and did not apply the current recursion level's objflags to it.
Perhaps that should be back-patched; but in the back branches it
would only matter if some concurrent transaction had removed the
internal-linkage pg_depend entry before the recursive call found it,
or the recursive call somehow failed to find it, both of which seem
unlikely.
Catversion bump because the contents of pg_depend change for
partitioning relationships.
Patch HEAD only. It's annoying that we're not fixing #2 in v11,
but there seems no practical way to do so given that the problem
is exactly a poor choice of what entries to put in pg_depend.
We can't really fix that while staying compatible with what's
in pg_depend in existing v11 installations.
Discussion: https://postgr.es/m/CAH2-Wzkypv1R+teZrr71U23J578NnTBt2X8+Y=Odr4pOdW1rXg@mail.gmail.com
2019-02-11 20:41:13 +01:00
|
|
|
recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_SEC);
|
Local partitioned indexes
When CREATE INDEX is run on a partitioned table, create catalog entries
for an index on the partitioned table (which is just a placeholder since
the table proper has no data of its own), and recurse to create actual
indexes on the existing partitions; create them in future partitions
also.
As a convenience gadget, if the new index definition matches some
existing index in partitions, these are picked up and used instead of
creating new ones. Whichever way these indexes come about, they become
attached to the index on the parent table and are dropped alongside it,
and cannot be dropped on isolation unless they are detached first.
To support pg_dump'ing these indexes, add commands
CREATE INDEX ON ONLY <table>
(which creates the index on the parent partitioned table, without
recursing) and
ALTER INDEX ATTACH PARTITION
(which is used after the indexes have been created individually on each
partition, to attach them to the parent index). These reconstruct prior
database state exactly.
Reviewed-by: (in alphabetical order) Peter Eisentraut, Robert Haas, Amit
Langote, Jesper Pedersen, Simon Riggs, David Rowley
Discussion: https://postgr.es/m/20171113170646.gzweigyrgg6pwsg4@alvherre.pgsql
2018-01-19 15:49:22 +01:00
|
|
|
}
|
|
|
|
|
2011-02-12 14:54:13 +01:00
|
|
|
/* Store dependency on collations */
|
2011-04-22 23:43:18 +02:00
|
|
|
/* The default collation is pinned, so don't bother recording it */
|
2018-04-12 15:37:22 +02:00
|
|
|
for (i = 0; i < indexInfo->ii_NumIndexKeyAttrs; i++)
|
2011-02-12 14:54:13 +01:00
|
|
|
{
|
2011-04-22 23:43:18 +02:00
|
|
|
if (OidIsValid(collationObjectId[i]) &&
|
|
|
|
collationObjectId[i] != DEFAULT_COLLATION_OID)
|
2011-02-12 14:54:13 +01:00
|
|
|
{
|
2020-07-01 10:03:50 +02:00
|
|
|
ObjectAddressSet(referenced, CollationRelationId,
|
|
|
|
collationObjectId[i]);
|
2011-02-12 14:54:13 +01:00
|
|
|
recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2002-07-30 00:14:11 +02:00
|
|
|
/* Store dependency on operator classes */
|
2018-04-07 22:00:39 +02:00
|
|
|
for (i = 0; i < indexInfo->ii_NumIndexKeyAttrs; i++)
|
2002-07-30 00:14:11 +02:00
|
|
|
{
|
2020-07-01 10:03:50 +02:00
|
|
|
ObjectAddressSet(referenced, OperatorClassRelationId, classObjectId[i]);
|
2002-07-30 00:14:11 +02:00
|
|
|
recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
|
|
|
|
}
|
|
|
|
|
2003-05-28 18:04:02 +02:00
|
|
|
/* Store dependencies on anything mentioned in index expressions */
|
|
|
|
if (indexInfo->ii_Expressions)
|
2002-07-12 20:43:19 +02:00
|
|
|
{
|
2003-05-28 18:04:02 +02:00
|
|
|
recordDependencyOnSingleRelExpr(&myself,
|
Phase 3 of pgindent updates.
Don't move parenthesized lines to the left, even if that means they
flow past the right margin.
By default, BSD indent lines up statement continuation lines that are
within parentheses so that they start just to the right of the preceding
left parenthesis. However, traditionally, if that resulted in the
continuation line extending to the right of the desired right margin,
then indent would push it left just far enough to not overrun the margin,
if it could do so without making the continuation line start to the left of
the current statement indent. That makes for a weird mix of indentations
unless one has been completely rigid about never violating the 80-column
limit.
This behavior has been pretty universally panned by Postgres developers.
Hence, disable it with indent's new -lpl switch, so that parenthesized
lines are always lined up with the preceding left paren.
This patch is much less interesting than the first round of indent
changes, but also bulkier, so I thought it best to separate the effects.
Discussion: https://postgr.es/m/E1dAmxK-0006EE-1r@gemulon.postgresql.org
Discussion: https://postgr.es/m/30527.1495162840@sss.pgh.pa.us
2017-06-21 21:35:54 +02:00
|
|
|
(Node *) indexInfo->ii_Expressions,
|
2003-05-28 18:04:02 +02:00
|
|
|
heapRelationId,
|
|
|
|
DEPENDENCY_NORMAL,
|
Implement table partitioning.
Table partitioning is like table inheritance and reuses much of the
existing infrastructure, but there are some important differences.
The parent is called a partitioned table and is always empty; it may
not have indexes or non-inherited constraints, since those make no
sense for a relation with no data of its own. The children are called
partitions and contain all of the actual data. Each partition has an
implicit partitioning constraint. Multiple inheritance is not
allowed, and partitioning and inheritance can't be mixed. Partitions
can't have extra columns and may not allow nulls unless the parent
does. Tuples inserted into the parent are automatically routed to the
correct partition, so tuple-routing ON INSERT triggers are not needed.
Tuple routing isn't yet supported for partitions which are foreign
tables, and it doesn't handle updates that cross partition boundaries.
Currently, tables can be range-partitioned or list-partitioned. List
partitioning is limited to a single column, but range partitioning can
involve multiple columns. A partitioning "column" can be an
expression.
Because table partitioning is less general than table inheritance, it
is hoped that it will be easier to reason about properties of
partitions, and therefore that this will serve as a better foundation
for a variety of possible optimizations, including query planner
optimizations. The tuple routing based which this patch does based on
the implicit partitioning constraints is an example of this, but it
seems likely that many other useful optimizations are also possible.
Amit Langote, reviewed and tested by Robert Haas, Ashutosh Bapat,
Amit Kapila, Rajkumar Raghuwanshi, Corey Huinker, Jaime Casanova,
Rushabh Lathia, Erik Rijkers, among others. Minor revisions by me.
2016-12-07 19:17:43 +01:00
|
|
|
DEPENDENCY_AUTO, false);
|
2003-05-28 18:04:02 +02:00
|
|
|
}
|
2002-07-12 20:43:19 +02:00
|
|
|
|
2003-05-28 18:04:02 +02:00
|
|
|
/* Store dependencies on anything mentioned in predicate */
|
|
|
|
if (indexInfo->ii_Predicate)
|
|
|
|
{
|
|
|
|
recordDependencyOnSingleRelExpr(&myself,
|
2005-10-15 04:49:52 +02:00
|
|
|
(Node *) indexInfo->ii_Predicate,
|
2003-05-28 18:04:02 +02:00
|
|
|
heapRelationId,
|
|
|
|
DEPENDENCY_NORMAL,
|
Implement table partitioning.
Table partitioning is like table inheritance and reuses much of the
existing infrastructure, but there are some important differences.
The parent is called a partitioned table and is always empty; it may
not have indexes or non-inherited constraints, since those make no
sense for a relation with no data of its own. The children are called
partitions and contain all of the actual data. Each partition has an
implicit partitioning constraint. Multiple inheritance is not
allowed, and partitioning and inheritance can't be mixed. Partitions
can't have extra columns and may not allow nulls unless the parent
does. Tuples inserted into the parent are automatically routed to the
correct partition, so tuple-routing ON INSERT triggers are not needed.
Tuple routing isn't yet supported for partitions which are foreign
tables, and it doesn't handle updates that cross partition boundaries.
Currently, tables can be range-partitioned or list-partitioned. List
partitioning is limited to a single column, but range partitioning can
involve multiple columns. A partitioning "column" can be an
expression.
Because table partitioning is less general than table inheritance, it
is hoped that it will be easier to reason about properties of
partitions, and therefore that this will serve as a better foundation
for a variety of possible optimizations, including query planner
optimizations. The tuple routing based which this patch does based on
the implicit partitioning constraints is an example of this, but it
seems likely that many other useful optimizations are also possible.
Amit Langote, reviewed and tested by Robert Haas, Ashutosh Bapat,
Amit Kapila, Rajkumar Raghuwanshi, Corey Huinker, Jaime Casanova,
Rushabh Lathia, Erik Rijkers, among others. Minor revisions by me.
2016-12-07 19:17:43 +01:00
|
|
|
DEPENDENCY_AUTO, false);
|
2002-07-12 20:43:19 +02:00
|
|
|
}
|
|
|
|
}
|
2009-07-29 22:56:21 +02:00
|
|
|
else
|
|
|
|
{
|
|
|
|
/* Bootstrap mode - assert we weren't asked for constraint support */
|
2017-11-14 15:19:05 +01:00
|
|
|
Assert((flags & INDEX_CREATE_ADD_CONSTRAINT) == 0);
|
2009-07-29 22:56:21 +02:00
|
|
|
}
|
2002-07-12 20:43:19 +02:00
|
|
|
|
2012-10-23 23:07:26 +02:00
|
|
|
/* Post creation hook for new index */
|
2013-03-07 02:52:06 +01:00
|
|
|
InvokeObjectPostCreateHookArg(RelationRelationId,
|
|
|
|
indexRelationId, 0, is_internal);
|
2012-10-23 23:07:26 +02:00
|
|
|
|
2002-07-12 20:43:19 +02:00
|
|
|
/*
|
2005-06-25 18:53:49 +02:00
|
|
|
* Advance the command counter so that we can see the newly-entered
|
|
|
|
* catalog tuples for the index.
|
1997-09-07 07:04:48 +02:00
|
|
|
*/
|
2001-10-07 01:21:45 +02:00
|
|
|
CommandCounterIncrement();
|
|
|
|
|
2005-06-25 18:53:49 +02:00
|
|
|
/*
|
2005-10-15 04:49:52 +02:00
|
|
|
* In bootstrap mode, we have to fill in the index strategy structure with
|
|
|
|
* information from the catalogs. If we aren't bootstrapping, then the
|
|
|
|
* relcache entry has already been rebuilt thanks to sinval update during
|
|
|
|
* CommandCounterIncrement.
|
2005-06-25 18:53:49 +02:00
|
|
|
*/
|
|
|
|
if (IsBootstrapProcessingMode())
|
|
|
|
RelationInitIndexAccessInfo(indexRelation);
|
|
|
|
else
|
|
|
|
Assert(indexRelation->rd_indexcxt != NULL);
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2018-04-07 22:00:39 +02:00
|
|
|
indexRelation->rd_index->indnkeyatts = indexInfo->ii_NumIndexKeyAttrs;
|
|
|
|
|
Implement operator class parameters
PostgreSQL provides set of template index access methods, where opclasses have
much freedom in the semantics of indexing. These index AMs are GiST, GIN,
SP-GiST and BRIN. There opclasses define representation of keys, operations on
them and supported search strategies. So, it's natural that opclasses may be
faced some tradeoffs, which require user-side decision. This commit implements
opclass parameters allowing users to set some values, which tell opclass how to
index the particular dataset.
This commit doesn't introduce new storage in system catalog. Instead it uses
pg_attribute.attoptions, which is used for table column storage options but
unused for index attributes.
In order to evade changing signature of each opclass support function, we
implement unified way to pass options to opclass support functions. Options
are set to fn_expr as the constant bytea expression. It's possible due to the
fact that opclass support functions are executed outside of expressions, so
fn_expr is unused for them.
This commit comes with some examples of opclass options usage. We parametrize
signature length in GiST. That applies to multiple opclasses: tsvector_ops,
gist__intbig_ops, gist_ltree_ops, gist__ltree_ops, gist_trgm_ops and
gist_hstore_ops. Also we parametrize maximum number of integer ranges for
gist__int_ops. However, the main future usage of this feature is expected
to be json, where users would be able to specify which way to index particular
json parts.
Catversion is bumped.
Discussion: https://postgr.es/m/d22c3a18-31c7-1879-fc11-4c1ce2f5e5af%40postgrespro.ru
Author: Nikita Glukhov, revised by me
Reviwed-by: Nikolay Shaplov, Robert Haas, Tom Lane, Tomas Vondra, Alvaro Herrera
2020-03-30 18:17:11 +02:00
|
|
|
/* Validate opclass-specific options */
|
|
|
|
if (indexInfo->ii_OpclassOptions)
|
|
|
|
for (i = 0; i < indexInfo->ii_NumIndexKeyAttrs; i++)
|
|
|
|
(void) index_opclass_options(indexRelation, i + 1,
|
|
|
|
indexInfo->ii_OpclassOptions[i],
|
|
|
|
true);
|
|
|
|
|
1997-09-07 07:04:48 +02:00
|
|
|
/*
|
2005-10-15 04:49:52 +02:00
|
|
|
* If this is bootstrap (initdb) time, then we don't actually fill in the
|
|
|
|
* index yet. We'll be creating more indexes and classes later, so we
|
|
|
|
* delay filling them in until just before we're done with bootstrapping.
|
2017-11-14 15:19:05 +01:00
|
|
|
* Similarly, if the caller specified to skip the build then filling the
|
|
|
|
* index is delayed till later (ALTER TABLE can save work in some cases
|
|
|
|
* with this). Otherwise, we call the AM routine that constructs the
|
|
|
|
* index.
|
1997-09-07 07:04:48 +02:00
|
|
|
*/
|
|
|
|
if (IsBootstrapProcessingMode())
|
1997-03-19 08:44:45 +01:00
|
|
|
{
|
2005-08-12 03:36:05 +02:00
|
|
|
index_register(heapRelationId, indexRelationId, indexInfo);
|
1997-09-07 07:04:48 +02:00
|
|
|
}
|
2017-11-14 15:19:05 +01:00
|
|
|
else if ((flags & INDEX_CREATE_SKIP_BUILD) != 0)
|
2004-05-05 06:48:48 +02:00
|
|
|
{
|
2006-05-11 01:18:39 +02:00
|
|
|
/*
|
|
|
|
* Caller is responsible for filling the index later on. However,
|
2006-10-04 02:30:14 +02:00
|
|
|
* we'd better make sure that the heap relation is correctly marked as
|
|
|
|
* having an index.
|
2006-05-11 01:18:39 +02:00
|
|
|
*/
|
|
|
|
index_update_stats(heapRelation,
|
|
|
|
true,
|
2011-10-14 23:23:01 +02:00
|
|
|
-1.0);
|
2006-05-11 01:18:39 +02:00
|
|
|
/* Make the above update visible */
|
|
|
|
CommandCounterIncrement();
|
2004-05-05 06:48:48 +02:00
|
|
|
}
|
1997-09-07 07:04:48 +02:00
|
|
|
else
|
2004-05-05 06:48:48 +02:00
|
|
|
{
|
2019-01-23 23:57:09 +01:00
|
|
|
index_build(heapRelation, indexRelation, indexInfo, false, true);
|
2004-05-05 06:48:48 +02:00
|
|
|
}
|
2001-08-10 20:57:42 +02:00
|
|
|
|
2006-05-11 01:18:39 +02:00
|
|
|
/*
|
2011-01-25 21:42:03 +01:00
|
|
|
* Close the index; but we keep the lock that we acquired above until end
|
2014-05-06 18:12:18 +02:00
|
|
|
* of transaction. Closing the heap is caller's responsibility.
|
2006-05-11 01:18:39 +02:00
|
|
|
*/
|
2006-07-31 22:09:10 +02:00
|
|
|
index_close(indexRelation, NoLock);
|
2006-05-11 01:18:39 +02:00
|
|
|
|
2005-08-12 03:36:05 +02:00
|
|
|
return indexRelationId;
|
1996-07-09 08:22:35 +02:00
|
|
|
}
|
|
|
|
|
2019-03-29 08:25:20 +01:00
|
|
|
/*
|
|
|
|
* index_concurrently_create_copy
|
|
|
|
*
|
|
|
|
* Create concurrently an index based on the definition of the one provided by
|
|
|
|
* caller. The index is inserted into catalogs and needs to be built later
|
|
|
|
* on. This is called during concurrent reindex processing.
|
|
|
|
*/
|
|
|
|
Oid
|
|
|
|
index_concurrently_create_copy(Relation heapRelation, Oid oldIndexId, const char *newName)
|
|
|
|
{
|
|
|
|
Relation indexRelation;
|
2019-07-29 02:58:49 +02:00
|
|
|
IndexInfo *oldInfo,
|
|
|
|
*newInfo;
|
2019-03-29 08:25:20 +01:00
|
|
|
Oid newIndexId = InvalidOid;
|
|
|
|
HeapTuple indexTuple,
|
|
|
|
classTuple;
|
|
|
|
Datum indclassDatum,
|
|
|
|
colOptionDatum,
|
|
|
|
optionDatum;
|
|
|
|
oidvector *indclass;
|
|
|
|
int2vector *indcoloptions;
|
|
|
|
bool isnull;
|
|
|
|
List *indexColNames = NIL;
|
2019-07-29 02:58:49 +02:00
|
|
|
List *indexExprs = NIL;
|
|
|
|
List *indexPreds = NIL;
|
2019-03-29 08:25:20 +01:00
|
|
|
|
|
|
|
indexRelation = index_open(oldIndexId, RowExclusiveLock);
|
|
|
|
|
2019-07-29 02:58:49 +02:00
|
|
|
/* The new index needs some information from the old index */
|
|
|
|
oldInfo = BuildIndexInfo(indexRelation);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Concurrent build of an index with exclusion constraints is not
|
|
|
|
* supported.
|
|
|
|
*/
|
|
|
|
if (oldInfo->ii_ExclusionOps != NULL)
|
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
|
|
|
errmsg("concurrent index creation for exclusion constraints is not supported")));
|
2019-03-29 08:25:20 +01:00
|
|
|
|
|
|
|
/* Get the array of class and column options IDs from index info */
|
|
|
|
indexTuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(oldIndexId));
|
|
|
|
if (!HeapTupleIsValid(indexTuple))
|
|
|
|
elog(ERROR, "cache lookup failed for index %u", oldIndexId);
|
|
|
|
indclassDatum = SysCacheGetAttr(INDEXRELID, indexTuple,
|
|
|
|
Anum_pg_index_indclass, &isnull);
|
|
|
|
Assert(!isnull);
|
|
|
|
indclass = (oidvector *) DatumGetPointer(indclassDatum);
|
|
|
|
|
|
|
|
colOptionDatum = SysCacheGetAttr(INDEXRELID, indexTuple,
|
|
|
|
Anum_pg_index_indoption, &isnull);
|
|
|
|
Assert(!isnull);
|
|
|
|
indcoloptions = (int2vector *) DatumGetPointer(colOptionDatum);
|
|
|
|
|
|
|
|
/* Fetch options of index if any */
|
|
|
|
classTuple = SearchSysCache1(RELOID, oldIndexId);
|
|
|
|
if (!HeapTupleIsValid(classTuple))
|
|
|
|
elog(ERROR, "cache lookup failed for relation %u", oldIndexId);
|
|
|
|
optionDatum = SysCacheGetAttr(RELOID, classTuple,
|
|
|
|
Anum_pg_class_reloptions, &isnull);
|
|
|
|
|
|
|
|
/*
|
2019-07-29 02:58:49 +02:00
|
|
|
* Fetch the list of expressions and predicates directly from the
|
|
|
|
* catalogs. This cannot rely on the information from IndexInfo of the
|
|
|
|
* old index as these have been flattened for the planner.
|
|
|
|
*/
|
|
|
|
if (oldInfo->ii_Expressions != NIL)
|
|
|
|
{
|
|
|
|
Datum exprDatum;
|
|
|
|
char *exprString;
|
|
|
|
|
|
|
|
exprDatum = SysCacheGetAttr(INDEXRELID, indexTuple,
|
|
|
|
Anum_pg_index_indexprs, &isnull);
|
|
|
|
Assert(!isnull);
|
|
|
|
exprString = TextDatumGetCString(exprDatum);
|
|
|
|
indexExprs = (List *) stringToNode(exprString);
|
|
|
|
pfree(exprString);
|
|
|
|
}
|
|
|
|
if (oldInfo->ii_Predicate != NIL)
|
|
|
|
{
|
|
|
|
Datum predDatum;
|
|
|
|
char *predString;
|
|
|
|
|
|
|
|
predDatum = SysCacheGetAttr(INDEXRELID, indexTuple,
|
|
|
|
Anum_pg_index_indpred, &isnull);
|
|
|
|
Assert(!isnull);
|
|
|
|
predString = TextDatumGetCString(predDatum);
|
|
|
|
indexPreds = (List *) stringToNode(predString);
|
|
|
|
|
|
|
|
/* Also convert to implicit-AND format */
|
|
|
|
indexPreds = make_ands_implicit((Expr *) indexPreds);
|
|
|
|
pfree(predString);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Build the index information for the new index. Note that rebuild of
|
|
|
|
* indexes with exclusion constraints is not supported, hence there is no
|
|
|
|
* need to fill all the ii_Exclusion* fields.
|
|
|
|
*/
|
|
|
|
newInfo = makeIndexInfo(oldInfo->ii_NumIndexAttrs,
|
|
|
|
oldInfo->ii_NumIndexKeyAttrs,
|
|
|
|
oldInfo->ii_Am,
|
|
|
|
indexExprs,
|
|
|
|
indexPreds,
|
|
|
|
oldInfo->ii_Unique,
|
|
|
|
false, /* not ready for inserts */
|
|
|
|
true);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Extract the list of column names and the column numbers for the new
|
|
|
|
* index information. All this information will be used for the index
|
|
|
|
* creation.
|
2019-03-29 08:25:20 +01:00
|
|
|
*/
|
2019-07-29 02:58:49 +02:00
|
|
|
for (int i = 0; i < oldInfo->ii_NumIndexAttrs; i++)
|
2019-03-29 08:25:20 +01:00
|
|
|
{
|
|
|
|
TupleDesc indexTupDesc = RelationGetDescr(indexRelation);
|
|
|
|
Form_pg_attribute att = TupleDescAttr(indexTupDesc, i);
|
|
|
|
|
|
|
|
indexColNames = lappend(indexColNames, NameStr(att->attname));
|
2019-07-29 02:58:49 +02:00
|
|
|
newInfo->ii_IndexAttrNumbers[i] = oldInfo->ii_IndexAttrNumbers[i];
|
2019-03-29 08:25:20 +01:00
|
|
|
}
|
|
|
|
|
2019-04-12 08:36:05 +02:00
|
|
|
/*
|
|
|
|
* Now create the new index.
|
|
|
|
*
|
|
|
|
* For a partition index, we adjust the partition dependency later, to
|
|
|
|
* ensure a consistent state at all times. That is why parentIndexRelid
|
|
|
|
* is not set here.
|
|
|
|
*/
|
2019-03-29 08:25:20 +01:00
|
|
|
newIndexId = index_create(heapRelation,
|
|
|
|
newName,
|
|
|
|
InvalidOid, /* indexRelationId */
|
|
|
|
InvalidOid, /* parentIndexRelid */
|
|
|
|
InvalidOid, /* parentConstraintId */
|
|
|
|
InvalidOid, /* relFileNode */
|
2019-07-29 02:58:49 +02:00
|
|
|
newInfo,
|
2019-03-29 08:25:20 +01:00
|
|
|
indexColNames,
|
|
|
|
indexRelation->rd_rel->relam,
|
|
|
|
indexRelation->rd_rel->reltablespace,
|
|
|
|
indexRelation->rd_indcollation,
|
|
|
|
indclass->values,
|
|
|
|
indcoloptions->values,
|
|
|
|
optionDatum,
|
|
|
|
INDEX_CREATE_SKIP_BUILD | INDEX_CREATE_CONCURRENT,
|
|
|
|
0,
|
2019-05-22 18:55:34 +02:00
|
|
|
true, /* allow table to be a system catalog? */
|
|
|
|
false, /* is_internal? */
|
2019-03-29 08:25:20 +01:00
|
|
|
NULL);
|
|
|
|
|
|
|
|
/* Close the relations used and clean up */
|
|
|
|
index_close(indexRelation, NoLock);
|
|
|
|
ReleaseSysCache(indexTuple);
|
|
|
|
ReleaseSysCache(classTuple);
|
|
|
|
|
|
|
|
return newIndexId;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* index_concurrently_build
|
|
|
|
*
|
|
|
|
* Build index for a concurrent operation. Low-level locks are taken when
|
|
|
|
* this operation is performed to prevent only schema changes, but they need
|
|
|
|
* to be kept until the end of the transaction performing this operation.
|
|
|
|
* 'indexOid' refers to an index relation OID already created as part of
|
|
|
|
* previous processing, and 'heapOid' refers to its parent heap relation.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
index_concurrently_build(Oid heapRelationId,
|
|
|
|
Oid indexRelationId)
|
|
|
|
{
|
|
|
|
Relation heapRel;
|
|
|
|
Relation indexRelation;
|
|
|
|
IndexInfo *indexInfo;
|
|
|
|
|
|
|
|
/* This had better make sure that a snapshot is active */
|
|
|
|
Assert(ActiveSnapshotSet());
|
|
|
|
|
|
|
|
/* Open and lock the parent heap relation */
|
|
|
|
heapRel = table_open(heapRelationId, ShareUpdateExclusiveLock);
|
|
|
|
|
|
|
|
/* And the target index relation */
|
|
|
|
indexRelation = index_open(indexRelationId, RowExclusiveLock);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We have to re-build the IndexInfo struct, since it was lost in the
|
|
|
|
* commit of the transaction where this concurrent index was created at
|
|
|
|
* the catalog level.
|
|
|
|
*/
|
|
|
|
indexInfo = BuildIndexInfo(indexRelation);
|
|
|
|
Assert(!indexInfo->ii_ReadyForInserts);
|
|
|
|
indexInfo->ii_Concurrent = true;
|
|
|
|
indexInfo->ii_BrokenHotChain = false;
|
|
|
|
|
|
|
|
/* Now build the index */
|
|
|
|
index_build(heapRel, indexRelation, indexInfo, false, true);
|
|
|
|
|
|
|
|
/* Close both the relations, but keep the locks */
|
|
|
|
table_close(heapRel, NoLock);
|
|
|
|
index_close(indexRelation, NoLock);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Update the pg_index row to mark the index as ready for inserts. Once we
|
|
|
|
* commit this transaction, any new transactions that open the table must
|
|
|
|
* insert new entries into the index for insertions and non-HOT updates.
|
|
|
|
*/
|
|
|
|
index_set_state_flags(indexRelationId, INDEX_CREATE_SET_READY);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* index_concurrently_swap
|
|
|
|
*
|
|
|
|
* Swap name, dependencies, and constraints of the old index over to the new
|
|
|
|
* index, while marking the old index as invalid and the new as valid.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
index_concurrently_swap(Oid newIndexId, Oid oldIndexId, const char *oldName)
|
|
|
|
{
|
|
|
|
Relation pg_class,
|
|
|
|
pg_index,
|
|
|
|
pg_constraint,
|
|
|
|
pg_trigger;
|
|
|
|
Relation oldClassRel,
|
|
|
|
newClassRel;
|
|
|
|
HeapTuple oldClassTuple,
|
|
|
|
newClassTuple;
|
|
|
|
Form_pg_class oldClassForm,
|
|
|
|
newClassForm;
|
|
|
|
HeapTuple oldIndexTuple,
|
|
|
|
newIndexTuple;
|
|
|
|
Form_pg_index oldIndexForm,
|
|
|
|
newIndexForm;
|
2019-10-29 03:08:09 +01:00
|
|
|
bool isPartition;
|
2019-03-29 08:25:20 +01:00
|
|
|
Oid indexConstraintOid;
|
|
|
|
List *constraintOids = NIL;
|
|
|
|
ListCell *lc;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Take a necessary lock on the old and new index before swapping them.
|
|
|
|
*/
|
|
|
|
oldClassRel = relation_open(oldIndexId, ShareUpdateExclusiveLock);
|
|
|
|
newClassRel = relation_open(newIndexId, ShareUpdateExclusiveLock);
|
|
|
|
|
|
|
|
/* Now swap names and dependencies of those indexes */
|
|
|
|
pg_class = table_open(RelationRelationId, RowExclusiveLock);
|
|
|
|
|
|
|
|
oldClassTuple = SearchSysCacheCopy1(RELOID,
|
|
|
|
ObjectIdGetDatum(oldIndexId));
|
|
|
|
if (!HeapTupleIsValid(oldClassTuple))
|
|
|
|
elog(ERROR, "could not find tuple for relation %u", oldIndexId);
|
|
|
|
newClassTuple = SearchSysCacheCopy1(RELOID,
|
|
|
|
ObjectIdGetDatum(newIndexId));
|
|
|
|
if (!HeapTupleIsValid(newClassTuple))
|
|
|
|
elog(ERROR, "could not find tuple for relation %u", newIndexId);
|
|
|
|
|
|
|
|
oldClassForm = (Form_pg_class) GETSTRUCT(oldClassTuple);
|
|
|
|
newClassForm = (Form_pg_class) GETSTRUCT(newClassTuple);
|
|
|
|
|
|
|
|
/* Swap the names */
|
|
|
|
namestrcpy(&newClassForm->relname, NameStr(oldClassForm->relname));
|
|
|
|
namestrcpy(&oldClassForm->relname, oldName);
|
|
|
|
|
2019-10-29 03:08:09 +01:00
|
|
|
/* Swap the partition flags to track inheritance properly */
|
|
|
|
isPartition = newClassForm->relispartition;
|
2019-04-12 08:36:05 +02:00
|
|
|
newClassForm->relispartition = oldClassForm->relispartition;
|
2019-10-29 03:08:09 +01:00
|
|
|
oldClassForm->relispartition = isPartition;
|
2019-04-12 08:36:05 +02:00
|
|
|
|
2019-03-29 08:25:20 +01:00
|
|
|
CatalogTupleUpdate(pg_class, &oldClassTuple->t_self, oldClassTuple);
|
|
|
|
CatalogTupleUpdate(pg_class, &newClassTuple->t_self, newClassTuple);
|
|
|
|
|
|
|
|
heap_freetuple(oldClassTuple);
|
|
|
|
heap_freetuple(newClassTuple);
|
|
|
|
|
|
|
|
/* Now swap index info */
|
|
|
|
pg_index = table_open(IndexRelationId, RowExclusiveLock);
|
|
|
|
|
|
|
|
oldIndexTuple = SearchSysCacheCopy1(INDEXRELID,
|
|
|
|
ObjectIdGetDatum(oldIndexId));
|
|
|
|
if (!HeapTupleIsValid(oldIndexTuple))
|
|
|
|
elog(ERROR, "could not find tuple for relation %u", oldIndexId);
|
|
|
|
newIndexTuple = SearchSysCacheCopy1(INDEXRELID,
|
|
|
|
ObjectIdGetDatum(newIndexId));
|
|
|
|
if (!HeapTupleIsValid(newIndexTuple))
|
|
|
|
elog(ERROR, "could not find tuple for relation %u", newIndexId);
|
|
|
|
|
|
|
|
oldIndexForm = (Form_pg_index) GETSTRUCT(oldIndexTuple);
|
|
|
|
newIndexForm = (Form_pg_index) GETSTRUCT(newIndexTuple);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Copy constraint flags from the old index. This is safe because the old
|
|
|
|
* index guaranteed uniqueness.
|
|
|
|
*/
|
|
|
|
newIndexForm->indisprimary = oldIndexForm->indisprimary;
|
|
|
|
oldIndexForm->indisprimary = false;
|
|
|
|
newIndexForm->indisexclusion = oldIndexForm->indisexclusion;
|
|
|
|
oldIndexForm->indisexclusion = false;
|
|
|
|
newIndexForm->indimmediate = oldIndexForm->indimmediate;
|
|
|
|
oldIndexForm->indimmediate = true;
|
|
|
|
|
2020-06-05 03:26:02 +02:00
|
|
|
/* Preserve indisreplident in the new index */
|
|
|
|
newIndexForm->indisreplident = oldIndexForm->indisreplident;
|
|
|
|
oldIndexForm->indisreplident = false;
|
|
|
|
|
2020-03-03 02:12:28 +01:00
|
|
|
/* Preserve indisclustered in the new index */
|
|
|
|
newIndexForm->indisclustered = oldIndexForm->indisclustered;
|
|
|
|
|
|
|
|
/*
|
2020-04-21 21:58:42 +02:00
|
|
|
* Mark the new index as valid, and the old index as invalid similarly to
|
|
|
|
* what index_set_state_flags() does.
|
2020-03-03 02:12:28 +01:00
|
|
|
*/
|
2019-03-29 08:25:20 +01:00
|
|
|
newIndexForm->indisvalid = true;
|
|
|
|
oldIndexForm->indisvalid = false;
|
|
|
|
oldIndexForm->indisclustered = false;
|
|
|
|
|
|
|
|
CatalogTupleUpdate(pg_index, &oldIndexTuple->t_self, oldIndexTuple);
|
|
|
|
CatalogTupleUpdate(pg_index, &newIndexTuple->t_self, newIndexTuple);
|
|
|
|
|
|
|
|
heap_freetuple(oldIndexTuple);
|
|
|
|
heap_freetuple(newIndexTuple);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Move constraints and triggers over to the new index
|
|
|
|
*/
|
|
|
|
|
|
|
|
constraintOids = get_index_ref_constraints(oldIndexId);
|
|
|
|
|
|
|
|
indexConstraintOid = get_index_constraint(oldIndexId);
|
|
|
|
|
|
|
|
if (OidIsValid(indexConstraintOid))
|
|
|
|
constraintOids = lappend_oid(constraintOids, indexConstraintOid);
|
|
|
|
|
|
|
|
pg_constraint = table_open(ConstraintRelationId, RowExclusiveLock);
|
|
|
|
pg_trigger = table_open(TriggerRelationId, RowExclusiveLock);
|
|
|
|
|
|
|
|
foreach(lc, constraintOids)
|
|
|
|
{
|
|
|
|
HeapTuple constraintTuple,
|
|
|
|
triggerTuple;
|
|
|
|
Form_pg_constraint conForm;
|
|
|
|
ScanKeyData key[1];
|
|
|
|
SysScanDesc scan;
|
|
|
|
Oid constraintOid = lfirst_oid(lc);
|
|
|
|
|
|
|
|
/* Move the constraint from the old to the new index */
|
|
|
|
constraintTuple = SearchSysCacheCopy1(CONSTROID,
|
|
|
|
ObjectIdGetDatum(constraintOid));
|
|
|
|
if (!HeapTupleIsValid(constraintTuple))
|
|
|
|
elog(ERROR, "could not find tuple for constraint %u", constraintOid);
|
|
|
|
|
|
|
|
conForm = ((Form_pg_constraint) GETSTRUCT(constraintTuple));
|
|
|
|
|
|
|
|
if (conForm->conindid == oldIndexId)
|
|
|
|
{
|
|
|
|
conForm->conindid = newIndexId;
|
|
|
|
|
|
|
|
CatalogTupleUpdate(pg_constraint, &constraintTuple->t_self, constraintTuple);
|
|
|
|
}
|
|
|
|
|
|
|
|
heap_freetuple(constraintTuple);
|
|
|
|
|
|
|
|
/* Search for trigger records */
|
|
|
|
ScanKeyInit(&key[0],
|
|
|
|
Anum_pg_trigger_tgconstraint,
|
|
|
|
BTEqualStrategyNumber, F_OIDEQ,
|
|
|
|
ObjectIdGetDatum(constraintOid));
|
|
|
|
|
|
|
|
scan = systable_beginscan(pg_trigger, TriggerConstraintIndexId, true,
|
|
|
|
NULL, 1, key);
|
|
|
|
|
|
|
|
while (HeapTupleIsValid((triggerTuple = systable_getnext(scan))))
|
|
|
|
{
|
|
|
|
Form_pg_trigger tgForm = (Form_pg_trigger) GETSTRUCT(triggerTuple);
|
|
|
|
|
|
|
|
if (tgForm->tgconstrindid != oldIndexId)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
/* Make a modifiable copy */
|
|
|
|
triggerTuple = heap_copytuple(triggerTuple);
|
|
|
|
tgForm = (Form_pg_trigger) GETSTRUCT(triggerTuple);
|
|
|
|
|
|
|
|
tgForm->tgconstrindid = newIndexId;
|
|
|
|
|
|
|
|
CatalogTupleUpdate(pg_trigger, &triggerTuple->t_self, triggerTuple);
|
|
|
|
|
|
|
|
heap_freetuple(triggerTuple);
|
|
|
|
}
|
|
|
|
|
|
|
|
systable_endscan(scan);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Move comment if any
|
|
|
|
*/
|
|
|
|
{
|
|
|
|
Relation description;
|
|
|
|
ScanKeyData skey[3];
|
|
|
|
SysScanDesc sd;
|
|
|
|
HeapTuple tuple;
|
|
|
|
Datum values[Natts_pg_description] = {0};
|
|
|
|
bool nulls[Natts_pg_description] = {0};
|
|
|
|
bool replaces[Natts_pg_description] = {0};
|
|
|
|
|
|
|
|
values[Anum_pg_description_objoid - 1] = ObjectIdGetDatum(newIndexId);
|
|
|
|
replaces[Anum_pg_description_objoid - 1] = true;
|
|
|
|
|
|
|
|
ScanKeyInit(&skey[0],
|
|
|
|
Anum_pg_description_objoid,
|
|
|
|
BTEqualStrategyNumber, F_OIDEQ,
|
|
|
|
ObjectIdGetDatum(oldIndexId));
|
|
|
|
ScanKeyInit(&skey[1],
|
|
|
|
Anum_pg_description_classoid,
|
|
|
|
BTEqualStrategyNumber, F_OIDEQ,
|
|
|
|
ObjectIdGetDatum(RelationRelationId));
|
|
|
|
ScanKeyInit(&skey[2],
|
|
|
|
Anum_pg_description_objsubid,
|
|
|
|
BTEqualStrategyNumber, F_INT4EQ,
|
|
|
|
Int32GetDatum(0));
|
|
|
|
|
|
|
|
description = table_open(DescriptionRelationId, RowExclusiveLock);
|
|
|
|
|
|
|
|
sd = systable_beginscan(description, DescriptionObjIndexId, true,
|
|
|
|
NULL, 3, skey);
|
|
|
|
|
|
|
|
while ((tuple = systable_getnext(sd)) != NULL)
|
|
|
|
{
|
|
|
|
tuple = heap_modify_tuple(tuple, RelationGetDescr(description),
|
|
|
|
values, nulls, replaces);
|
|
|
|
CatalogTupleUpdate(description, &tuple->t_self, tuple);
|
|
|
|
|
2019-05-22 18:55:34 +02:00
|
|
|
break; /* Assume there can be only one match */
|
2019-03-29 08:25:20 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
systable_endscan(sd);
|
|
|
|
table_close(description, NoLock);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2019-04-12 08:36:05 +02:00
|
|
|
* Swap inheritance relationship with parent index
|
2019-03-29 08:25:20 +01:00
|
|
|
*/
|
2019-04-12 08:36:05 +02:00
|
|
|
if (get_rel_relispartition(oldIndexId))
|
2019-03-29 08:25:20 +01:00
|
|
|
{
|
2019-05-22 18:55:34 +02:00
|
|
|
List *ancestors = get_partition_ancestors(oldIndexId);
|
|
|
|
Oid parentIndexRelid = linitial_oid(ancestors);
|
2019-03-29 08:25:20 +01:00
|
|
|
|
2019-04-12 08:36:05 +02:00
|
|
|
DeleteInheritsTuple(oldIndexId, parentIndexRelid);
|
|
|
|
StoreSingleInheritance(newIndexId, parentIndexRelid, 1);
|
2019-03-29 08:25:20 +01:00
|
|
|
|
2019-04-12 08:36:05 +02:00
|
|
|
list_free(ancestors);
|
2019-03-29 08:25:20 +01:00
|
|
|
}
|
|
|
|
|
2019-04-12 08:36:05 +02:00
|
|
|
/*
|
2020-03-05 04:50:15 +01:00
|
|
|
* Swap all dependencies of and on the old index to the new one, and
|
|
|
|
* vice-versa. Note that a call to CommandCounterIncrement() would cause
|
|
|
|
* duplicate entries in pg_depend, so this should not be done.
|
2019-04-12 08:36:05 +02:00
|
|
|
*/
|
2020-03-05 04:50:15 +01:00
|
|
|
changeDependenciesOf(RelationRelationId, newIndexId, oldIndexId);
|
|
|
|
changeDependenciesOn(RelationRelationId, newIndexId, oldIndexId);
|
|
|
|
|
2019-04-12 08:36:05 +02:00
|
|
|
changeDependenciesOf(RelationRelationId, oldIndexId, newIndexId);
|
2019-03-29 08:25:20 +01:00
|
|
|
changeDependenciesOn(RelationRelationId, oldIndexId, newIndexId);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Copy over statistics from old to new index
|
|
|
|
*/
|
|
|
|
{
|
|
|
|
PgStat_StatTabEntry *tabentry;
|
|
|
|
|
|
|
|
tabentry = pgstat_fetch_stat_tabentry(oldIndexId);
|
|
|
|
if (tabentry)
|
|
|
|
{
|
|
|
|
if (newClassRel->pgstat_info)
|
|
|
|
{
|
|
|
|
newClassRel->pgstat_info->t_counts.t_numscans = tabentry->numscans;
|
|
|
|
newClassRel->pgstat_info->t_counts.t_tuples_returned = tabentry->tuples_returned;
|
|
|
|
newClassRel->pgstat_info->t_counts.t_tuples_fetched = tabentry->tuples_fetched;
|
|
|
|
newClassRel->pgstat_info->t_counts.t_blocks_fetched = tabentry->blocks_fetched;
|
|
|
|
newClassRel->pgstat_info->t_counts.t_blocks_hit = tabentry->blocks_hit;
|
2019-05-22 18:55:34 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* The data will be sent by the next pgstat_report_stat()
|
|
|
|
* call.
|
|
|
|
*/
|
2019-03-29 08:25:20 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Close relations */
|
|
|
|
table_close(pg_class, RowExclusiveLock);
|
|
|
|
table_close(pg_index, RowExclusiveLock);
|
|
|
|
table_close(pg_constraint, RowExclusiveLock);
|
|
|
|
table_close(pg_trigger, RowExclusiveLock);
|
|
|
|
|
|
|
|
/* The lock taken previously is not released until the end of transaction */
|
|
|
|
relation_close(oldClassRel, NoLock);
|
|
|
|
relation_close(newClassRel, NoLock);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* index_concurrently_set_dead
|
|
|
|
*
|
|
|
|
* Perform the last invalidation stage of DROP INDEX CONCURRENTLY or REINDEX
|
|
|
|
* CONCURRENTLY before actually dropping the index. After calling this
|
|
|
|
* function, the index is seen by all the backends as dead. Low-level locks
|
|
|
|
* taken here are kept until the end of the transaction calling this function.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
index_concurrently_set_dead(Oid heapId, Oid indexId)
|
|
|
|
{
|
|
|
|
Relation userHeapRelation;
|
|
|
|
Relation userIndexRelation;
|
|
|
|
|
|
|
|
/*
|
2019-05-22 18:55:34 +02:00
|
|
|
* No more predicate locks will be acquired on this index, and we're about
|
|
|
|
* to stop doing inserts into the index which could show conflicts with
|
|
|
|
* existing predicate locks, so now is the time to move them to the heap
|
|
|
|
* relation.
|
2019-03-29 08:25:20 +01:00
|
|
|
*/
|
|
|
|
userHeapRelation = table_open(heapId, ShareUpdateExclusiveLock);
|
|
|
|
userIndexRelation = index_open(indexId, ShareUpdateExclusiveLock);
|
|
|
|
TransferPredicateLocksToHeapRelation(userIndexRelation);
|
|
|
|
|
|
|
|
/*
|
2019-05-22 18:55:34 +02:00
|
|
|
* Now we are sure that nobody uses the index for queries; they just might
|
|
|
|
* have it open for updating it. So now we can unset indisready and
|
|
|
|
* indislive, then wait till nobody could be using it at all anymore.
|
2019-03-29 08:25:20 +01:00
|
|
|
*/
|
|
|
|
index_set_state_flags(indexId, INDEX_DROP_SET_DEAD);
|
|
|
|
|
|
|
|
/*
|
2019-05-22 18:55:34 +02:00
|
|
|
* Invalidate the relcache for the table, so that after this commit all
|
|
|
|
* sessions will refresh the table's index list. Forgetting just the
|
|
|
|
* index's relcache entry is not enough.
|
2019-03-29 08:25:20 +01:00
|
|
|
*/
|
|
|
|
CacheInvalidateRelcache(userHeapRelation);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Close the relations again, though still holding session lock.
|
|
|
|
*/
|
|
|
|
table_close(userHeapRelation, NoLock);
|
|
|
|
index_close(userIndexRelation, NoLock);
|
|
|
|
}
|
|
|
|
|
2011-01-25 21:42:03 +01:00
|
|
|
/*
 * index_constraint_create
 *
 * Set up a constraint associated with an index.  Return the new constraint's
 * address.
 *
 * heapRelation: table owning the index (must be suitably locked by caller)
 * indexRelationId: OID of the index
 * parentConstraintId: if constraint is on a partition, the OID of the
 *		constraint in the parent.
 * indexInfo: same info executor uses to insert into the index
 * constraintName: what it say (generally, should match name of index)
 * constraintType: one of CONSTRAINT_PRIMARY, CONSTRAINT_UNIQUE, or
 *		CONSTRAINT_EXCLUSION
 * flags: bitmask that can include any combination of these bits:
 *		INDEX_CONSTR_CREATE_MARK_AS_PRIMARY: index is a PRIMARY KEY
 *		INDEX_CONSTR_CREATE_DEFERRABLE: constraint is DEFERRABLE
 *		INDEX_CONSTR_CREATE_INIT_DEFERRED: constraint is INITIALLY DEFERRED
 *		INDEX_CONSTR_CREATE_UPDATE_INDEX: update the pg_index row
 *		INDEX_CONSTR_CREATE_REMOVE_OLD_DEPS: remove existing dependencies
 *			of index on table's columns
 * allow_system_table_mods: allow table to be a system catalog
 * is_internal: index is constructed due to internal process
 */
ObjectAddress
index_constraint_create(Relation heapRelation,
						Oid indexRelationId,
						Oid parentConstraintId,
						IndexInfo *indexInfo,
						const char *constraintName,
						char constraintType,
						bits16 constr_flags,
						bool allow_system_table_mods,
						bool is_internal)
{
	Oid			namespaceId = RelationGetNamespace(heapRelation);
	ObjectAddress myself,
				idxaddr;
	Oid			conOid;
	bool		deferrable;
	bool		initdeferred;
	bool		mark_as_primary;
	bool		islocal;
	bool		noinherit;
	int			inhcount;

	/* Decode the flag bits into booleans for readability below */
	deferrable = (constr_flags & INDEX_CONSTR_CREATE_DEFERRABLE) != 0;
	initdeferred = (constr_flags & INDEX_CONSTR_CREATE_INIT_DEFERRED) != 0;
	mark_as_primary = (constr_flags & INDEX_CONSTR_CREATE_MARK_AS_PRIMARY) != 0;

	/* constraint creation support doesn't work while bootstrapping */
	Assert(!IsBootstrapProcessingMode());

	/* enforce system-table restriction */
	if (!allow_system_table_mods &&
		IsSystemRelation(heapRelation) &&
		IsNormalProcessingMode())
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("user-defined indexes on system catalog tables are not supported")));

	/* primary/unique constraints shouldn't have any expressions */
	if (indexInfo->ii_Expressions &&
		constraintType != CONSTRAINT_EXCLUSION)
		elog(ERROR, "constraints cannot have index expressions");

	/*
	 * If we're manufacturing a constraint for a pre-existing index, we need
	 * to get rid of the existing auto dependencies for the index (the ones
	 * that index_create() would have made instead of calling this function).
	 *
	 * Note: this code would not necessarily do the right thing if the index
	 * has any expressions or predicate, but we'd never be turning such an
	 * index into a UNIQUE or PRIMARY KEY constraint.
	 */
	if (constr_flags & INDEX_CONSTR_CREATE_REMOVE_OLD_DEPS)
		deleteDependencyRecordsForClass(RelationRelationId, indexRelationId,
										RelationRelationId, DEPENDENCY_AUTO);

	/*
	 * A constraint on a partition is not "local": it is inherited (count 1)
	 * from the parent constraint and may not block further inheritance.
	 */
	if (OidIsValid(parentConstraintId))
	{
		islocal = false;
		inhcount = 1;
		noinherit = false;
	}
	else
	{
		islocal = true;
		inhcount = 0;
		noinherit = true;
	}

	/*
	 * Construct a pg_constraint entry.
	 */
	conOid = CreateConstraintEntry(constraintName,
								   namespaceId,
								   constraintType,
								   deferrable,
								   initdeferred,
								   true,
								   parentConstraintId,
								   RelationGetRelid(heapRelation),
								   indexInfo->ii_IndexAttrNumbers,
								   indexInfo->ii_NumIndexKeyAttrs,
								   indexInfo->ii_NumIndexAttrs,
								   InvalidOid,	/* no domain */
								   indexRelationId, /* index OID */
								   InvalidOid,	/* no foreign key */
								   NULL,
								   NULL,
								   NULL,
								   NULL,
								   0,
								   ' ',
								   ' ',
								   ' ',
								   indexInfo->ii_ExclusionOps,
								   NULL,	/* no check constraint */
								   NULL,
								   islocal,
								   inhcount,
								   noinherit,
								   is_internal);

	/*
	 * Register the index as internally dependent on the constraint.
	 *
	 * Note that the constraint has a dependency on the table, so we don't
	 * need (or want) any direct dependency from the index to the table.
	 */
	ObjectAddressSet(myself, ConstraintRelationId, conOid);
	ObjectAddressSet(idxaddr, RelationRelationId, indexRelationId);
	recordDependencyOn(&idxaddr, &myself, DEPENDENCY_INTERNAL);

	/*
	 * Also, if this is a constraint on a partition, give it partition-type
	 * dependencies on the parent constraint as well as the table.
	 */
	if (OidIsValid(parentConstraintId))
	{
		ObjectAddress referenced;

		ObjectAddressSet(referenced, ConstraintRelationId, parentConstraintId);
		recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_PRI);
		ObjectAddressSet(referenced, RelationRelationId,
						 RelationGetRelid(heapRelation));
		recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_SEC);
	}

	/*
	 * If the constraint is deferrable, create the deferred uniqueness
	 * checking trigger.  (The trigger will be given an internal dependency on
	 * the constraint by CreateTrigger.)
	 */
	if (deferrable)
	{
		CreateTrigStmt *trigger;

		trigger = makeNode(CreateTrigStmt);
		trigger->trigname = (constraintType == CONSTRAINT_PRIMARY) ?
			"PK_ConstraintTrigger" :
			"Unique_ConstraintTrigger";
		trigger->relation = NULL;
		trigger->funcname = SystemFuncName("unique_key_recheck");
		trigger->args = NIL;
		trigger->row = true;
		trigger->timing = TRIGGER_TYPE_AFTER;
		trigger->events = TRIGGER_TYPE_INSERT | TRIGGER_TYPE_UPDATE;
		trigger->columns = NIL;
		trigger->whenClause = NULL;
		trigger->isconstraint = true;
		trigger->deferrable = true;
		trigger->initdeferred = initdeferred;
		trigger->constrrel = NULL;

		(void) CreateTrigger(trigger, NULL, RelationGetRelid(heapRelation),
							 InvalidOid, conOid, indexRelationId, InvalidOid,
							 InvalidOid, NULL, true, false);
	}

	/*
	 * If needed, mark the index as primary and/or deferred in pg_index.
	 *
	 * Note: When making an existing index into a constraint, caller must have
	 * a table lock that prevents concurrent table updates; otherwise, there
	 * is a risk that concurrent readers of the table will miss seeing this
	 * index at all.
	 */
	if ((constr_flags & INDEX_CONSTR_CREATE_UPDATE_INDEX) &&
		(mark_as_primary || deferrable))
	{
		Relation	pg_index;
		HeapTuple	indexTuple;
		Form_pg_index indexForm;
		bool		dirty = false;

		pg_index = table_open(IndexRelationId, RowExclusiveLock);

		indexTuple = SearchSysCacheCopy1(INDEXRELID,
										 ObjectIdGetDatum(indexRelationId));
		if (!HeapTupleIsValid(indexTuple))
			elog(ERROR, "cache lookup failed for index %u", indexRelationId);
		indexForm = (Form_pg_index) GETSTRUCT(indexTuple);

		if (mark_as_primary && !indexForm->indisprimary)
		{
			indexForm->indisprimary = true;
			dirty = true;
		}

		if (deferrable && indexForm->indimmediate)
		{
			indexForm->indimmediate = false;
			dirty = true;
		}

		/* Write the tuple back only if something actually changed */
		if (dirty)
		{
			CatalogTupleUpdate(pg_index, &indexTuple->t_self, indexTuple);

			InvokeObjectPostAlterHookArg(IndexRelationId, indexRelationId, 0,
										 InvalidOid, is_internal);
		}

		heap_freetuple(indexTuple);
		table_close(pg_index, RowExclusiveLock);
	}

	return myself;
}
|
|
|
|
|
2002-07-12 20:43:19 +02:00
|
|
|
/*
|
1999-12-10 04:56:14 +01:00
|
|
|
* index_drop
|
1997-09-07 07:04:48 +02:00
|
|
|
*
|
2002-07-12 20:43:19 +02:00
|
|
|
* NOTE: this routine should now only be called through performDeletion(),
|
|
|
|
* else associated dependencies won't be cleaned up.
|
2019-03-29 08:25:20 +01:00
|
|
|
*
|
|
|
|
* If concurrent is true, do a DROP INDEX CONCURRENTLY. If concurrent is
|
|
|
|
* false but concurrent_lock_mode is true, then do a normal DROP INDEX but
|
|
|
|
* take a lock for CONCURRENTLY processing. That is used as part of REINDEX
|
|
|
|
* CONCURRENTLY.
|
1996-07-09 08:22:35 +02:00
|
|
|
*/
|
|
|
|
void
|
2019-03-29 08:25:20 +01:00
|
|
|
index_drop(Oid indexId, bool concurrent, bool concurrent_lock_mode)
|
1996-07-09 08:22:35 +02:00
|
|
|
{
|
2000-06-17 23:49:04 +02:00
|
|
|
Oid heapId;
|
1999-11-21 21:01:10 +01:00
|
|
|
Relation userHeapRelation;
|
|
|
|
Relation userIndexRelation;
|
1997-09-08 04:41:22 +02:00
|
|
|
Relation indexRelation;
|
|
|
|
HeapTuple tuple;
|
2004-02-15 22:01:39 +01:00
|
|
|
bool hasexprs;
|
2012-04-06 11:21:40 +02:00
|
|
|
LockRelId heaprelid,
|
|
|
|
indexrelid;
|
2012-04-07 22:44:43 +02:00
|
|
|
LOCKTAG heaplocktag;
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
LOCKMODE lockmode;
|
1999-05-25 18:15:34 +02:00
|
|
|
|
Fix concurrent indexing operations with temporary tables
Attempting to use CREATE INDEX, DROP INDEX or REINDEX with CONCURRENTLY
on a temporary relation with ON COMMIT actions triggered unexpected
errors because those operations use multiple transactions internally to
complete their work. Here is for example one confusing error when using
ON COMMIT DELETE ROWS:
ERROR: index "foo" already contains data
Issues related to temporary relations and concurrent indexing are fixed
in this commit by enforcing the non-concurrent path to be taken for
temporary relations even if using CONCURRENTLY, transparently to the
user. Using a non-concurrent path does not matter in practice as locks
cannot be taken on a temporary relation by a session different than the
one owning the relation, and the non-concurrent operation is more
effective.
The problem exists with REINDEX since v12 with the introduction of
CONCURRENTLY, and with CREATE/DROP INDEX since CONCURRENTLY exists for
those commands. In all supported versions, this caused only confusing
error messages to be generated. Note that with REINDEX, it was also
possible to issue a REINDEX CONCURRENTLY for a temporary relation owned
by a different session, leading to a server crash.
The idea to enforce transparently the non-concurrent code path for
temporary relations comes originally from Andres Freund.
Reported-by: Manuel Rigger
Author: Michael Paquier, Heikki Linnakangas
Reviewed-by: Andres Freund, Álvaro Herrera, Heikki Linnakangas
Discussion: https://postgr.es/m/CA+u7OA6gP7YAeCguyseusYcc=uR8+ypjCcgDDCTzjQ+k6S9ksQ@mail.gmail.com
Backpatch-through: 9.4
2020-01-22 01:49:18 +01:00
|
|
|
/*
|
|
|
|
* A temporary relation uses a non-concurrent DROP. Other backends can't
|
|
|
|
* access a temporary relation, so there's no harm in grabbing a stronger
|
|
|
|
* lock (see comments in RemoveRelations), and a non-concurrent DROP is
|
|
|
|
* more efficient.
|
|
|
|
*/
|
|
|
|
Assert(get_rel_persistence(indexId) != RELPERSISTENCE_TEMP ||
|
|
|
|
(!concurrent && !concurrent_lock_mode));
|
|
|
|
|
2001-03-22 07:16:21 +01:00
|
|
|
/*
|
|
|
|
* To drop an index safely, we must grab exclusive lock on its parent
|
2009-05-31 22:55:37 +02:00
|
|
|
* table. Exclusive lock on the index alone is insufficient because
|
|
|
|
* another backend might be about to execute a query on the parent table.
|
|
|
|
* If it relies on a previously cached list of index OIDs, then it could
|
|
|
|
* attempt to access the just-dropped index. We must therefore take a
|
|
|
|
* table lock strong enough to prevent all queries on the table from
|
|
|
|
* proceeding until we commit and send out a shared-cache-inval notice
|
|
|
|
* that will make them update their index lists.
|
2012-10-18 19:58:30 +02:00
|
|
|
*
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
* In the concurrent case we avoid this requirement by disabling index use
|
|
|
|
* in multiple steps and waiting out any transactions that might be using
|
|
|
|
* the index, so we don't need exclusive lock on the parent table. Instead
|
|
|
|
* we take ShareUpdateExclusiveLock, to ensure that two sessions aren't
|
2014-05-06 18:12:18 +02:00
|
|
|
* doing CREATE/DROP INDEX CONCURRENTLY on the same index. (We will get
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
* AccessExclusiveLock on the index below, once we're sure nobody else is
|
|
|
|
* using it.)
|
1999-09-18 21:08:25 +02:00
|
|
|
*/
|
Improve table locking behavior in the face of current DDL.
In the previous coding, callers were faced with an awkward choice:
look up the name, do permissions checks, and then lock the table; or
look up the name, lock the table, and then do permissions checks.
The first choice was wrong because the results of the name lookup
and permissions checks might be out-of-date by the time the table
lock was acquired, while the second allowed a user with no privileges
to interfere with access to a table by users who do have privileges
(e.g. if a malicious backend queues up for an AccessExclusiveLock on
a table on which AccessShareLock is already held, further attempts
to access the table will be blocked until the AccessExclusiveLock
is obtained and the malicious backend's transaction rolls back).
To fix, allow callers of RangeVarGetRelid() to pass a callback which
gets executed after performing the name lookup but before acquiring
the relation lock. If the name lookup is retried (because
invalidation messages are received), the callback will be re-executed
as well, so we get the best of both worlds. RangeVarGetRelid() is
renamed to RangeVarGetRelidExtended(); callers not wishing to supply
a callback can continue to invoke it as RangeVarGetRelid(), which is
now a macro. Since the only one caller that uses nowait = true now
passes a callback anyway, the RangeVarGetRelid() macro defaults nowait
as well. The callback can also be used for supplemental locking - for
example, REINDEX INDEX needs to acquire the table lock before the index
lock to reduce deadlock possibilities.
There's a lot more work to be done here to fix all the cases where this
can be a problem, but this commit provides the general infrastructure
and fixes the following specific cases: REINDEX INDEX, REINDEX TABLE,
LOCK TABLE, and DROP TABLE/INDEX/SEQUENCE/VIEW/FOREIGN TABLE.
Per discussion with Noah Misch and Alvaro Herrera.
2011-11-30 16:12:27 +01:00
|
|
|
heapId = IndexGetRelation(indexId, false);
|
2019-03-29 08:25:20 +01:00
|
|
|
lockmode = (concurrent || concurrent_lock_mode) ? ShareUpdateExclusiveLock : AccessExclusiveLock;
|
2019-01-21 19:32:19 +01:00
|
|
|
userHeapRelation = table_open(heapId, lockmode);
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
userIndexRelation = index_open(indexId, lockmode);
|
1999-09-18 21:08:25 +02:00
|
|
|
|
2011-02-15 21:49:54 +01:00
|
|
|
/*
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
* We might still have open queries using it in our own session, which the
|
|
|
|
* above locking won't prevent, so test explicitly.
|
2011-02-15 21:49:54 +01:00
|
|
|
*/
|
|
|
|
CheckTableNotInUse(userIndexRelation, "DROP INDEX");
|
|
|
|
|
2012-04-06 11:21:40 +02:00
|
|
|
/*
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
* Drop Index Concurrently is more or less the reverse process of Create
|
|
|
|
* Index Concurrently.
|
2012-10-18 19:58:30 +02:00
|
|
|
*
|
|
|
|
* First we unset indisvalid so queries starting afterwards don't use the
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
* index to answer queries anymore. We have to keep indisready = true so
|
|
|
|
* transactions that are still scanning the index can continue to see
|
|
|
|
* valid index contents. For instance, if they are using READ COMMITTED
|
|
|
|
* mode, and another transaction makes changes and commits, they need to
|
|
|
|
* see those new tuples in the index.
|
2012-10-18 19:58:30 +02:00
|
|
|
*
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
* After all transactions that could possibly have used the index for
|
|
|
|
* queries end, we can unset indisready and indislive, then wait till
|
|
|
|
* nobody could be touching it anymore. (Note: we need indislive because
|
|
|
|
* this state must be distinct from the initial state during CREATE INDEX
|
|
|
|
* CONCURRENTLY, which has indislive true while indisready and indisvalid
|
|
|
|
* are false. That's because in that state, transactions must examine the
|
|
|
|
* index for HOT-safety decisions, while in this state we don't want them
|
|
|
|
* to open it at all.)
|
|
|
|
*
|
|
|
|
* Since all predicate locks on the index are about to be made invalid, we
|
|
|
|
* must promote them to predicate locks on the heap. In the
|
|
|
|
* non-concurrent case we can just do that now. In the concurrent case
|
|
|
|
* it's a bit trickier. The predicate locks must be moved when there are
|
|
|
|
* no index scans in progress on the index and no more can subsequently
|
2014-05-06 18:12:18 +02:00
|
|
|
* start, so that no new predicate locks can be made on the index. Also,
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
* they must be moved before heap inserts stop maintaining the index, else
|
|
|
|
* the conflict with the predicate lock on the index gap could be missed
|
|
|
|
* before the lock on the heap relation is in place to detect a conflict
|
|
|
|
* based on the heap tuple insert.
|
2012-04-06 11:21:40 +02:00
|
|
|
*/
|
|
|
|
if (concurrent)
|
|
|
|
{
|
|
|
|
/*
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
* We must commit our transaction in order to make the first pg_index
|
2014-05-06 18:12:18 +02:00
|
|
|
* state update visible to other sessions. If the DROP machinery has
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
* already performed any other actions (removal of other objects,
|
|
|
|
* pg_depend entries, etc), the commit would make those actions
|
|
|
|
* permanent, which would leave us with inconsistent catalog state if
|
2014-05-06 18:12:18 +02:00
|
|
|
* we fail partway through the following sequence. Since DROP INDEX
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
* CONCURRENTLY is restricted to dropping just one index that has no
|
|
|
|
* dependencies, we should get here before anything's been done ---
|
|
|
|
* but let's check that to be sure. We can verify that the current
|
|
|
|
* transaction has not executed any transactional updates by checking
|
|
|
|
* that no XID has been assigned.
|
2012-04-06 11:21:40 +02:00
|
|
|
*/
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
if (GetTopTransactionIdIfAny() != InvalidTransactionId)
|
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
|
|
|
errmsg("DROP INDEX CONCURRENTLY must be first action in transaction")));
|
2012-04-06 11:21:40 +02:00
|
|
|
|
2012-10-18 19:58:30 +02:00
|
|
|
/*
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
* Mark index invalid by updating its pg_index entry
|
2012-10-18 19:58:30 +02:00
|
|
|
*/
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
index_set_state_flags(indexId, INDEX_DROP_CLEAR_VALID);
|
2012-04-06 11:21:40 +02:00
|
|
|
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
/*
|
|
|
|
* Invalidate the relcache for the table, so that after this commit
|
|
|
|
* all sessions will refresh any cached plans that might reference the
|
|
|
|
* index.
|
|
|
|
*/
|
|
|
|
CacheInvalidateRelcache(userHeapRelation);
|
2012-04-06 11:21:40 +02:00
|
|
|
|
|
|
|
/* save lockrelid and locktag for below, then close but keep locks */
|
|
|
|
heaprelid = userHeapRelation->rd_lockInfo.lockRelId;
|
|
|
|
SET_LOCKTAG_RELATION(heaplocktag, heaprelid.dbId, heaprelid.relId);
|
|
|
|
indexrelid = userIndexRelation->rd_lockInfo.lockRelId;
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
|
2019-01-21 19:32:19 +01:00
|
|
|
table_close(userHeapRelation, NoLock);
|
2012-04-06 11:21:40 +02:00
|
|
|
index_close(userIndexRelation, NoLock);
|
|
|
|
|
|
|
|
/*
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
* We must commit our current transaction so that the indisvalid
|
|
|
|
* update becomes visible to other transactions; then start another.
|
|
|
|
* Note that any previously-built data structures are lost in the
|
2014-05-06 18:12:18 +02:00
|
|
|
* commit. The only data we keep past here are the relation IDs.
|
2012-04-06 11:21:40 +02:00
|
|
|
*
|
|
|
|
* Before committing, get a session-level lock on the table, to ensure
|
|
|
|
* that neither it nor the index can be dropped before we finish. This
|
2012-06-10 21:20:04 +02:00
|
|
|
* cannot block, even if someone else is waiting for access, because
|
|
|
|
* we already have the same lock within our transaction.
|
2012-04-06 11:21:40 +02:00
|
|
|
*/
|
|
|
|
LockRelationIdForSession(&heaprelid, ShareUpdateExclusiveLock);
|
|
|
|
LockRelationIdForSession(&indexrelid, ShareUpdateExclusiveLock);
|
|
|
|
|
|
|
|
PopActiveSnapshot();
|
|
|
|
CommitTransactionCommand();
|
|
|
|
StartTransactionCommand();
|
|
|
|
|
|
|
|
/*
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
* Now we must wait until no running transaction could be using the
|
2014-01-03 17:22:03 +01:00
|
|
|
* index for a query. Use AccessExclusiveLock here to check for
|
2014-05-06 18:12:18 +02:00
|
|
|
* running transactions that hold locks of any kind on the table. Note
|
|
|
|
* we do not need to worry about xacts that open the table for reading
|
|
|
|
* after this point; they will see the index as invalid when they open
|
|
|
|
* the relation.
|
2012-04-06 11:21:40 +02:00
|
|
|
*
|
2012-06-10 21:20:04 +02:00
|
|
|
* Note: the reason we use actual lock acquisition here, rather than
|
|
|
|
* just checking the ProcArray and sleeping, is that deadlock is
|
|
|
|
* possible if one of the transactions in question is blocked trying
|
|
|
|
* to acquire an exclusive lock on our table. The lock code will
|
|
|
|
* detect deadlock and error out properly.
|
2019-10-18 12:18:50 +02:00
|
|
|
*
|
|
|
|
* Note: we report progress through WaitForLockers() unconditionally
|
|
|
|
* here, even though it will only be used when we're called by REINDEX
|
|
|
|
* CONCURRENTLY and not when called by DROP INDEX CONCURRENTLY.
|
2012-04-06 11:21:40 +02:00
|
|
|
*/
|
Report progress of CREATE INDEX operations
This uses the progress reporting infrastructure added by c16dc1aca5e0,
adding support for CREATE INDEX and CREATE INDEX CONCURRENTLY.
There are two pieces to this: one is index-AM-agnostic, and the other is
AM-specific. The latter is fairly elaborate for btrees, including
reportage for parallel index builds and the separate phases that btree
index creation uses; other index AMs, which are much simpler in their
building procedures, have simplistic reporting only, but that seems
sufficient, at least for non-concurrent builds.
The index-AM-agnostic part is fairly complete, providing insight into
the CONCURRENTLY wait phases as well as block-based progress during the
index validation table scan. (The index validation index scan requires
patching each AM, which has not been included here.)
Reviewers: Rahila Syed, Pavan Deolasee, Tatsuro Yamada
Discussion: https://postgr.es/m/20181220220022.mg63bhk26zdpvmcj@alvherre.pgsql
2019-04-02 20:18:08 +02:00
|
|
|
WaitForLockers(heaplocktag, AccessExclusiveLock, true);
|
2012-04-06 11:21:40 +02:00
|
|
|
|
2019-03-29 08:25:20 +01:00
|
|
|
/* Finish invalidation of index and mark it as dead */
|
|
|
|
index_concurrently_set_dead(heapId, indexId);
|
2012-10-18 19:58:30 +02:00
|
|
|
|
|
|
|
/*
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
* Again, commit the transaction to make the pg_index update visible
|
|
|
|
* to other sessions.
|
2012-10-18 19:58:30 +02:00
|
|
|
*/
|
|
|
|
CommitTransactionCommand();
|
|
|
|
StartTransactionCommand();
|
|
|
|
|
|
|
|
/*
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
* Wait till every transaction that saw the old index state has
|
2019-10-18 12:18:50 +02:00
|
|
|
* finished. See above about progress reporting.
|
2012-10-18 19:58:30 +02:00
|
|
|
*/
|
Report progress of CREATE INDEX operations
This uses the progress reporting infrastructure added by c16dc1aca5e0,
adding support for CREATE INDEX and CREATE INDEX CONCURRENTLY.
There are two pieces to this: one is index-AM-agnostic, and the other is
AM-specific. The latter is fairly elaborate for btrees, including
reportage for parallel index builds and the separate phases that btree
index creation uses; other index AMs, which are much simpler in their
building procedures, have simplistic reporting only, but that seems
sufficient, at least for non-concurrent builds.
The index-AM-agnostic part is fairly complete, providing insight into
the CONCURRENTLY wait phases as well as block-based progress during the
index validation table scan. (The index validation index scan requires
patching each AM, which has not been included here.)
Reviewers: Rahila Syed, Pavan Deolasee, Tatsuro Yamada
Discussion: https://postgr.es/m/20181220220022.mg63bhk26zdpvmcj@alvherre.pgsql
2019-04-02 20:18:08 +02:00
|
|
|
WaitForLockers(heaplocktag, AccessExclusiveLock, true);
|
2012-10-18 19:58:30 +02:00
|
|
|
|
2012-04-06 11:21:40 +02:00
|
|
|
/*
|
|
|
|
* Re-open relations to allow us to complete our actions.
|
|
|
|
*
|
|
|
|
* At this point, nothing should be accessing the index, but lets
|
|
|
|
* leave nothing to chance and grab AccessExclusiveLock on the index
|
|
|
|
* before the physical deletion.
|
|
|
|
*/
|
2019-01-21 19:32:19 +01:00
|
|
|
userHeapRelation = table_open(heapId, ShareUpdateExclusiveLock);
|
2012-04-06 11:21:40 +02:00
|
|
|
userIndexRelation = index_open(indexId, AccessExclusiveLock);
|
|
|
|
}
|
2012-10-21 23:35:42 +02:00
|
|
|
else
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
{
|
|
|
|
/* Not concurrent, so just transfer predicate locks and we're good */
|
2012-10-21 23:35:42 +02:00
|
|
|
TransferPredicateLocksToHeapRelation(userIndexRelation);
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
}
|
2011-06-08 12:47:21 +02:00
|
|
|
|
2001-03-22 07:16:21 +01:00
|
|
|
/*
|
Local partitioned indexes
When CREATE INDEX is run on a partitioned table, create catalog entries
for an index on the partitioned table (which is just a placeholder since
the table proper has no data of its own), and recurse to create actual
indexes on the existing partitions; create them in future partitions
also.
As a convenience gadget, if the new index definition matches some
existing index in partitions, these are picked up and used instead of
creating new ones. Whichever way these indexes come about, they become
attached to the index on the parent table and are dropped alongside it,
and cannot be dropped on isolation unless they are detached first.
To support pg_dump'ing these indexes, add commands
CREATE INDEX ON ONLY <table>
(which creates the index on the parent partitioned table, without
recursing) and
ALTER INDEX ATTACH PARTITION
(which is used after the indexes have been created individually on each
partition, to attach them to the parent index). These reconstruct prior
database state exactly.
Reviewed-by: (in alphabetical order) Peter Eisentraut, Robert Haas, Amit
Langote, Jesper Pedersen, Simon Riggs, David Rowley
Discussion: https://postgr.es/m/20171113170646.gzweigyrgg6pwsg4@alvherre.pgsql
2018-01-19 15:49:22 +01:00
|
|
|
* Schedule physical removal of the files (if any)
|
1997-09-07 07:04:48 +02:00
|
|
|
*/
|
Local partitioned indexes
When CREATE INDEX is run on a partitioned table, create catalog entries
for an index on the partitioned table (which is just a placeholder since
the table proper has no data of its own), and recurse to create actual
indexes on the existing partitions; create them in future partitions
also.
As a convenience gadget, if the new index definition matches some
existing index in partitions, these are picked up and used instead of
creating new ones. Whichever way these indexes come about, they become
attached to the index on the parent table and are dropped alongside it,
and cannot be dropped on isolation unless they are detached first.
To support pg_dump'ing these indexes, add commands
CREATE INDEX ON ONLY <table>
(which creates the index on the parent partitioned table, without
recursing) and
ALTER INDEX ATTACH PARTITION
(which is used after the indexes have been created individually on each
partition, to attach them to the parent index). These reconstruct prior
database state exactly.
Reviewed-by: (in alphabetical order) Peter Eisentraut, Robert Haas, Amit
Langote, Jesper Pedersen, Simon Riggs, David Rowley
Discussion: https://postgr.es/m/20171113170646.gzweigyrgg6pwsg4@alvherre.pgsql
2018-01-19 15:49:22 +01:00
|
|
|
if (userIndexRelation->rd_rel->relkind != RELKIND_PARTITIONED_INDEX)
|
|
|
|
RelationDropStorage(userIndexRelation);
|
2002-09-04 22:31:48 +02:00
|
|
|
|
2001-03-22 07:16:21 +01:00
|
|
|
/*
|
2005-10-15 04:49:52 +02:00
|
|
|
* Close and flush the index's relcache entry, to ensure relcache doesn't
|
|
|
|
* try to rebuild it while we're deleting catalog entries. We keep the
|
|
|
|
* lock though.
|
1997-09-07 07:04:48 +02:00
|
|
|
*/
|
2006-07-31 22:09:10 +02:00
|
|
|
index_close(userIndexRelation, NoLock);
|
2004-08-28 23:05:26 +02:00
|
|
|
|
|
|
|
RelationForgetRelation(indexId);
|
1996-07-09 08:22:35 +02:00
|
|
|
|
2001-03-22 07:16:21 +01:00
|
|
|
/*
|
2004-02-15 22:01:39 +01:00
|
|
|
* fix INDEX relation, and check for expressional index
|
1997-09-07 07:04:48 +02:00
|
|
|
*/
|
2019-01-21 19:32:19 +01:00
|
|
|
indexRelation = table_open(IndexRelationId, RowExclusiveLock);
|
1999-09-18 21:08:25 +02:00
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(indexId));
|
2000-11-16 23:30:52 +01:00
|
|
|
if (!HeapTupleIsValid(tuple))
|
2003-07-21 03:59:11 +02:00
|
|
|
elog(ERROR, "cache lookup failed for index %u", indexId);
|
1998-08-21 00:07:46 +02:00
|
|
|
|
2018-03-28 02:13:52 +02:00
|
|
|
hasexprs = !heap_attisnull(tuple, Anum_pg_index_indexprs,
|
|
|
|
RelationGetDescr(indexRelation));
|
2004-02-15 22:01:39 +01:00
|
|
|
|
2017-02-01 22:13:30 +01:00
|
|
|
CatalogTupleDelete(indexRelation, &tuple->t_self);
|
2002-07-14 23:08:08 +02:00
|
|
|
|
|
|
|
ReleaseSysCache(tuple);
|
2019-01-21 19:32:19 +01:00
|
|
|
table_close(indexRelation, RowExclusiveLock);
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2004-02-15 22:01:39 +01:00
|
|
|
/*
|
2005-10-15 04:49:52 +02:00
|
|
|
* if it has any expression columns, we might have stored statistics about
|
|
|
|
* them.
|
2004-02-15 22:01:39 +01:00
|
|
|
*/
|
|
|
|
if (hasexprs)
|
2004-08-28 23:05:26 +02:00
|
|
|
RemoveStatistics(indexId, 0);
|
2004-02-15 22:01:39 +01:00
|
|
|
|
1997-09-07 07:04:48 +02:00
|
|
|
/*
|
2004-08-28 23:05:26 +02:00
|
|
|
* fix ATTRIBUTE relation
|
1997-09-07 07:04:48 +02:00
|
|
|
*/
|
2004-08-28 23:05:26 +02:00
|
|
|
DeleteAttributeTuples(indexId);
|
1998-06-13 22:22:54 +02:00
|
|
|
|
2004-08-28 23:05:26 +02:00
|
|
|
/*
|
|
|
|
* fix RELATION relation
|
|
|
|
*/
|
|
|
|
DeleteRelationTuple(indexId);
|
1997-09-07 07:04:48 +02:00
|
|
|
|
Local partitioned indexes
When CREATE INDEX is run on a partitioned table, create catalog entries
for an index on the partitioned table (which is just a placeholder since
the table proper has no data of its own), and recurse to create actual
indexes on the existing partitions; create them in future partitions
also.
As a convenience gadget, if the new index definition matches some
existing index in partitions, these are picked up and used instead of
creating new ones. Whichever way these indexes come about, they become
attached to the index on the parent table and are dropped alongside it,
and cannot be dropped on isolation unless they are detached first.
To support pg_dump'ing these indexes, add commands
CREATE INDEX ON ONLY <table>
(which creates the index on the parent partitioned table, without
recursing) and
ALTER INDEX ATTACH PARTITION
(which is used after the indexes have been created individually on each
partition, to attach them to the parent index). These reconstruct prior
database state exactly.
Reviewed-by: (in alphabetical order) Peter Eisentraut, Robert Haas, Amit
Langote, Jesper Pedersen, Simon Riggs, David Rowley
Discussion: https://postgr.es/m/20171113170646.gzweigyrgg6pwsg4@alvherre.pgsql
2018-01-19 15:49:22 +01:00
|
|
|
/*
|
|
|
|
* fix INHERITS relation
|
|
|
|
*/
|
|
|
|
DeleteInheritsTuple(indexId, InvalidOid);
|
|
|
|
|
2002-03-03 18:47:56 +01:00
|
|
|
/*
|
2005-10-15 04:49:52 +02:00
|
|
|
* We are presently too lazy to attempt to compute the new correct value
|
|
|
|
* of relhasindex (the next VACUUM will fix it if necessary). So there is
|
|
|
|
* no need to update the pg_class tuple for the owning relation. But we
|
|
|
|
* must send out a shared-cache-inval notice on the owning relation to
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
* ensure other backends update their relcache lists of indexes. (In the
|
|
|
|
* concurrent case, this is redundant but harmless.)
|
2002-03-03 18:47:56 +01:00
|
|
|
*/
|
2004-02-10 02:55:27 +01:00
|
|
|
CacheInvalidateRelcache(userHeapRelation);
|
2002-03-03 18:47:56 +01:00
|
|
|
|
1999-11-21 21:01:10 +01:00
|
|
|
/*
|
2004-08-28 23:05:26 +02:00
|
|
|
* Close owning rel, but keep lock
|
1999-11-21 21:01:10 +01:00
|
|
|
*/
|
2019-01-21 19:32:19 +01:00
|
|
|
table_close(userHeapRelation, NoLock);
|
2012-04-06 11:21:40 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Release the session locks before we go.
|
|
|
|
*/
|
|
|
|
if (concurrent)
|
|
|
|
{
|
|
|
|
UnlockRelationIdForSession(&heaprelid, ShareUpdateExclusiveLock);
|
|
|
|
UnlockRelationIdForSession(&indexrelid, ShareUpdateExclusiveLock);
|
|
|
|
}
|
1996-07-09 08:22:35 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* ----------------------------------------------------------------
|
1997-09-07 07:04:48 +02:00
|
|
|
* index_build support
|
1996-07-09 08:22:35 +02:00
|
|
|
* ----------------------------------------------------------------
|
|
|
|
*/
|
2000-07-15 00:18:02 +02:00
|
|
|
|
|
|
|
/* ----------------
|
|
|
|
* BuildIndexInfo
|
2003-05-28 18:04:02 +02:00
|
|
|
* Construct an IndexInfo record for an open index
|
2000-07-15 00:18:02 +02:00
|
|
|
*
|
|
|
|
* IndexInfo stores the information about the index that's needed by
|
|
|
|
* FormIndexDatum, which is used for both index_build() and later insertion
|
2014-05-06 18:12:18 +02:00
|
|
|
* of individual index tuples. Normally we build an IndexInfo for an index
|
2000-07-15 00:18:02 +02:00
|
|
|
* just once per command, and then use it for (potentially) many tuples.
|
|
|
|
* ----------------
|
|
|
|
*/
|
2001-10-25 07:50:21 +02:00
|
|
|
IndexInfo *
|
2003-05-28 18:04:02 +02:00
|
|
|
BuildIndexInfo(Relation index)
|
2000-07-15 00:18:02 +02:00
|
|
|
{
|
2019-08-04 04:18:57 +02:00
|
|
|
IndexInfo *ii;
|
2003-05-28 18:04:02 +02:00
|
|
|
Form_pg_index indexStruct = index->rd_index;
|
2000-07-15 00:18:02 +02:00
|
|
|
int i;
|
2018-04-07 22:00:39 +02:00
|
|
|
int numAtts;
|
2000-07-15 00:18:02 +02:00
|
|
|
|
2003-05-28 18:04:02 +02:00
|
|
|
/* check the number of keys, and copy attr numbers into the IndexInfo */
|
2018-04-07 22:00:39 +02:00
|
|
|
numAtts = indexStruct->indnatts;
|
|
|
|
if (numAtts < 1 || numAtts > INDEX_MAX_KEYS)
|
2003-05-28 18:04:02 +02:00
|
|
|
elog(ERROR, "invalid indnatts %d for index %u",
|
2018-04-07 22:00:39 +02:00
|
|
|
numAtts, RelationGetRelid(index));
|
|
|
|
|
2019-08-04 04:18:57 +02:00
|
|
|
/*
|
|
|
|
* Create the node, fetching any expressions needed for expressional
|
|
|
|
* indexes and index predicate if any.
|
|
|
|
*/
|
|
|
|
ii = makeIndexInfo(indexStruct->indnatts,
|
|
|
|
indexStruct->indnkeyatts,
|
|
|
|
index->rd_rel->relam,
|
|
|
|
RelationGetIndexExpressions(index),
|
|
|
|
RelationGetIndexPredicate(index),
|
|
|
|
indexStruct->indisunique,
|
|
|
|
indexStruct->indisready,
|
|
|
|
false);
|
|
|
|
|
|
|
|
/* fill in attribute numbers */
|
2018-04-07 22:00:39 +02:00
|
|
|
for (i = 0; i < numAtts; i++)
|
2018-04-12 12:02:45 +02:00
|
|
|
ii->ii_IndexAttrNumbers[i] = indexStruct->indkey.values[i];
|
2000-07-15 00:18:02 +02:00
|
|
|
|
2009-12-07 06:22:23 +01:00
|
|
|
/* fetch exclusion constraint info if any */
|
2011-01-25 23:51:59 +01:00
|
|
|
if (indexStruct->indisexclusion)
|
2009-12-07 06:22:23 +01:00
|
|
|
{
|
|
|
|
RelationGetExclusionInfo(index,
|
|
|
|
&ii->ii_ExclusionOps,
|
|
|
|
&ii->ii_ExclusionProcs,
|
|
|
|
&ii->ii_ExclusionStrats);
|
|
|
|
}
|
Allow index AMs to cache data across aminsert calls within a SQL command.
It's always been possible for index AMs to cache data across successive
amgettuple calls within a single SQL command: the IndexScanDesc.opaque
field is meant for precisely that. However, no comparable facility
exists for amortizing setup work across successive aminsert calls.
This patch adds such a feature and teaches GIN, GIST, and BRIN to use it
to amortize catalog lookups they'd previously been doing on every call.
(The other standard index AMs keep everything they need in the relcache,
so there's little to improve there.)
For GIN, the overall improvement in a statement that inserts many rows
can be as much as 10%, though it seems a bit less for the other two.
In addition, this makes a really significant difference in runtime
for CLOBBER_CACHE_ALWAYS tests, since in those builds the repeated
catalog lookups are vastly more expensive.
The reason this has been hard up to now is that the aminsert function is
not passed any useful place to cache per-statement data. What I chose to
do is to add suitable fields to struct IndexInfo and pass that to aminsert.
That's not widening the index AM API very much because IndexInfo is already
within the ken of ambuild; in fact, by passing the same info to aminsert
as to ambuild, this is really removing an inconsistency in the AM API.
Discussion: https://postgr.es/m/27568.1486508680@sss.pgh.pa.us
2017-02-09 17:52:12 +01:00
|
|
|
|
Implement operator class parameters
PostgreSQL provides set of template index access methods, where opclasses have
much freedom in the semantics of indexing. These index AMs are GiST, GIN,
SP-GiST and BRIN. There opclasses define representation of keys, operations on
them and supported search strategies. So, it's natural that opclasses may be
faced some tradeoffs, which require user-side decision. This commit implements
opclass parameters allowing users to set some values, which tell opclass how to
index the particular dataset.
This commit doesn't introduce new storage in system catalog. Instead it uses
pg_attribute.attoptions, which is used for table column storage options but
unused for index attributes.
In order to evade changing signature of each opclass support function, we
implement unified way to pass options to opclass support functions. Options
are set to fn_expr as the constant bytea expression. It's possible due to the
fact that opclass support functions are executed outside of expressions, so
fn_expr is unused for them.
This commit comes with some examples of opclass options usage. We parametrize
signature length in GiST. That applies to multiple opclasses: tsvector_ops,
gist__intbig_ops, gist_ltree_ops, gist__ltree_ops, gist_trgm_ops and
gist_hstore_ops. Also we parametrize maximum number of integer ranges for
gist__int_ops. However, the main future usage of this feature is expected
to be json, where users would be able to specify which way to index particular
json parts.
Catversion is bumped.
Discussion: https://postgr.es/m/d22c3a18-31c7-1879-fc11-4c1ce2f5e5af%40postgrespro.ru
Author: Nikita Glukhov, revised by me
Reviwed-by: Nikolay Shaplov, Robert Haas, Tom Lane, Tomas Vondra, Alvaro Herrera
2020-03-30 18:17:11 +02:00
|
|
|
ii->ii_OpclassOptions = RelationGetIndexRawAttOptions(index);
|
|
|
|
|
2000-07-15 00:18:02 +02:00
|
|
|
return ii;
|
|
|
|
}
|
|
|
|
|
Fix misbehavior with expression indexes on ON COMMIT DELETE ROWS tables.
We implement ON COMMIT DELETE ROWS by truncating tables marked that
way, which requires also truncating/rebuilding their indexes. But
RelationTruncateIndexes asks the relcache for up-to-date copies of any
index expressions, which may cause execution of eval_const_expressions
on them, which can result in actual execution of subexpressions.
This is a bad thing to have happening during ON COMMIT. Manuel Rigger
reported that use of a SQL function resulted in crashes due to
expectations that ActiveSnapshot would be set, which it isn't.
The most obvious fix perhaps would be to push a snapshot during
PreCommit_on_commit_actions, but I think that would just open the door
to more problems: CommitTransaction explicitly expects that no
user-defined code can be running at this point.
Fortunately, since we know that no tuples exist to be indexed, there
seems no need to use the real index expressions or predicates during
RelationTruncateIndexes. We can set up dummy index expressions
instead (we do need something that will expose the right data type,
as there are places that build index tupdescs based on this), and
just ignore predicates and exclusion constraints.
In a green field it'd likely be better to reimplement ON COMMIT DELETE
ROWS using the same "init fork" infrastructure used for unlogged
relations. That seems impractical without catalog changes though,
and even without that it'd be too big a change to back-patch.
So for now do it like this.
Per private report from Manuel Rigger. This has been broken forever,
so back-patch to all supported branches.
2019-12-01 19:09:26 +01:00
|
|
|
/* ----------------
|
|
|
|
* BuildDummyIndexInfo
|
|
|
|
* Construct a dummy IndexInfo record for an open index
|
|
|
|
*
|
|
|
|
* This differs from the real BuildIndexInfo in that it will never run any
|
|
|
|
* user-defined code that might exist in index expressions or predicates.
|
|
|
|
* Instead of the real index expressions, we return null constants that have
|
|
|
|
* the right types/typmods/collations. Predicates and exclusion clauses are
|
|
|
|
* just ignored. This is sufficient for the purpose of truncating an index,
|
|
|
|
* since we will not need to actually evaluate the expressions or predicates;
|
|
|
|
* the only thing that's likely to be done with the data is construction of
|
|
|
|
* a tupdesc describing the index's rowtype.
|
|
|
|
* ----------------
|
|
|
|
*/
|
|
|
|
IndexInfo *
|
|
|
|
BuildDummyIndexInfo(Relation index)
|
|
|
|
{
|
|
|
|
IndexInfo *ii;
|
|
|
|
Form_pg_index indexStruct = index->rd_index;
|
|
|
|
int i;
|
|
|
|
int numAtts;
|
|
|
|
|
|
|
|
/* check the number of keys, and copy attr numbers into the IndexInfo */
|
|
|
|
numAtts = indexStruct->indnatts;
|
|
|
|
if (numAtts < 1 || numAtts > INDEX_MAX_KEYS)
|
|
|
|
elog(ERROR, "invalid indnatts %d for index %u",
|
|
|
|
numAtts, RelationGetRelid(index));
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Create the node, using dummy index expressions, and pretending there is
|
|
|
|
* no predicate.
|
|
|
|
*/
|
|
|
|
ii = makeIndexInfo(indexStruct->indnatts,
|
|
|
|
indexStruct->indnkeyatts,
|
|
|
|
index->rd_rel->relam,
|
|
|
|
RelationGetDummyIndexExpressions(index),
|
|
|
|
NIL,
|
|
|
|
indexStruct->indisunique,
|
|
|
|
indexStruct->indisready,
|
|
|
|
false);
|
|
|
|
|
|
|
|
/* fill in attribute numbers */
|
|
|
|
for (i = 0; i < numAtts; i++)
|
|
|
|
ii->ii_IndexAttrNumbers[i] = indexStruct->indkey.values[i];
|
|
|
|
|
|
|
|
/* We ignore the exclusion constraint if any */
|
|
|
|
|
|
|
|
return ii;
|
|
|
|
}
|
|
|
|
|
Local partitioned indexes
When CREATE INDEX is run on a partitioned table, create catalog entries
for an index on the partitioned table (which is just a placeholder since
the table proper has no data of its own), and recurse to create actual
indexes on the existing partitions; create them in future partitions
also.
As a convenience gadget, if the new index definition matches some
existing index in partitions, these are picked up and used instead of
creating new ones. Whichever way these indexes come about, they become
attached to the index on the parent table and are dropped alongside it,
and cannot be dropped in isolation unless they are detached first.
To support pg_dump'ing these indexes, add commands
CREATE INDEX ON ONLY <table>
(which creates the index on the parent partitioned table, without
recursing) and
ALTER INDEX ATTACH PARTITION
(which is used after the indexes have been created individually on each
partition, to attach them to the parent index). These reconstruct prior
database state exactly.
Reviewed-by: (in alphabetical order) Peter Eisentraut, Robert Haas, Amit
Langote, Jesper Pedersen, Simon Riggs, David Rowley
Discussion: https://postgr.es/m/20171113170646.gzweigyrgg6pwsg4@alvherre.pgsql
2018-01-19 15:49:22 +01:00
|
|
|
/*
|
|
|
|
* CompareIndexInfo
|
|
|
|
* Return whether the properties of two indexes (in different tables)
|
|
|
|
* indicate that they have the "same" definitions.
|
|
|
|
*
|
|
|
|
* Note: passing collations and opfamilies separately is a kludge. Adding
|
|
|
|
* them to IndexInfo may result in better coding here and elsewhere.
|
|
|
|
*
|
2019-12-18 08:23:02 +01:00
|
|
|
* Use build_attrmap_by_name(index2, index1) to build the attmap.
|
Local partitioned indexes
When CREATE INDEX is run on a partitioned table, create catalog entries
for an index on the partitioned table (which is just a placeholder since
the table proper has no data of its own), and recurse to create actual
indexes on the existing partitions; create them in future partitions
also.
As a convenience gadget, if the new index definition matches some
existing index in partitions, these are picked up and used instead of
creating new ones. Whichever way these indexes come about, they become
attached to the index on the parent table and are dropped alongside it,
and cannot be dropped on isolation unless they are detached first.
To support pg_dump'ing these indexes, add commands
CREATE INDEX ON ONLY <table>
(which creates the index on the parent partitioned table, without
recursing) and
ALTER INDEX ATTACH PARTITION
(which is used after the indexes have been created individually on each
partition, to attach them to the parent index). These reconstruct prior
database state exactly.
Reviewed-by: (in alphabetical order) Peter Eisentraut, Robert Haas, Amit
Langote, Jesper Pedersen, Simon Riggs, David Rowley
Discussion: https://postgr.es/m/20171113170646.gzweigyrgg6pwsg4@alvherre.pgsql
2018-01-19 15:49:22 +01:00
|
|
|
*/
|
|
|
|
bool
|
|
|
|
CompareIndexInfo(IndexInfo *info1, IndexInfo *info2,
|
|
|
|
Oid *collations1, Oid *collations2,
|
|
|
|
Oid *opfamilies1, Oid *opfamilies2,
|
2019-12-18 08:23:02 +01:00
|
|
|
AttrMap *attmap)
|
Local partitioned indexes
When CREATE INDEX is run on a partitioned table, create catalog entries
for an index on the partitioned table (which is just a placeholder since
the table proper has no data of its own), and recurse to create actual
indexes on the existing partitions; create them in future partitions
also.
As a convenience gadget, if the new index definition matches some
existing index in partitions, these are picked up and used instead of
creating new ones. Whichever way these indexes come about, they become
attached to the index on the parent table and are dropped alongside it,
and cannot be dropped on isolation unless they are detached first.
To support pg_dump'ing these indexes, add commands
CREATE INDEX ON ONLY <table>
(which creates the index on the parent partitioned table, without
recursing) and
ALTER INDEX ATTACH PARTITION
(which is used after the indexes have been created individually on each
partition, to attach them to the parent index). These reconstruct prior
database state exactly.
Reviewed-by: (in alphabetical order) Peter Eisentraut, Robert Haas, Amit
Langote, Jesper Pedersen, Simon Riggs, David Rowley
Discussion: https://postgr.es/m/20171113170646.gzweigyrgg6pwsg4@alvherre.pgsql
2018-01-19 15:49:22 +01:00
|
|
|
{
|
2018-04-26 20:47:16 +02:00
|
|
|
int i;
|
Local partitioned indexes
When CREATE INDEX is run on a partitioned table, create catalog entries
for an index on the partitioned table (which is just a placeholder since
the table proper has no data of its own), and recurse to create actual
indexes on the existing partitions; create them in future partitions
also.
As a convenience gadget, if the new index definition matches some
existing index in partitions, these are picked up and used instead of
creating new ones. Whichever way these indexes come about, they become
attached to the index on the parent table and are dropped alongside it,
and cannot be dropped on isolation unless they are detached first.
To support pg_dump'ing these indexes, add commands
CREATE INDEX ON ONLY <table>
(which creates the index on the parent partitioned table, without
recursing) and
ALTER INDEX ATTACH PARTITION
(which is used after the indexes have been created individually on each
partition, to attach them to the parent index). These reconstruct prior
database state exactly.
Reviewed-by: (in alphabetical order) Peter Eisentraut, Robert Haas, Amit
Langote, Jesper Pedersen, Simon Riggs, David Rowley
Discussion: https://postgr.es/m/20171113170646.gzweigyrgg6pwsg4@alvherre.pgsql
2018-01-19 15:49:22 +01:00
|
|
|
|
|
|
|
if (info1->ii_Unique != info2->ii_Unique)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
/* indexes are only equivalent if they have the same access method */
|
|
|
|
if (info1->ii_Am != info2->ii_Am)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
/* and same number of attributes */
|
|
|
|
if (info1->ii_NumIndexAttrs != info2->ii_NumIndexAttrs)
|
|
|
|
return false;
|
|
|
|
|
2018-04-12 15:37:22 +02:00
|
|
|
/* and same number of key attributes */
|
|
|
|
if (info1->ii_NumIndexKeyAttrs != info2->ii_NumIndexKeyAttrs)
|
|
|
|
return false;
|
|
|
|
|
Local partitioned indexes
When CREATE INDEX is run on a partitioned table, create catalog entries
for an index on the partitioned table (which is just a placeholder since
the table proper has no data of its own), and recurse to create actual
indexes on the existing partitions; create them in future partitions
also.
As a convenience gadget, if the new index definition matches some
existing index in partitions, these are picked up and used instead of
creating new ones. Whichever way these indexes come about, they become
attached to the index on the parent table and are dropped alongside it,
and cannot be dropped on isolation unless they are detached first.
To support pg_dump'ing these indexes, add commands
CREATE INDEX ON ONLY <table>
(which creates the index on the parent partitioned table, without
recursing) and
ALTER INDEX ATTACH PARTITION
(which is used after the indexes have been created individually on each
partition, to attach them to the parent index). These reconstruct prior
database state exactly.
Reviewed-by: (in alphabetical order) Peter Eisentraut, Robert Haas, Amit
Langote, Jesper Pedersen, Simon Riggs, David Rowley
Discussion: https://postgr.es/m/20171113170646.gzweigyrgg6pwsg4@alvherre.pgsql
2018-01-19 15:49:22 +01:00
|
|
|
/*
|
|
|
|
* and columns match through the attribute map (actual attribute numbers
|
|
|
|
* might differ!) Note that this implies that index columns that are
|
|
|
|
* expressions appear in the same positions. We will next compare the
|
|
|
|
* expressions themselves.
|
|
|
|
*/
|
|
|
|
for (i = 0; i < info1->ii_NumIndexAttrs; i++)
|
|
|
|
{
|
2019-12-18 08:23:02 +01:00
|
|
|
if (attmap->maplen < info2->ii_IndexAttrNumbers[i])
|
Local partitioned indexes
When CREATE INDEX is run on a partitioned table, create catalog entries
for an index on the partitioned table (which is just a placeholder since
the table proper has no data of its own), and recurse to create actual
indexes on the existing partitions; create them in future partitions
also.
As a convenience gadget, if the new index definition matches some
existing index in partitions, these are picked up and used instead of
creating new ones. Whichever way these indexes come about, they become
attached to the index on the parent table and are dropped alongside it,
and cannot be dropped on isolation unless they are detached first.
To support pg_dump'ing these indexes, add commands
CREATE INDEX ON ONLY <table>
(which creates the index on the parent partitioned table, without
recursing) and
ALTER INDEX ATTACH PARTITION
(which is used after the indexes have been created individually on each
partition, to attach them to the parent index). These reconstruct prior
database state exactly.
Reviewed-by: (in alphabetical order) Peter Eisentraut, Robert Haas, Amit
Langote, Jesper Pedersen, Simon Riggs, David Rowley
Discussion: https://postgr.es/m/20171113170646.gzweigyrgg6pwsg4@alvherre.pgsql
2018-01-19 15:49:22 +01:00
|
|
|
elog(ERROR, "incorrect attribute map");
|
|
|
|
|
2018-01-19 20:34:44 +01:00
|
|
|
/* ignore expressions at this stage */
|
2018-04-12 12:02:45 +02:00
|
|
|
if ((info1->ii_IndexAttrNumbers[i] != InvalidAttrNumber) &&
|
2019-12-18 08:23:02 +01:00
|
|
|
(attmap->attnums[info2->ii_IndexAttrNumbers[i] - 1] !=
|
2018-04-26 20:47:16 +02:00
|
|
|
info1->ii_IndexAttrNumbers[i]))
|
Local partitioned indexes
When CREATE INDEX is run on a partitioned table, create catalog entries
for an index on the partitioned table (which is just a placeholder since
the table proper has no data of its own), and recurse to create actual
indexes on the existing partitions; create them in future partitions
also.
As a convenience gadget, if the new index definition matches some
existing index in partitions, these are picked up and used instead of
creating new ones. Whichever way these indexes come about, they become
attached to the index on the parent table and are dropped alongside it,
and cannot be dropped on isolation unless they are detached first.
To support pg_dump'ing these indexes, add commands
CREATE INDEX ON ONLY <table>
(which creates the index on the parent partitioned table, without
recursing) and
ALTER INDEX ATTACH PARTITION
(which is used after the indexes have been created individually on each
partition, to attach them to the parent index). These reconstruct prior
database state exactly.
Reviewed-by: (in alphabetical order) Peter Eisentraut, Robert Haas, Amit
Langote, Jesper Pedersen, Simon Riggs, David Rowley
Discussion: https://postgr.es/m/20171113170646.gzweigyrgg6pwsg4@alvherre.pgsql
2018-01-19 15:49:22 +01:00
|
|
|
return false;
|
|
|
|
|
2018-04-12 15:37:22 +02:00
|
|
|
/* collation and opfamily is not valid for including columns */
|
|
|
|
if (i >= info1->ii_NumIndexKeyAttrs)
|
|
|
|
continue;
|
|
|
|
|
Local partitioned indexes
When CREATE INDEX is run on a partitioned table, create catalog entries
for an index on the partitioned table (which is just a placeholder since
the table proper has no data of its own), and recurse to create actual
indexes on the existing partitions; create them in future partitions
also.
As a convenience gadget, if the new index definition matches some
existing index in partitions, these are picked up and used instead of
creating new ones. Whichever way these indexes come about, they become
attached to the index on the parent table and are dropped alongside it,
and cannot be dropped on isolation unless they are detached first.
To support pg_dump'ing these indexes, add commands
CREATE INDEX ON ONLY <table>
(which creates the index on the parent partitioned table, without
recursing) and
ALTER INDEX ATTACH PARTITION
(which is used after the indexes have been created individually on each
partition, to attach them to the parent index). These reconstruct prior
database state exactly.
Reviewed-by: (in alphabetical order) Peter Eisentraut, Robert Haas, Amit
Langote, Jesper Pedersen, Simon Riggs, David Rowley
Discussion: https://postgr.es/m/20171113170646.gzweigyrgg6pwsg4@alvherre.pgsql
2018-01-19 15:49:22 +01:00
|
|
|
if (collations1[i] != collations2[i])
|
|
|
|
return false;
|
|
|
|
if (opfamilies1[i] != opfamilies2[i])
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* For expression indexes: either both are expression indexes, or neither
|
|
|
|
* is; if they are, make sure the expressions match.
|
|
|
|
*/
|
|
|
|
if ((info1->ii_Expressions != NIL) != (info2->ii_Expressions != NIL))
|
|
|
|
return false;
|
|
|
|
if (info1->ii_Expressions != NIL)
|
|
|
|
{
|
2018-04-26 20:47:16 +02:00
|
|
|
bool found_whole_row;
|
|
|
|
Node *mapped;
|
Local partitioned indexes
When CREATE INDEX is run on a partitioned table, create catalog entries
for an index on the partitioned table (which is just a placeholder since
the table proper has no data of its own), and recurse to create actual
indexes on the existing partitions; create them in future partitions
also.
As a convenience gadget, if the new index definition matches some
existing index in partitions, these are picked up and used instead of
creating new ones. Whichever way these indexes come about, they become
attached to the index on the parent table and are dropped alongside it,
and cannot be dropped on isolation unless they are detached first.
To support pg_dump'ing these indexes, add commands
CREATE INDEX ON ONLY <table>
(which creates the index on the parent partitioned table, without
recursing) and
ALTER INDEX ATTACH PARTITION
(which is used after the indexes have been created individually on each
partition, to attach them to the parent index). These reconstruct prior
database state exactly.
Reviewed-by: (in alphabetical order) Peter Eisentraut, Robert Haas, Amit
Langote, Jesper Pedersen, Simon Riggs, David Rowley
Discussion: https://postgr.es/m/20171113170646.gzweigyrgg6pwsg4@alvherre.pgsql
2018-01-19 15:49:22 +01:00
|
|
|
|
|
|
|
mapped = map_variable_attnos((Node *) info2->ii_Expressions,
|
2019-12-18 08:23:02 +01:00
|
|
|
1, 0, attmap,
|
Local partitioned indexes
When CREATE INDEX is run on a partitioned table, create catalog entries
for an index on the partitioned table (which is just a placeholder since
the table proper has no data of its own), and recurse to create actual
indexes on the existing partitions; create them in future partitions
also.
As a convenience gadget, if the new index definition matches some
existing index in partitions, these are picked up and used instead of
creating new ones. Whichever way these indexes come about, they become
attached to the index on the parent table and are dropped alongside it,
and cannot be dropped on isolation unless they are detached first.
To support pg_dump'ing these indexes, add commands
CREATE INDEX ON ONLY <table>
(which creates the index on the parent partitioned table, without
recursing) and
ALTER INDEX ATTACH PARTITION
(which is used after the indexes have been created individually on each
partition, to attach them to the parent index). These reconstruct prior
database state exactly.
Reviewed-by: (in alphabetical order) Peter Eisentraut, Robert Haas, Amit
Langote, Jesper Pedersen, Simon Riggs, David Rowley
Discussion: https://postgr.es/m/20171113170646.gzweigyrgg6pwsg4@alvherre.pgsql
2018-01-19 15:49:22 +01:00
|
|
|
InvalidOid, &found_whole_row);
|
|
|
|
if (found_whole_row)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* we could throw an error here, but seems out of scope for this
|
|
|
|
* routine.
|
|
|
|
*/
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!equal(info1->ii_Expressions, mapped))
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Partial index predicates must be identical, if they exist */
|
|
|
|
if ((info1->ii_Predicate == NULL) != (info2->ii_Predicate == NULL))
|
|
|
|
return false;
|
|
|
|
if (info1->ii_Predicate != NULL)
|
|
|
|
{
|
2018-04-26 20:47:16 +02:00
|
|
|
bool found_whole_row;
|
|
|
|
Node *mapped;
|
Local partitioned indexes
When CREATE INDEX is run on a partitioned table, create catalog entries
for an index on the partitioned table (which is just a placeholder since
the table proper has no data of its own), and recurse to create actual
indexes on the existing partitions; create them in future partitions
also.
As a convenience gadget, if the new index definition matches some
existing index in partitions, these are picked up and used instead of
creating new ones. Whichever way these indexes come about, they become
attached to the index on the parent table and are dropped alongside it,
and cannot be dropped on isolation unless they are detached first.
To support pg_dump'ing these indexes, add commands
CREATE INDEX ON ONLY <table>
(which creates the index on the parent partitioned table, without
recursing) and
ALTER INDEX ATTACH PARTITION
(which is used after the indexes have been created individually on each
partition, to attach them to the parent index). These reconstruct prior
database state exactly.
Reviewed-by: (in alphabetical order) Peter Eisentraut, Robert Haas, Amit
Langote, Jesper Pedersen, Simon Riggs, David Rowley
Discussion: https://postgr.es/m/20171113170646.gzweigyrgg6pwsg4@alvherre.pgsql
2018-01-19 15:49:22 +01:00
|
|
|
|
|
|
|
mapped = map_variable_attnos((Node *) info2->ii_Predicate,
|
2019-12-18 08:23:02 +01:00
|
|
|
1, 0, attmap,
|
Local partitioned indexes
When CREATE INDEX is run on a partitioned table, create catalog entries
for an index on the partitioned table (which is just a placeholder since
the table proper has no data of its own), and recurse to create actual
indexes on the existing partitions; create them in future partitions
also.
As a convenience gadget, if the new index definition matches some
existing index in partitions, these are picked up and used instead of
creating new ones. Whichever way these indexes come about, they become
attached to the index on the parent table and are dropped alongside it,
and cannot be dropped on isolation unless they are detached first.
To support pg_dump'ing these indexes, add commands
CREATE INDEX ON ONLY <table>
(which creates the index on the parent partitioned table, without
recursing) and
ALTER INDEX ATTACH PARTITION
(which is used after the indexes have been created individually on each
partition, to attach them to the parent index). These reconstruct prior
database state exactly.
Reviewed-by: (in alphabetical order) Peter Eisentraut, Robert Haas, Amit
Langote, Jesper Pedersen, Simon Riggs, David Rowley
Discussion: https://postgr.es/m/20171113170646.gzweigyrgg6pwsg4@alvherre.pgsql
2018-01-19 15:49:22 +01:00
|
|
|
InvalidOid, &found_whole_row);
|
|
|
|
if (found_whole_row)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* we could throw an error here, but seems out of scope for this
|
|
|
|
* routine.
|
|
|
|
*/
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (!equal(info1->ii_Predicate, mapped))
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* No support currently for comparing exclusion indexes. */
|
|
|
|
if (info1->ii_ExclusionOps != NULL || info2->ii_ExclusionOps != NULL)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
Add support for INSERT ... ON CONFLICT DO NOTHING/UPDATE.
The newly added ON CONFLICT clause allows to specify an alternative to
raising a unique or exclusion constraint violation error when inserting.
ON CONFLICT refers to constraints that can either be specified using a
inference clause (by specifying the columns of a unique constraint) or
by naming a unique or exclusion constraint. DO NOTHING avoids the
constraint violation, without touching the pre-existing row. DO UPDATE
SET ... [WHERE ...] updates the pre-existing tuple, and has access to
both the tuple proposed for insertion and the existing tuple; the
optional WHERE clause can be used to prevent an update from being
executed. The UPDATE SET and WHERE clauses have access to the tuple
proposed for insertion using the "magic" EXCLUDED alias, and to the
pre-existing tuple using the table name or its alias.
This feature is often referred to as upsert.
This is implemented using a new infrastructure called "speculative
insertion". It is an optimistic variant of regular insertion that first
does a pre-check for existing tuples and then attempts an insert. If a
violating tuple was inserted concurrently, the speculatively inserted
tuple is deleted and a new attempt is made. If the pre-check finds a
matching tuple the alternative DO NOTHING or DO UPDATE action is taken.
If the insertion succeeds without detecting a conflict, the tuple is
deemed inserted.
To handle the possible ambiguity between the excluded alias and a table
named excluded, and for convenience with long relation names, INSERT
INTO now can alias its target table.
Bumps catversion as stored rules change.
Author: Peter Geoghegan, with significant contributions from Heikki
Linnakangas and Andres Freund. Testing infrastructure by Jeff Janes.
Reviewed-By: Heikki Linnakangas, Andres Freund, Robert Haas, Simon Riggs,
Dean Rasheed, Stephen Frost and many others.
2015-05-08 05:31:36 +02:00
|
|
|
/* ----------------
|
|
|
|
* BuildSpeculativeIndexInfo
|
|
|
|
* Add extra state to IndexInfo record
|
|
|
|
*
|
|
|
|
* For unique indexes, we usually don't want to add info to the IndexInfo for
|
|
|
|
* checking uniqueness, since the B-Tree AM handles that directly. However,
|
|
|
|
* in the case of speculative insertion, additional support is required.
|
|
|
|
*
|
|
|
|
* Do this processing here rather than in BuildIndexInfo() to not incur the
|
|
|
|
* overhead in the common non-speculative cases.
|
|
|
|
* ----------------
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
/*
 * BuildSpeculativeIndexInfo
 *		Add extra state to an IndexInfo, to support speculative insertion
 *		(INSERT ... ON CONFLICT) uniqueness checking.
 *
 * For each key column of the (btree) unique index, we cache the equality
 * operator, its underlying support procedure, and its strategy number, so
 * that the executor can re-check uniqueness without repeating these catalog
 * lookups per tuple.
 *
 * index	the open unique index relation
 * ii		the IndexInfo to augment; must already be marked unique
 */
void
BuildSpeculativeIndexInfo(Relation index, IndexInfo *ii)
{
	int			nkeycols;
	int			attno;

	nkeycols = IndexRelationGetNumberOfKeyAttributes(index);

	/*
	 * fetch info for checking unique indexes
	 */
	Assert(ii->ii_Unique);

	/* Speculative insertion is only supported for btree unique indexes. */
	if (index->rd_rel->relam != BTREE_AM_OID)
		elog(ERROR, "unexpected non-btree speculative unique index");

	/* Allocate one slot per key column for each cached lookup result. */
	ii->ii_UniqueOps = (Oid *) palloc(sizeof(Oid) * nkeycols);
	ii->ii_UniqueProcs = (Oid *) palloc(sizeof(Oid) * nkeycols);
	ii->ii_UniqueStrats = (uint16 *) palloc(sizeof(uint16) * nkeycols);

	/*
	 * We have to look up the operator's strategy number.  This provides a
	 * cross-check that the operator does match the index.
	 */
	/* We need the func OIDs and strategy numbers too */
	for (attno = 0; attno < nkeycols; attno++)
	{
		ii->ii_UniqueStrats[attno] = BTEqualStrategyNumber;

		/* Equality operator for this opfamily over the opclass input type. */
		ii->ii_UniqueOps[attno] =
			get_opfamily_member(index->rd_opfamily[attno],
								index->rd_opcintype[attno],
								index->rd_opcintype[attno],
								ii->ii_UniqueStrats[attno]);
		if (!OidIsValid(ii->ii_UniqueOps[attno]))
			elog(ERROR, "missing operator %d(%u,%u) in opfamily %u",
				 ii->ii_UniqueStrats[attno], index->rd_opcintype[attno],
				 index->rd_opcintype[attno], index->rd_opfamily[attno]);

		/* And the function implementing that operator. */
		ii->ii_UniqueProcs[attno] = get_opcode(ii->ii_UniqueOps[attno]);
	}
}
1996-07-09 08:22:35 +02:00
|
|
|
/* ----------------
|
1997-09-07 07:04:48 +02:00
|
|
|
* FormIndexDatum
|
2005-03-21 02:24:04 +01:00
|
|
|
* Construct values[] and isnull[] arrays for a new index tuple.
|
2000-07-15 00:18:02 +02:00
|
|
|
*
|
|
|
|
* indexInfo Info about the index
|
2005-03-16 22:38:10 +01:00
|
|
|
* slot Heap tuple for which we must prepare an index entry
|
2003-05-28 18:04:02 +02:00
|
|
|
* estate executor state for evaluating any index expressions
|
2005-03-21 02:24:04 +01:00
|
|
|
* values Array of index Datums (output area)
|
|
|
|
* isnull Array of is-null indicators (output area)
|
2000-07-15 00:18:02 +02:00
|
|
|
*
|
2003-05-28 18:04:02 +02:00
|
|
|
* When there are no index expressions, estate may be NULL. Otherwise it
|
|
|
|
* must be supplied, *and* the ecxt_scantuple slot of its per-tuple expr
|
|
|
|
* context must point to the heap tuple passed in.
|
|
|
|
*
|
2005-03-21 02:24:04 +01:00
|
|
|
* Notice we don't actually call index_form_tuple() here; we just prepare
|
2014-05-06 18:12:18 +02:00
|
|
|
* its input arrays values[] and isnull[]. This is because the index AM
|
2005-03-21 02:24:04 +01:00
|
|
|
* may wish to alter the data before storage.
|
1996-07-09 08:22:35 +02:00
|
|
|
* ----------------
|
|
|
|
*/
|
|
|
|
void
|
2000-07-15 00:18:02 +02:00
|
|
|
FormIndexDatum(IndexInfo *indexInfo,
|
2005-03-16 22:38:10 +01:00
|
|
|
TupleTableSlot *slot,
|
2003-05-28 18:04:02 +02:00
|
|
|
EState *estate,
|
2005-03-21 02:24:04 +01:00
|
|
|
Datum *values,
|
|
|
|
bool *isnull)
|
1996-07-09 08:22:35 +02:00
|
|
|
{
|
2004-05-26 06:41:50 +02:00
|
|
|
ListCell *indexpr_item;
|
2000-07-15 00:18:02 +02:00
|
|
|
int i;
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2003-05-28 18:04:02 +02:00
|
|
|
if (indexInfo->ii_Expressions != NIL &&
|
|
|
|
indexInfo->ii_ExpressionsState == NIL)
|
1997-09-07 07:04:48 +02:00
|
|
|
{
|
2003-05-28 18:04:02 +02:00
|
|
|
/* First time through, set up expression evaluation state */
|
Faster expression evaluation and targetlist projection.
This replaces the old, recursive tree-walk based evaluation, with
non-recursive, opcode dispatch based, expression evaluation.
Projection is now implemented as part of expression evaluation.
This both leads to significant performance improvements, and makes
future just-in-time compilation of expressions easier.
The speed gains primarily come from:
- non-recursive implementation reduces stack usage / overhead
- simple sub-expressions are implemented with a single jump, without
function calls
- sharing some state between different sub-expressions
- reduced amount of indirect/hard to predict memory accesses by laying
out operation metadata sequentially; including the avoidance of
nearly all of the previously used linked lists
- more code has been moved to expression initialization, avoiding
constant re-checks at evaluation time
Future just-in-time compilation (JIT) has become easier, as
demonstrated by released patches intended to be merged in a later
release, for primarily two reasons: Firstly, due to a stricter split
between expression initialization and evaluation, less code has to be
handled by the JIT. Secondly, due to the non-recursive nature of the
generated "instructions", less performance-critical code-paths can
easily be shared between interpreted and compiled evaluation.
The new framework allows for significant future optimizations. E.g.:
- basic infrastructure for to later reduce the per executor-startup
overhead of expression evaluation, by caching state in prepared
statements. That'd be helpful in OLTPish scenarios where
initialization overhead is measurable.
- optimizing the generated "code". A number of proposals for potential
work has already been made.
- optimizing the interpreter. Similarly a number of proposals have
been made here too.
The move of logic into the expression initialization step leads to some
backward-incompatible changes:
- Function permission checks are now done during expression
initialization, whereas previously they were done during
execution. In edge cases this can lead to errors being raised that
previously wouldn't have been, e.g. a NULL array being coerced to a
different array type previously didn't perform checks.
- The set of domain constraints to be checked, is now evaluated once
during expression initialization, previously it was re-built
every time a domain check was evaluated. For normal queries this
doesn't change much, but e.g. for plpgsql functions, which caches
ExprStates, the old set could stick around longer. The behavior
around might still change.
Author: Andres Freund, with significant changes by Tom Lane,
changes by Heikki Linnakangas
Reviewed-By: Tom Lane, Heikki Linnakangas
Discussion: https://postgr.es/m/20161206034955.bh33paeralxbtluv@alap3.anarazel.de
2017-03-14 23:45:36 +01:00
|
|
|
indexInfo->ii_ExpressionsState =
|
|
|
|
ExecPrepareExprList(indexInfo->ii_Expressions, estate);
|
2003-05-28 18:04:02 +02:00
|
|
|
/* Check caller has set up context correctly */
|
2005-03-16 22:38:10 +01:00
|
|
|
Assert(GetPerTupleExprContext(estate)->ecxt_scantuple == slot);
|
2003-05-28 18:04:02 +02:00
|
|
|
}
|
2004-05-26 06:41:50 +02:00
|
|
|
indexpr_item = list_head(indexInfo->ii_ExpressionsState);
|
1998-09-09 05:42:52 +02:00
|
|
|
|
2003-05-28 18:04:02 +02:00
|
|
|
for (i = 0; i < indexInfo->ii_NumIndexAttrs; i++)
|
|
|
|
{
|
2018-04-12 12:02:45 +02:00
|
|
|
int keycol = indexInfo->ii_IndexAttrNumbers[i];
|
2003-05-28 18:04:02 +02:00
|
|
|
Datum iDatum;
|
|
|
|
bool isNull;
|
1998-08-28 06:57:21 +02:00
|
|
|
|
Make TupleTableSlots extensible, finish split of existing slot type.
This commit completes the work prepared in 1a0586de36, splitting the
old TupleTableSlot implementation (which could store buffer, heap,
minimal and virtual slots) into four different slot types. As
described in the aforementioned commit, this is done with the goal of
making tuple table slots extensible, to allow for pluggable table
access methods.
To achieve runtime extensibility for TupleTableSlots, operations on
slots that can differ between types of slots are performed using the
TupleTableSlotOps struct provided at slot creation time. That
includes information from the size of TupleTableSlot struct to be
allocated, initialization, deforming etc. See the struct's definition
for more detailed information about callbacks TupleTableSlotOps.
I decided to rename TTSOpsBufferTuple to TTSOpsBufferHeapTuple and
ExecCopySlotTuple to ExecCopySlotHeapTuple, as that seems more
consistent with other naming introduced in recent patches.
There's plenty optimization potential in the slot implementation, but
according to benchmarking the state after this commit has similar
performance characteristics to before this set of changes, which seems
sufficient.
There's a few changes in execReplication.c that currently need to poke
through the slot abstraction, that'll be repaired once the pluggable
storage patchset provides the necessary infrastructure.
Author: Andres Freund and Ashutosh Bapat, with changes by Amit Khandekar
Discussion: https://postgr.es/m/20181105210039.hh4vvi4vwoq5ba2q@alap3.anarazel.de
2018-11-17 01:35:11 +01:00
|
|
|
if (keycol < 0)
|
|
|
|
iDatum = slot_getsysattr(slot, keycol, &isNull);
|
|
|
|
else if (keycol != 0)
|
2000-07-15 00:18:02 +02:00
|
|
|
{
|
2003-05-28 18:04:02 +02:00
|
|
|
/*
|
|
|
|
* Plain index column; get the value we need directly from the
|
|
|
|
* heap tuple.
|
|
|
|
*/
|
2005-03-16 22:38:10 +01:00
|
|
|
iDatum = slot_getattr(slot, keycol, &isNull);
|
2000-07-15 00:18:02 +02:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2003-05-28 18:04:02 +02:00
|
|
|
/*
|
|
|
|
* Index expression --- need to evaluate it.
|
|
|
|
*/
|
2004-05-26 06:41:50 +02:00
|
|
|
if (indexpr_item == NULL)
|
2003-05-28 18:04:02 +02:00
|
|
|
elog(ERROR, "wrong number of index expressions");
|
2004-05-26 06:41:50 +02:00
|
|
|
iDatum = ExecEvalExprSwitchContext((ExprState *) lfirst(indexpr_item),
|
2005-10-15 04:49:52 +02:00
|
|
|
GetPerTupleExprContext(estate),
|
2017-01-19 23:12:38 +01:00
|
|
|
&isNull);
|
Represent Lists as expansible arrays, not chains of cons-cells.
Originally, Postgres Lists were a more or less exact reimplementation of
Lisp lists, which consist of chains of separately-allocated cons cells,
each having a value and a next-cell link. We'd hacked that once before
(commit d0b4399d8) to add a separate List header, but the data was still
in cons cells. That makes some operations -- notably list_nth() -- O(N),
and it's bulky because of the next-cell pointers and per-cell palloc
overhead, and it's very cache-unfriendly if the cons cells end up
scattered around rather than being adjacent.
In this rewrite, we still have List headers, but the data is in a
resizable array of values, with no next-cell links. Now we need at
most two palloc's per List, and often only one, since we can allocate
some values in the same palloc call as the List header. (Of course,
extending an existing List may require repalloc's to enlarge the array.
But this involves just O(log N) allocations not O(N).)
Of course this is not without downsides. The key difficulty is that
addition or deletion of a list entry may now cause other entries to
move, which it did not before.
For example, that breaks foreach() and sister macros, which historically
used a pointer to the current cons-cell as loop state. We can repair
those macros transparently by making their actual loop state be an
integer list index; the exposed "ListCell *" pointer is no longer state
carried across loop iterations, but is just a derived value. (In
practice, modern compilers can optimize things back to having just one
loop state value, at least for simple cases with inline loop bodies.)
In principle, this is a semantics change for cases where the loop body
inserts or deletes list entries ahead of the current loop index; but
I found no such cases in the Postgres code.
The change is not at all transparent for code that doesn't use foreach()
but chases lists "by hand" using lnext(). The largest share of such
code in the backend is in loops that were maintaining "prev" and "next"
variables in addition to the current-cell pointer, in order to delete
list cells efficiently using list_delete_cell(). However, we no longer
need a previous-cell pointer to delete a list cell efficiently. Keeping
a next-cell pointer doesn't work, as explained above, but we can improve
matters by changing such code to use a regular foreach() loop and then
using the new macro foreach_delete_current() to delete the current cell.
(This macro knows how to update the associated foreach loop's state so
that no cells will be missed in the traversal.)
There remains a nontrivial risk of code assuming that a ListCell *
pointer will remain good over an operation that could now move the list
contents. To help catch such errors, list.c can be compiled with a new
define symbol DEBUG_LIST_MEMORY_USAGE that forcibly moves list contents
whenever that could possibly happen. This makes list operations
significantly more expensive so it's not normally turned on (though it
is on by default if USE_VALGRIND is on).
There are two notable API differences from the previous code:
* lnext() now requires the List's header pointer in addition to the
current cell's address.
* list_delete_cell() no longer requires a previous-cell argument.
These changes are somewhat unfortunate, but on the other hand code using
either function needs inspection to see if it is assuming anything
it shouldn't, so it's not all bad.
Programmers should be aware of these significant performance changes:
* list_nth() and related functions are now O(1); so there's no
major access-speed difference between a list and an array.
* Inserting or deleting a list element now takes time proportional to
the distance to the end of the list, due to moving the array elements.
(However, it typically *doesn't* require palloc or pfree, so except in
long lists it's probably still faster than before.) Notably, lcons()
used to be about the same cost as lappend(), but that's no longer true
if the list is long. Code that uses lcons() and list_delete_first()
to maintain a stack might usefully be rewritten to push and pop at the
end of the list rather than the beginning.
* There are now list_insert_nth...() and list_delete_nth...() functions
that add or remove a list cell identified by index. These have the
data-movement penalty explained above, but there's no search penalty.
* list_concat() and variants now copy the second list's data into
storage belonging to the first list, so there is no longer any
sharing of cells between the input lists. The second argument is
now declared "const List *" to reflect that it isn't changed.
This patch just does the minimum needed to get the new implementation
in place and fix bugs exposed by the regression tests. As suggested
by the foregoing, there's a fair amount of followup work remaining to
do.
Also, the ENABLE_LIST_COMPAT macros are finally removed in this
commit. Code using those should have been gone a dozen years ago.
Patch by me; thanks to David Rowley, Jesper Pedersen, and others
for review.
Discussion: https://postgr.es/m/11587.1550975080@sss.pgh.pa.us
2019-07-15 19:41:58 +02:00
|
|
|
indexpr_item = lnext(indexInfo->ii_ExpressionsState, indexpr_item);
|
2000-07-15 00:18:02 +02:00
|
|
|
}
|
2005-03-21 02:24:04 +01:00
|
|
|
values[i] = iDatum;
|
|
|
|
isnull[i] = isNull;
|
1997-09-07 07:04:48 +02:00
|
|
|
}
|
2000-07-15 00:18:02 +02:00
|
|
|
|
2004-05-26 06:41:50 +02:00
|
|
|
if (indexpr_item != NULL)
|
2003-05-28 18:04:02 +02:00
|
|
|
elog(ERROR, "wrong number of index expressions");
|
1996-07-09 08:22:35 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2006-05-11 01:18:39 +02:00
|
|
|
/*
|
2006-07-31 22:09:10 +02:00
|
|
|
* index_update_stats --- update pg_class entry after CREATE INDEX or REINDEX
|
2006-05-11 01:18:39 +02:00
|
|
|
*
|
|
|
|
* This routine updates the pg_class row of either an index or its parent
|
2014-05-06 18:12:18 +02:00
|
|
|
* relation after CREATE INDEX or REINDEX. Its rather bizarre API is designed
|
2006-07-31 22:09:10 +02:00
|
|
|
* to ensure we can do all the necessary work in just one update.
|
2000-11-08 23:10:03 +01:00
|
|
|
*
|
2006-05-11 01:18:39 +02:00
|
|
|
* hasindex: set relhasindex to this value
|
2011-10-14 23:23:01 +02:00
|
|
|
* reltuples: if >= 0, set reltuples to this value; else no change
|
2001-08-10 20:57:42 +02:00
|
|
|
*
|
2011-10-14 23:23:01 +02:00
|
|
|
* If reltuples >= 0, relpages and relallvisible are also updated (using
|
|
|
|
* RelationGetNumberOfBlocks() and visibilitymap_count()).
|
2001-08-10 20:57:42 +02:00
|
|
|
*
|
2000-11-08 23:10:03 +01:00
|
|
|
* NOTE: an important side-effect of this operation is that an SI invalidation
|
|
|
|
* message is sent out to all backends --- including me --- causing relcache
|
2014-05-06 18:12:18 +02:00
|
|
|
* entries to be flushed or updated with the new data. This must happen even
|
2006-05-11 01:18:39 +02:00
|
|
|
* if we find that no change is needed in the pg_class row. When updating
|
|
|
|
* a heap entry, this ensures that other backends find out about the new
|
|
|
|
* index. When updating an index, it's important because some index AMs
|
|
|
|
* expect a relcache flush to occur after REINDEX.
|
2000-02-18 10:30:20 +01:00
|
|
|
*/
|
2006-05-11 01:18:39 +02:00
|
|
|
static void
|
2009-12-07 06:22:23 +01:00
|
|
|
index_update_stats(Relation rel,
|
2013-07-03 20:24:09 +02:00
|
|
|
bool hasindex,
|
|
|
|
double reltuples)
|
2000-02-18 10:30:20 +01:00
|
|
|
{
|
2006-05-11 01:18:39 +02:00
|
|
|
Oid relid = RelationGetRelid(rel);
|
2000-02-18 10:30:20 +01:00
|
|
|
Relation pg_class;
|
|
|
|
HeapTuple tuple;
|
2006-05-11 01:18:39 +02:00
|
|
|
Form_pg_class rd_rel;
|
|
|
|
bool dirty;
|
2000-02-18 10:30:20 +01:00
|
|
|
|
2000-11-08 23:10:03 +01:00
|
|
|
/*
|
2006-07-31 22:09:10 +02:00
|
|
|
* We always update the pg_class row using a non-transactional,
|
|
|
|
* overwrite-in-place update. There are several reasons for this:
|
2006-05-11 01:18:39 +02:00
|
|
|
*
|
2006-07-31 22:09:10 +02:00
|
|
|
* 1. In bootstrap mode, we have no choice --- UPDATE wouldn't work.
|
|
|
|
*
|
2006-10-04 02:30:14 +02:00
|
|
|
* 2. We could be reindexing pg_class itself, in which case we can't move
|
2017-03-02 12:33:50 +01:00
|
|
|
* its pg_class row because CatalogTupleInsert/CatalogTupleUpdate might
|
|
|
|
* not know about all the indexes yet (see reindex_relation).
|
2006-07-31 22:09:10 +02:00
|
|
|
*
|
|
|
|
* 3. Because we execute CREATE INDEX with just share lock on the parent
|
|
|
|
* rel (to allow concurrent index creations), an ordinary update could
|
|
|
|
* suffer a tuple-concurrently-updated failure against another CREATE
|
|
|
|
* INDEX committing at about the same time. We can avoid that by having
|
|
|
|
* them both do nontransactional updates (we assume they will both be
|
|
|
|
* trying to change the pg_class row to the same thing, so it doesn't
|
|
|
|
* matter which goes first).
|
|
|
|
*
|
|
|
|
* It is safe to use a non-transactional update even though our
|
2014-05-06 18:12:18 +02:00
|
|
|
* transaction could still fail before committing. Setting relhasindex
|
2006-07-31 22:09:10 +02:00
|
|
|
* true is safe even if there are no indexes (VACUUM will eventually fix
|
2018-04-26 20:47:16 +02:00
|
|
|
* it). And of course the new relpages and reltuples counts are correct
|
|
|
|
* regardless. However, we don't want to change relpages (or
|
|
|
|
* relallvisible) if the caller isn't providing an updated reltuples
|
|
|
|
* count, because that would bollix the reltuples/relpages ratio which is
|
|
|
|
* what's really important.
|
2000-02-18 10:30:20 +01:00
|
|
|
*/
|
|
|
|
|
2019-01-21 19:32:19 +01:00
|
|
|
pg_class = table_open(RelationRelationId, RowExclusiveLock);
|
2006-05-11 01:18:39 +02:00
|
|
|
|
2006-07-31 22:09:10 +02:00
|
|
|
/*
|
2014-05-06 18:12:18 +02:00
|
|
|
* Make a copy of the tuple to update. Normally we use the syscache, but
|
2006-10-04 02:30:14 +02:00
|
|
|
* we can't rely on that during bootstrap or while reindexing pg_class
|
|
|
|
* itself.
|
2006-07-31 22:09:10 +02:00
|
|
|
*/
|
|
|
|
if (IsBootstrapProcessingMode() ||
|
|
|
|
ReindexIsProcessingHeap(RelationRelationId))
|
2000-02-18 10:30:20 +01:00
|
|
|
{
|
2006-05-11 01:18:39 +02:00
|
|
|
/* don't assume syscache will work */
|
tableam: Add and use scan APIs.
Too allow table accesses to be not directly dependent on heap, several
new abstractions are needed. Specifically:
1) Heap scans need to be generalized into table scans. Do this by
introducing TableScanDesc, which will be the "base class" for
individual AMs. This contains the AM independent fields from
HeapScanDesc.
The previous heap_{beginscan,rescan,endscan} et al. have been
replaced with a table_ version.
There's no direct replacement for heap_getnext(), as that returned
a HeapTuple, which is undesirable for a other AMs. Instead there's
table_scan_getnextslot(). But note that heap_getnext() lives on,
it's still used widely to access catalog tables.
This is achieved by new scan_begin, scan_end, scan_rescan,
scan_getnextslot callbacks.
2) The portion of parallel scans that's shared between backends need
to be able to do so without the user doing per-AM work. To achieve
that new parallelscan_{estimate, initialize, reinitialize}
callbacks are introduced, which operate on a new
ParallelTableScanDesc, which again can be subclassed by AMs.
As it is likely that several AMs are going to be block oriented,
block oriented callbacks that can be shared between such AMs are
provided and used by heap. table_block_parallelscan_{estimate,
intiialize, reinitialize} as callbacks, and
table_block_parallelscan_{nextpage, init} for use in AMs. These
operate on a ParallelBlockTableScanDesc.
3) Index scans need to be able to access tables to return a tuple, and
there needs to be state across individual accesses to the heap to
store state like buffers. That's now handled by introducing a
sort-of-scan IndexFetchTable, which again is intended to be
subclassed by individual AMs (for heap IndexFetchHeap).
The relevant callbacks for an AM are index_fetch_{end, begin,
reset} to create the necessary state, and index_fetch_tuple to
retrieve an indexed tuple. Note that index_fetch_tuple
implementations need to be smarter than just blindly fetching the
tuples for AMs that have optimizations similar to heap's HOT - the
currently alive tuple in the update chain needs to be fetched if
appropriate.
Similar to table_scan_getnextslot(), it's undesirable to continue
to return HeapTuples. Thus index_fetch_heap (might want to rename
that later) now accepts a slot as an argument. Core code doesn't
have a lot of call sites performing index scans without going
through the systable_* API (in contrast to loads of heap_getnext
calls and working directly with HeapTuples).
Index scans now store the result of a search in
IndexScanDesc->xs_heaptid, rather than xs_ctup->t_self. As the
target is not generally a HeapTuple anymore that seems cleaner.
To be able to sensible adapt code to use the above, two further
callbacks have been introduced:
a) slot_callbacks returns a TupleTableSlotOps* suitable for creating
slots capable of holding a tuple of the AMs
type. table_slot_callbacks() and table_slot_create() are based
upon that, but have additional logic to deal with views, foreign
tables, etc.
While this change could have been done separately, nearly all the
call sites that needed to be adapted for the rest of this commit
also would have been needed to be adapted for
table_slot_callbacks(), making separation not worthwhile.
b) tuple_satisfies_snapshot checks whether the tuple in a slot is
currently visible according to a snapshot. That's required as a few
places now don't have a buffer + HeapTuple around, but a
slot (which in heap's case internally has that information).
Additionally a few infrastructure changes were needed:
I) SysScanDesc, as used by systable_{beginscan, getnext} et al. now
internally uses a slot to keep track of tuples. While
systable_getnext() still returns HeapTuples, and will so for the
foreseeable future, the index API (see 1) above) now only deals with
slots.
The remainder, and largest part, of this commit is then adjusting all
scans in postgres to use the new APIs.
Author: Andres Freund, Haribabu Kommi, Alvaro Herrera
Discussion:
https://postgr.es/m/20180703070645.wchpu5muyto5n647@alap3.anarazel.de
https://postgr.es/m/20160812231527.GA690404@alvherre.pgsql
2019-03-11 20:46:41 +01:00
|
|
|
TableScanDesc pg_class_scan;
|
2000-02-18 10:30:20 +01:00
|
|
|
ScanKeyData key[1];
|
|
|
|
|
2003-11-12 22:15:59 +01:00
|
|
|
ScanKeyInit(&key[0],
|
Remove WITH OIDS support, change oid catalog column visibility.
Previously tables declared WITH OIDS, including a significant fraction
of the catalog tables, stored the oid column not as a normal column,
but as part of the tuple header.
This special column was not shown by default, which was somewhat odd,
as it's often (consider e.g. pg_class.oid) one of the more important
parts of a row. Neither pg_dump nor COPY included the contents of the
oid column by default.
The fact that the oid column was not an ordinary column necessitated a
significant amount of special case code to support oid columns. That
already was painful for the existing, but upcoming work aiming to make
table storage pluggable, would have required expanding and duplicating
that "specialness" significantly.
WITH OIDS has been deprecated since 2005 (commit ff02d0a05280e0).
Remove it.
Removing includes:
- CREATE TABLE and ALTER TABLE syntax for declaring the table to be
WITH OIDS has been removed (WITH (oids[ = true]) will error out)
- pg_dump does not support dumping tables declared WITH OIDS and will
issue a warning when dumping one (and ignore the oid column).
- restoring an pg_dump archive with pg_restore will warn when
restoring a table with oid contents (and ignore the oid column)
- COPY will refuse to load binary dump that includes oids.
- pg_upgrade will error out when encountering tables declared WITH
OIDS, they have to be altered to remove the oid column first.
- Functionality to access the oid of the last inserted row (like
plpgsql's RESULT_OID, spi's SPI_lastoid, ...) has been removed.
The syntax for declaring a table WITHOUT OIDS (or WITH (oids = false)
for CREATE TABLE) is still supported. While that requires a bit of
support code, it seems unnecessary to break applications / dumps that
do not use oids, and are explicit about not using them.
The biggest user of WITH OID columns was postgres' catalog. This
commit changes all 'magic' oid columns to be columns that are normally
declared and stored. To reduce unnecessary query breakage all the
newly added columns are still named 'oid', even if a table's column
naming scheme would indicate 'reloid' or such. This obviously
requires adapting a lot code, mostly replacing oid access via
HeapTupleGetOid() with access to the underlying Form_pg_*->oid column.
The bootstrap process now assigns oids for all oid columns in
genbki.pl that do not have an explicit value (starting at the largest
oid previously used), only oids assigned later by oids will be above
FirstBootstrapObjectId. As the oid column now is a normal column the
special bootstrap syntax for oids has been removed.
Oids are not automatically assigned during insertion anymore, all
backend code explicitly assigns oids with GetNewOidWithIndex(). For
the rare case that insertions into the catalog via SQL are called for
the new pg_nextoid() function can be used (which only works on catalog
tables).
The fact that oid columns on system tables are now normal columns
means that they will be included in the set of columns expanded
by * (i.e. SELECT * FROM pg_class will now include the table's oid,
previously it did not). It'd not technically be hard to hide oid
column by default, but that'd mean confusing behavior would either
have to be carried forward forever, or it'd cause breakage down the
line.
While it's not unlikely that further adjustments are needed, the
scope/invasiveness of the patch makes it worthwhile to get merge this
now. It's painful to maintain externally, too complicated to commit
after the code code freeze, and a dependency of a number of other
patches.
Catversion bump, for obvious reasons.
Author: Andres Freund, with contributions by John Naylor
Discussion: https://postgr.es/m/20180930034810.ywp2c7awz7opzcfr@alap3.anarazel.de
2018-11-21 00:36:57 +01:00
|
|
|
Anum_pg_class_oid,
|
2003-11-12 22:15:59 +01:00
|
|
|
BTEqualStrategyNumber, F_OIDEQ,
|
|
|
|
ObjectIdGetDatum(relid));
|
2000-02-18 10:30:20 +01:00
|
|
|
|
tableam: Add and use scan APIs.
Too allow table accesses to be not directly dependent on heap, several
new abstractions are needed. Specifically:
1) Heap scans need to be generalized into table scans. Do this by
introducing TableScanDesc, which will be the "base class" for
individual AMs. This contains the AM independent fields from
HeapScanDesc.
The previous heap_{beginscan,rescan,endscan} et al. have been
replaced with a table_ version.
There's no direct replacement for heap_getnext(), as that returned
a HeapTuple, which is undesirable for a other AMs. Instead there's
table_scan_getnextslot(). But note that heap_getnext() lives on,
it's still used widely to access catalog tables.
This is achieved by new scan_begin, scan_end, scan_rescan,
scan_getnextslot callbacks.
2) The portion of parallel scans that's shared between backends need
to be able to do so without the user doing per-AM work. To achieve
that new parallelscan_{estimate, initialize, reinitialize}
callbacks are introduced, which operate on a new
ParallelTableScanDesc, which again can be subclassed by AMs.
As it is likely that several AMs are going to be block oriented,
block oriented callbacks that can be shared between such AMs are
provided and used by heap. table_block_parallelscan_{estimate,
intiialize, reinitialize} as callbacks, and
table_block_parallelscan_{nextpage, init} for use in AMs. These
operate on a ParallelBlockTableScanDesc.
3) Index scans need to be able to access tables to return a tuple, and
there needs to be state across individual accesses to the heap to
store state like buffers. That's now handled by introducing a
sort-of-scan IndexFetchTable, which again is intended to be
subclassed by individual AMs (for heap IndexFetchHeap).
The relevant callbacks for an AM are index_fetch_{end, begin,
reset} to create the necessary state, and index_fetch_tuple to
retrieve an indexed tuple. Note that index_fetch_tuple
implementations need to be smarter than just blindly fetching the
tuples for AMs that have optimizations similar to heap's HOT - the
currently alive tuple in the update chain needs to be fetched if
appropriate.
Similar to table_scan_getnextslot(), it's undesirable to continue
to return HeapTuples. Thus index_fetch_heap (might want to rename
that later) now accepts a slot as an argument. Core code doesn't
have a lot of call sites performing index scans without going
through the systable_* API (in contrast to loads of heap_getnext
calls and working directly with HeapTuples).
Index scans now store the result of a search in
IndexScanDesc->xs_heaptid, rather than xs_ctup->t_self. As the
target is not generally a HeapTuple anymore that seems cleaner.
To be able to sensible adapt code to use the above, two further
callbacks have been introduced:
a) slot_callbacks returns a TupleTableSlotOps* suitable for creating
slots capable of holding a tuple of the AMs
type. table_slot_callbacks() and table_slot_create() are based
upon that, but have additional logic to deal with views, foreign
tables, etc.
While this change could have been done separately, nearly all the
call sites that needed to be adapted for the rest of this commit
also would have been needed to be adapted for
table_slot_callbacks(), making separation not worthwhile.
b) tuple_satisfies_snapshot checks whether the tuple in a slot is
currently visible according to a snapshot. That's required as a few
places now don't have a buffer + HeapTuple around, but a
slot (which in heap's case internally has that information).
Additionally a few infrastructure changes were needed:
I) SysScanDesc, as used by systable_{beginscan, getnext} et al. now
internally uses a slot to keep track of tuples. While
systable_getnext() still returns HeapTuples, and will so for the
foreseeable future, the index API (see 1) above) now only deals with
slots.
The remainder, and largest part, of this commit is then adjusting all
scans in postgres to use the new APIs.
Author: Andres Freund, Haribabu Kommi, Alvaro Herrera
Discussion:
https://postgr.es/m/20180703070645.wchpu5muyto5n647@alap3.anarazel.de
https://postgr.es/m/20160812231527.GA690404@alvherre.pgsql
2019-03-11 20:46:41 +01:00
|
|
|
pg_class_scan = table_beginscan_catalog(pg_class, 1, key);
|
2002-05-21 01:51:44 +02:00
|
|
|
tuple = heap_getnext(pg_class_scan, ForwardScanDirection);
|
2006-05-11 01:18:39 +02:00
|
|
|
tuple = heap_copytuple(tuple);
|
tableam: Add and use scan APIs.
Too allow table accesses to be not directly dependent on heap, several
new abstractions are needed. Specifically:
1) Heap scans need to be generalized into table scans. Do this by
introducing TableScanDesc, which will be the "base class" for
individual AMs. This contains the AM independent fields from
HeapScanDesc.
The previous heap_{beginscan,rescan,endscan} et al. have been
replaced with a table_ version.
There's no direct replacement for heap_getnext(), as that returned
a HeapTuple, which is undesirable for a other AMs. Instead there's
table_scan_getnextslot(). But note that heap_getnext() lives on,
it's still used widely to access catalog tables.
This is achieved by new scan_begin, scan_end, scan_rescan,
scan_getnextslot callbacks.
2) The portion of parallel scans that's shared between backends need
to be able to do so without the user doing per-AM work. To achieve
that new parallelscan_{estimate, initialize, reinitialize}
callbacks are introduced, which operate on a new
ParallelTableScanDesc, which again can be subclassed by AMs.
As it is likely that several AMs are going to be block oriented,
block oriented callbacks that can be shared between such AMs are
provided and used by heap. table_block_parallelscan_{estimate,
intiialize, reinitialize} as callbacks, and
table_block_parallelscan_{nextpage, init} for use in AMs. These
operate on a ParallelBlockTableScanDesc.
3) Index scans need to be able to access tables to return a tuple, and
there needs to be state across individual accesses to the heap to
store state like buffers. That's now handled by introducing a
sort-of-scan IndexFetchTable, which again is intended to be
subclassed by individual AMs (for heap IndexFetchHeap).
The relevant callbacks for an AM are index_fetch_{end, begin,
reset} to create the necessary state, and index_fetch_tuple to
retrieve an indexed tuple. Note that index_fetch_tuple
implementations need to be smarter than just blindly fetching the
tuples for AMs that have optimizations similar to heap's HOT - the
currently alive tuple in the update chain needs to be fetched if
appropriate.
Similar to table_scan_getnextslot(), it's undesirable to continue
to return HeapTuples. Thus index_fetch_heap (might want to rename
that later) now accepts a slot as an argument. Core code doesn't
have a lot of call sites performing index scans without going
through the systable_* API (in contrast to loads of heap_getnext
calls and working directly with HeapTuples).
Index scans now store the result of a search in
IndexScanDesc->xs_heaptid, rather than xs_ctup->t_self. As the
target is not generally a HeapTuple anymore that seems cleaner.
To be able to sensible adapt code to use the above, two further
callbacks have been introduced:
a) slot_callbacks returns a TupleTableSlotOps* suitable for creating
slots capable of holding a tuple of the AMs
type. table_slot_callbacks() and table_slot_create() are based
upon that, but have additional logic to deal with views, foreign
tables, etc.
While this change could have been done separately, nearly all the
call sites that needed to be adapted for the rest of this commit
also would have been needed to be adapted for
table_slot_callbacks(), making separation not worthwhile.
b) tuple_satisfies_snapshot checks whether the tuple in a slot is
currently visible according to a snapshot. That's required as a few
places now don't have a buffer + HeapTuple around, but a
slot (which in heap's case internally has that information).
Additionally a few infrastructure changes were needed:
I) SysScanDesc, as used by systable_{beginscan, getnext} et al. now
internally uses a slot to keep track of tuples. While
systable_getnext() still returns HeapTuples, and will so for the
foreseeable future, the index API (see 1) above) now only deals with
slots.
The remainder, and largest part, of this commit is then adjusting all
scans in postgres to use the new APIs.
Author: Andres Freund, Haribabu Kommi, Alvaro Herrera
Discussion:
https://postgr.es/m/20180703070645.wchpu5muyto5n647@alap3.anarazel.de
https://postgr.es/m/20160812231527.GA690404@alvherre.pgsql
2019-03-11 20:46:41 +01:00
|
|
|
table_endscan(pg_class_scan);
|
2000-02-18 10:30:20 +01:00
|
|
|
}
|
2006-07-31 22:09:10 +02:00
|
|
|
else
|
|
|
|
{
|
|
|
|
/* normal case, use syscache */
|
2010-02-14 19:42:19 +01:00
|
|
|
tuple = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relid));
|
2006-07-31 22:09:10 +02:00
|
|
|
}
|
2000-02-18 10:30:20 +01:00
|
|
|
|
|
|
|
if (!HeapTupleIsValid(tuple))
|
2003-07-21 03:59:11 +02:00
|
|
|
elog(ERROR, "could not find tuple for relation %u", relid);
|
2006-05-11 01:18:39 +02:00
|
|
|
rd_rel = (Form_pg_class) GETSTRUCT(tuple);
|
2000-02-18 10:30:20 +01:00
|
|
|
|
Local partitioned indexes
When CREATE INDEX is run on a partitioned table, create catalog entries
for an index on the partitioned table (which is just a placeholder since
the table proper has no data of its own), and recurse to create actual
indexes on the existing partitions; create them in future partitions
also.
As a convenience gadget, if the new index definition matches some
existing index in partitions, these are picked up and used instead of
creating new ones. Whichever way these indexes come about, they become
attached to the index on the parent table and are dropped alongside it,
and cannot be dropped on isolation unless they are detached first.
To support pg_dump'ing these indexes, add commands
CREATE INDEX ON ONLY <table>
(which creates the index on the parent partitioned table, without
recursing) and
ALTER INDEX ATTACH PARTITION
(which is used after the indexes have been created individually on each
partition, to attach them to the parent index). These reconstruct prior
database state exactly.
Reviewed-by: (in alphabetical order) Peter Eisentraut, Robert Haas, Amit
Langote, Jesper Pedersen, Simon Riggs, David Rowley
Discussion: https://postgr.es/m/20171113170646.gzweigyrgg6pwsg4@alvherre.pgsql
2018-01-19 15:49:22 +01:00
|
|
|
/* Should this be a more comprehensive test? */
|
|
|
|
Assert(rd_rel->relkind != RELKIND_PARTITIONED_INDEX);
|
|
|
|
|
2006-05-11 01:18:39 +02:00
|
|
|
/* Apply required updates, if any, to copied tuple */
|
2001-08-10 20:57:42 +02:00
|
|
|
|
2006-05-11 01:18:39 +02:00
|
|
|
dirty = false;
|
|
|
|
if (rd_rel->relhasindex != hasindex)
|
2002-03-03 18:47:56 +01:00
|
|
|
{
|
2006-05-11 01:18:39 +02:00
|
|
|
rd_rel->relhasindex = hasindex;
|
2002-03-03 18:47:56 +01:00
|
|
|
dirty = true;
|
|
|
|
}
|
2011-10-14 23:23:01 +02:00
|
|
|
|
|
|
|
if (reltuples >= 0)
|
2000-02-18 10:30:20 +01:00
|
|
|
{
|
2011-10-14 23:23:01 +02:00
|
|
|
BlockNumber relpages = RelationGetNumberOfBlocks(rel);
|
|
|
|
BlockNumber relallvisible;
|
|
|
|
|
|
|
|
if (rd_rel->relkind != RELKIND_INDEX)
|
Change the format of the VM fork to add a second bit per page.
The new bit indicates whether every tuple on the page is already frozen.
It is cleared only when the all-visible bit is cleared, and it can be
set only when we vacuum a page and find that every tuple on that page is
both visible to every transaction and in no need of any future
vacuuming.
A future commit will use this new bit to optimize away full-table scans
that would otherwise be triggered by XID wraparound considerations. A
page which is merely all-visible must still be scanned in that case, but
a page which is all-frozen need not be. This commit does not attempt
that optimization, although that optimization is the goal here. It
seems better to get the basic infrastructure in place first.
Per discussion, it's very desirable for pg_upgrade to automatically
migrate existing VM forks from the old format to the new format. That,
too, will be handled in a follow-on patch.
Masahiko Sawada, reviewed by Kyotaro Horiguchi, Fujii Masao, Amit
Kapila, Simon Riggs, Andres Freund, and others, and substantially
revised by me.
2016-03-02 03:49:41 +01:00
|
|
|
visibilitymap_count(rel, &relallvisible, NULL);
|
2017-06-21 20:39:04 +02:00
|
|
|
else /* don't bother for indexes */
|
2011-10-14 23:23:01 +02:00
|
|
|
relallvisible = 0;
|
|
|
|
|
|
|
|
if (rd_rel->relpages != (int32) relpages)
|
|
|
|
{
|
|
|
|
rd_rel->relpages = (int32) relpages;
|
|
|
|
dirty = true;
|
|
|
|
}
|
|
|
|
if (rd_rel->reltuples != (float4) reltuples)
|
|
|
|
{
|
|
|
|
rd_rel->reltuples = (float4) reltuples;
|
|
|
|
dirty = true;
|
|
|
|
}
|
|
|
|
if (rd_rel->relallvisible != (int32) relallvisible)
|
|
|
|
{
|
|
|
|
rd_rel->relallvisible = (int32) relallvisible;
|
|
|
|
dirty = true;
|
|
|
|
}
|
2006-05-11 01:18:39 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If anything changed, write out the tuple
|
|
|
|
*/
|
|
|
|
if (dirty)
|
|
|
|
{
|
2006-07-31 22:09:10 +02:00
|
|
|
heap_inplace_update(pg_class, tuple);
|
|
|
|
/* the above sends a cache inval message */
|
2000-11-08 23:10:03 +01:00
|
|
|
}
|
2002-03-03 18:47:56 +01:00
|
|
|
else
|
|
|
|
{
|
2006-05-11 01:18:39 +02:00
|
|
|
/* no need to change tuple, but force relcache inval anyway */
|
2004-02-10 02:55:27 +01:00
|
|
|
CacheInvalidateRelcacheByTuple(tuple);
|
2002-03-03 18:47:56 +01:00
|
|
|
}
|
2000-06-17 23:49:04 +02:00
|
|
|
|
2006-05-11 01:18:39 +02:00
|
|
|
heap_freetuple(tuple);
|
2000-02-18 10:30:20 +01:00
|
|
|
|
2019-01-21 19:32:19 +01:00
|
|
|
table_close(pg_class, RowExclusiveLock);
|
2000-02-18 10:30:20 +01:00
|
|
|
}
|
|
|
|
|
2000-12-08 07:17:58 +01:00
|
|
|
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-16 00:48:19 +02:00
|
|
|
/*
|
|
|
|
* index_build - invoke access-method-specific index build procedure
|
2006-05-11 01:18:39 +02:00
|
|
|
*
|
|
|
|
* On entry, the index's catalog entries are valid, and its physical disk
|
2014-05-06 18:12:18 +02:00
|
|
|
* file has been created but is empty. We call the AM-specific build
|
2006-05-11 01:18:39 +02:00
|
|
|
* procedure to fill in the index contents. We then update the pg_class
|
|
|
|
* entries of the index and heap relation as needed, using statistics
|
|
|
|
* returned by ambuild as well as data passed by the caller.
|
|
|
|
*
|
2011-04-20 00:50:56 +02:00
|
|
|
* isreindex indicates we are recreating a previously-existing index.
|
Support parallel btree index builds.
To make this work, tuplesort.c and logtape.c must also support
parallelism, so this patch adds that infrastructure and then applies
it to the particular case of parallel btree index builds. Testing
to date shows that this can often be 2-3x faster than a serial
index build.
The model for deciding how many workers to use is fairly primitive
at present, but it's better than not having the feature. We can
refine it as we get more experience.
Peter Geoghegan with some help from Rushabh Lathia. While Heikki
Linnakangas is not an author of this patch, he wrote other patches
without which this feature would not have been possible, and
therefore the release notes should possibly credit him as an author
of this feature. Reviewed by Claudio Freire, Heikki Linnakangas,
Thomas Munro, Tels, Amit Kapila, me.
Discussion: http://postgr.es/m/CAM3SWZQKM=Pzc=CAHzRixKjp2eO5Q0Jg1SoFQqeXFQ647JiwqQ@mail.gmail.com
Discussion: http://postgr.es/m/CAH2-Wz=AxWqDoVvGU7dq856S4r6sJAj6DBn7VMtigkB33N5eyg@mail.gmail.com
2018-02-02 19:25:55 +01:00
|
|
|
* parallel indicates if parallelism may be useful.
|
2011-04-20 00:50:56 +02:00
|
|
|
*
|
2006-05-11 01:18:39 +02:00
|
|
|
* Note: before Postgres 8.2, the passed-in heap and index Relations
|
|
|
|
* were automatically closed by this routine. This is no longer the case.
|
|
|
|
* The caller opened 'em, and the caller should close 'em.
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-16 00:48:19 +02:00
|
|
|
*/
|
|
|
|
void
|
|
|
|
index_build(Relation heapRelation,
|
|
|
|
Relation indexRelation,
|
2006-05-11 01:18:39 +02:00
|
|
|
IndexInfo *indexInfo,
|
Support parallel btree index builds.
To make this work, tuplesort.c and logtape.c must also support
parallelism, so this patch adds that infrastructure and then applies
it to the particular case of parallel btree index builds. Testing
to date shows that this can often be 2-3x faster than a serial
index build.
The model for deciding how many workers to use is fairly primitive
at present, but it's better than not having the feature. We can
refine it as we get more experience.
Peter Geoghegan with some help from Rushabh Lathia. While Heikki
Linnakangas is not an author of this patch, he wrote other patches
without which this feature would not have been possible, and
therefore the release notes should possibly credit him as an author
of this feature. Reviewed by Claudio Freire, Heikki Linnakangas,
Thomas Munro, Tels, Amit Kapila, me.
Discussion: http://postgr.es/m/CAM3SWZQKM=Pzc=CAHzRixKjp2eO5Q0Jg1SoFQqeXFQ647JiwqQ@mail.gmail.com
Discussion: http://postgr.es/m/CAH2-Wz=AxWqDoVvGU7dq856S4r6sJAj6DBn7VMtigkB33N5eyg@mail.gmail.com
2018-02-02 19:25:55 +01:00
|
|
|
bool isreindex,
|
|
|
|
bool parallel)
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-16 00:48:19 +02:00
|
|
|
{
|
2006-05-11 01:18:39 +02:00
|
|
|
IndexBuildResult *stats;
|
2008-01-03 22:23:15 +01:00
|
|
|
Oid save_userid;
|
Prevent indirect security attacks via changing session-local state within
an allegedly immutable index function. It was previously recognized that
we had to prevent such a function from executing SET/RESET ROLE/SESSION
AUTHORIZATION, or it could trivially obtain the privileges of the session
user. However, since there is in general no privilege checking for changes
of session-local state, it is also possible for such a function to change
settings in a way that might subvert later operations in the same session.
Examples include changing search_path to cause an unexpected function to
be called, or replacing an existing prepared statement with another one
that will execute a function of the attacker's choosing.
The present patch secures VACUUM, ANALYZE, and CREATE INDEX/REINDEX against
these threats, which are the same places previously deemed to need protection
against the SET ROLE issue. GUC changes are still allowed, since there are
many useful cases for that, but we prevent security problems by forcing a
rollback of any GUC change after completing the operation. Other cases are
handled by throwing an error if any change is attempted; these include temp
table creation, closing a cursor, and creating or deleting a prepared
statement. (In 7.4, the infrastructure to roll back GUC changes doesn't
exist, so we settle for rejecting changes of "search_path" in these contexts.)
Original report and patch by Gurjeet Singh, additional analysis by
Tom Lane.
Security: CVE-2009-4136
2009-12-09 22:57:51 +01:00
|
|
|
int save_sec_context;
|
|
|
|
int save_nestlevel;
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-16 00:48:19 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* sanity checks
|
|
|
|
*/
|
|
|
|
Assert(RelationIsValid(indexRelation));
|
2019-01-22 02:36:55 +01:00
|
|
|
Assert(PointerIsValid(indexRelation->rd_indam));
|
|
|
|
Assert(PointerIsValid(indexRelation->rd_indam->ambuild));
|
|
|
|
Assert(PointerIsValid(indexRelation->rd_indam->ambuildempty));
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-16 00:48:19 +02:00
|
|
|
|
Support parallel btree index builds.
To make this work, tuplesort.c and logtape.c must also support
parallelism, so this patch adds that infrastructure and then applies
it to the particular case of parallel btree index builds. Testing
to date shows that this can often be 2-3x faster than a serial
index build.
The model for deciding how many workers to use is fairly primitive
at present, but it's better than not having the feature. We can
refine it as we get more experience.
Peter Geoghegan with some help from Rushabh Lathia. While Heikki
Linnakangas is not an author of this patch, he wrote other patches
without which this feature would not have been possible, and
therefore the release notes should possibly credit him as an author
of this feature. Reviewed by Claudio Freire, Heikki Linnakangas,
Thomas Munro, Tels, Amit Kapila, me.
Discussion: http://postgr.es/m/CAM3SWZQKM=Pzc=CAHzRixKjp2eO5Q0Jg1SoFQqeXFQ647JiwqQ@mail.gmail.com
Discussion: http://postgr.es/m/CAH2-Wz=AxWqDoVvGU7dq856S4r6sJAj6DBn7VMtigkB33N5eyg@mail.gmail.com
2018-02-02 19:25:55 +01:00
|
|
|
/*
|
|
|
|
* Determine worker process details for parallel CREATE INDEX. Currently,
|
|
|
|
* only btree has support for parallel builds.
|
|
|
|
*
|
|
|
|
* Note that planner considers parallel safety for us.
|
|
|
|
*/
|
|
|
|
if (parallel && IsNormalProcessingMode() &&
|
|
|
|
indexRelation->rd_rel->relam == BTREE_AM_OID)
|
|
|
|
indexInfo->ii_ParallelWorkers =
|
|
|
|
plan_create_index_workers(RelationGetRelid(heapRelation),
|
|
|
|
RelationGetRelid(indexRelation));
|
|
|
|
|
|
|
|
if (indexInfo->ii_ParallelWorkers == 0)
|
|
|
|
ereport(DEBUG1,
|
|
|
|
(errmsg("building index \"%s\" on table \"%s\" serially",
|
|
|
|
RelationGetRelationName(indexRelation),
|
|
|
|
RelationGetRelationName(heapRelation))));
|
|
|
|
else
|
|
|
|
ereport(DEBUG1,
|
|
|
|
(errmsg_plural("building index \"%s\" on table \"%s\" with request for %d parallel worker",
|
|
|
|
"building index \"%s\" on table \"%s\" with request for %d parallel workers",
|
|
|
|
indexInfo->ii_ParallelWorkers,
|
|
|
|
RelationGetRelationName(indexRelation),
|
|
|
|
RelationGetRelationName(heapRelation),
|
|
|
|
indexInfo->ii_ParallelWorkers)));
|
2011-02-12 14:27:55 +01:00
|
|
|
|
2008-01-03 22:23:15 +01:00
|
|
|
/*
|
2009-06-11 16:49:15 +02:00
|
|
|
* Switch to the table owner's userid, so that any index functions are run
|
Prevent indirect security attacks via changing session-local state within
an allegedly immutable index function. It was previously recognized that
we had to prevent such a function from executing SET/RESET ROLE/SESSION
AUTHORIZATION, or it could trivially obtain the privileges of the session
user. However, since there is in general no privilege checking for changes
of session-local state, it is also possible for such a function to change
settings in a way that might subvert later operations in the same session.
Examples include changing search_path to cause an unexpected function to
be called, or replacing an existing prepared statement with another one
that will execute a function of the attacker's choosing.
The present patch secures VACUUM, ANALYZE, and CREATE INDEX/REINDEX against
these threats, which are the same places previously deemed to need protection
against the SET ROLE issue. GUC changes are still allowed, since there are
many useful cases for that, but we prevent security problems by forcing a
rollback of any GUC change after completing the operation. Other cases are
handled by throwing an error if any change is attempted; these include temp
table creation, closing a cursor, and creating or deleting a prepared
statement. (In 7.4, the infrastructure to roll back GUC changes doesn't
exist, so we settle for rejecting changes of "search_path" in these contexts.)
Original report and patch by Gurjeet Singh, additional analysis by
Tom Lane.
Security: CVE-2009-4136
2009-12-09 22:57:51 +01:00
|
|
|
* as that user. Also lock down security-restricted operations and
|
|
|
|
* arrange to make GUC variable changes local to this command.
|
2008-01-03 22:23:15 +01:00
|
|
|
*/
|
Prevent indirect security attacks via changing session-local state within
an allegedly immutable index function. It was previously recognized that
we had to prevent such a function from executing SET/RESET ROLE/SESSION
AUTHORIZATION, or it could trivially obtain the privileges of the session
user. However, since there is in general no privilege checking for changes
of session-local state, it is also possible for such a function to change
settings in a way that might subvert later operations in the same session.
Examples include changing search_path to cause an unexpected function to
be called, or replacing an existing prepared statement with another one
that will execute a function of the attacker's choosing.
The present patch secures VACUUM, ANALYZE, and CREATE INDEX/REINDEX against
these threats, which are the same places previously deemed to need protection
against the SET ROLE issue. GUC changes are still allowed, since there are
many useful cases for that, but we prevent security problems by forcing a
rollback of any GUC change after completing the operation. Other cases are
handled by throwing an error if any change is attempted; these include temp
table creation, closing a cursor, and creating or deleting a prepared
statement. (In 7.4, the infrastructure to roll back GUC changes doesn't
exist, so we settle for rejecting changes of "search_path" in these contexts.)
Original report and patch by Gurjeet Singh, additional analysis by
Tom Lane.
Security: CVE-2009-4136
2009-12-09 22:57:51 +01:00
|
|
|
GetUserIdAndSecContext(&save_userid, &save_sec_context);
|
|
|
|
SetUserIdAndSecContext(heapRelation->rd_rel->relowner,
|
|
|
|
save_sec_context | SECURITY_RESTRICTED_OPERATION);
|
|
|
|
save_nestlevel = NewGUCNestLevel();
|
2008-01-03 22:23:15 +01:00
|
|
|
|
Report progress of CREATE INDEX operations
This uses the progress reporting infrastructure added by c16dc1aca5e0,
adding support for CREATE INDEX and CREATE INDEX CONCURRENTLY.
There are two pieces to this: one is index-AM-agnostic, and the other is
AM-specific. The latter is fairly elaborate for btrees, including
reportage for parallel index builds and the separate phases that btree
index creation uses; other index AMs, which are much simpler in their
building procedures, have simplistic reporting only, but that seems
sufficient, at least for non-concurrent builds.
The index-AM-agnostic part is fairly complete, providing insight into
the CONCURRENTLY wait phases as well as block-based progress during the
index validation table scan. (The index validation index scan requires
patching each AM, which has not been included here.)
Reviewers: Rahila Syed, Pavan Deolasee, Tatsuro Yamada
Discussion: https://postgr.es/m/20181220220022.mg63bhk26zdpvmcj@alvherre.pgsql
2019-04-02 20:18:08 +02:00
|
|
|
/* Set up initial progress report status */
|
|
|
|
{
|
|
|
|
const int index[] = {
|
|
|
|
PROGRESS_CREATEIDX_PHASE,
|
|
|
|
PROGRESS_CREATEIDX_SUBPHASE,
|
|
|
|
PROGRESS_CREATEIDX_TUPLES_DONE,
|
|
|
|
PROGRESS_CREATEIDX_TUPLES_TOTAL,
|
|
|
|
PROGRESS_SCAN_BLOCKS_DONE,
|
|
|
|
PROGRESS_SCAN_BLOCKS_TOTAL
|
|
|
|
};
|
2019-05-22 18:55:34 +02:00
|
|
|
const int64 val[] = {
|
Report progress of CREATE INDEX operations
This uses the progress reporting infrastructure added by c16dc1aca5e0,
adding support for CREATE INDEX and CREATE INDEX CONCURRENTLY.
There are two pieces to this: one is index-AM-agnostic, and the other is
AM-specific. The latter is fairly elaborate for btrees, including
reportage for parallel index builds and the separate phases that btree
index creation uses; other index AMs, which are much simpler in their
building procedures, have simplistic reporting only, but that seems
sufficient, at least for non-concurrent builds.
The index-AM-agnostic part is fairly complete, providing insight into
the CONCURRENTLY wait phases as well as block-based progress during the
index validation table scan. (The index validation index scan requires
patching each AM, which has not been included here.)
Reviewers: Rahila Syed, Pavan Deolasee, Tatsuro Yamada
Discussion: https://postgr.es/m/20181220220022.mg63bhk26zdpvmcj@alvherre.pgsql
2019-04-02 20:18:08 +02:00
|
|
|
PROGRESS_CREATEIDX_PHASE_BUILD,
|
|
|
|
PROGRESS_CREATEIDX_SUBPHASE_INITIALIZE,
|
|
|
|
0, 0, 0, 0
|
|
|
|
};
|
|
|
|
|
|
|
|
pgstat_progress_update_multi_param(6, index, val);
|
|
|
|
}
|
|
|
|
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-16 00:48:19 +02:00
|
|
|
/*
|
|
|
|
* Call the access method's build procedure
|
|
|
|
*/
|
2019-01-22 02:36:55 +01:00
|
|
|
stats = indexRelation->rd_indam->ambuild(heapRelation, indexRelation,
|
|
|
|
indexInfo);
|
2006-05-11 01:18:39 +02:00
|
|
|
Assert(PointerIsValid(stats));
|
|
|
|
|
2010-12-29 12:48:53 +01:00
|
|
|
/*
|
2011-06-02 19:28:52 +02:00
|
|
|
* If this is an unlogged index, we may need to write out an init fork for
|
|
|
|
* it -- but we must first check whether one already exists. If, for
|
|
|
|
* example, an unlogged relation is truncated in the transaction that
|
|
|
|
* created it, or truncated twice in a subsequent transaction, the
|
|
|
|
* relfilenode won't change, and nothing needs to be done here.
|
2010-12-29 12:48:53 +01:00
|
|
|
*/
|
2014-11-15 05:19:49 +01:00
|
|
|
if (indexRelation->rd_rel->relpersistence == RELPERSISTENCE_UNLOGGED &&
|
2011-06-06 04:30:04 +02:00
|
|
|
!smgrexists(indexRelation->rd_smgr, INIT_FORKNUM))
|
2010-12-29 12:48:53 +01:00
|
|
|
{
|
|
|
|
RelationOpenSmgr(indexRelation);
|
|
|
|
smgrcreate(indexRelation->rd_smgr, INIT_FORKNUM, false);
|
2019-01-22 02:36:55 +01:00
|
|
|
indexRelation->rd_indam->ambuildempty(indexRelation);
|
2010-12-29 12:48:53 +01:00
|
|
|
}
|
|
|
|
|
2007-09-20 19:56:33 +02:00
|
|
|
/*
|
2007-11-15 22:14:46 +01:00
|
|
|
* If we found any potentially broken HOT chains, mark the index as not
|
|
|
|
* being usable until the current transaction is below the event horizon.
|
2016-06-10 16:25:31 +02:00
|
|
|
* See src/backend/access/heap/README.HOT for discussion. Also set this
|
|
|
|
* if early pruning/vacuuming is enabled for the heap relation. While it
|
|
|
|
* might become safe to use the index earlier based on actual cleanup
|
|
|
|
* activity and other active transactions, the test for that would be much
|
|
|
|
* more complex and would require some form of blocking, so keep it simple
|
|
|
|
* and fast by just using the current transaction.
|
2011-04-20 00:50:56 +02:00
|
|
|
*
|
|
|
|
* However, when reindexing an existing index, we should do nothing here.
|
|
|
|
* Any HOT chains that are broken with respect to the index must predate
|
|
|
|
* the index's original creation, so there is no need to change the
|
2011-06-09 20:32:50 +02:00
|
|
|
* index's usability horizon. Moreover, we *must not* try to change the
|
|
|
|
* index's pg_index entry while reindexing pg_index itself, and this
|
2016-06-10 16:25:31 +02:00
|
|
|
* optimization nicely prevents that. The more complex rules needed for a
|
|
|
|
* reindex are handled separately after this function returns.
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
*
|
|
|
|
* We also need not set indcheckxmin during a concurrent index build,
|
|
|
|
* because we won't set indisvalid true until all transactions that care
|
2016-06-10 16:25:31 +02:00
|
|
|
* about the broken HOT chains or early pruning/vacuuming are gone.
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
*
|
|
|
|
* Therefore, this code path can only be taken during non-concurrent
|
|
|
|
* CREATE INDEX. Thus the fact that heap_update will set the pg_index
|
|
|
|
* tuple's xmin doesn't matter, because that tuple was created in the
|
2014-05-06 18:12:18 +02:00
|
|
|
* current transaction anyway. That also means we don't need to worry
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
* about any concurrent readers of the tuple; no other transaction can see
|
|
|
|
* it yet.
|
|
|
|
*/
|
2016-06-10 16:25:31 +02:00
|
|
|
if ((indexInfo->ii_BrokenHotChain || EarlyPruningEnabled(heapRelation)) &&
|
|
|
|
!isreindex &&
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
!indexInfo->ii_Concurrent)
|
2007-09-20 19:56:33 +02:00
|
|
|
{
|
2007-11-15 22:14:46 +01:00
|
|
|
Oid indexId = RelationGetRelid(indexRelation);
|
|
|
|
Relation pg_index;
|
|
|
|
HeapTuple indexTuple;
|
2007-09-20 19:56:33 +02:00
|
|
|
Form_pg_index indexForm;
|
|
|
|
|
2019-01-21 19:32:19 +01:00
|
|
|
pg_index = table_open(IndexRelationId, RowExclusiveLock);
|
2007-09-20 19:56:33 +02:00
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
indexTuple = SearchSysCacheCopy1(INDEXRELID,
|
|
|
|
ObjectIdGetDatum(indexId));
|
2007-09-20 19:56:33 +02:00
|
|
|
if (!HeapTupleIsValid(indexTuple))
|
|
|
|
elog(ERROR, "cache lookup failed for index %u", indexId);
|
|
|
|
indexForm = (Form_pg_index) GETSTRUCT(indexTuple);
|
|
|
|
|
2011-04-20 00:50:56 +02:00
|
|
|
/* If it's a new index, indcheckxmin shouldn't be set ... */
|
|
|
|
Assert(!indexForm->indcheckxmin);
|
|
|
|
|
2007-09-20 19:56:33 +02:00
|
|
|
indexForm->indcheckxmin = true;
|
2017-01-31 22:42:24 +01:00
|
|
|
CatalogTupleUpdate(pg_index, &indexTuple->t_self, indexTuple);
|
2007-09-20 19:56:33 +02:00
|
|
|
|
|
|
|
heap_freetuple(indexTuple);
|
2019-01-21 19:32:19 +01:00
|
|
|
table_close(pg_index, RowExclusiveLock);
|
2007-09-20 19:56:33 +02:00
|
|
|
}
|
|
|
|
|
2006-05-11 01:18:39 +02:00
|
|
|
/*
|
|
|
|
* Update heap and index pg_class rows
|
|
|
|
*/
|
|
|
|
index_update_stats(heapRelation,
|
|
|
|
true,
|
|
|
|
stats->heap_tuples);
|
|
|
|
|
|
|
|
index_update_stats(indexRelation,
|
|
|
|
false,
|
|
|
|
stats->index_tuples);
|
|
|
|
|
2011-06-06 04:30:04 +02:00
|
|
|
/* Make the updated catalog row versions visible */
|
2006-05-11 01:18:39 +02:00
|
|
|
CommandCounterIncrement();
|
2011-06-06 04:30:04 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If it's for an exclusion constraint, make a second pass over the heap
|
2014-05-06 18:12:18 +02:00
|
|
|
* to verify that the constraint is satisfied. We must not do this until
|
2011-06-06 04:30:04 +02:00
|
|
|
* the index is fully valid. (Broken HOT chains shouldn't matter, though;
|
|
|
|
* see comments for IndexCheckExclusion.)
|
|
|
|
*/
|
|
|
|
if (indexInfo->ii_ExclusionOps != NULL)
|
|
|
|
IndexCheckExclusion(heapRelation, indexRelation, indexInfo);
|
|
|
|
|
|
|
|
/* Roll back any GUC changes executed by index functions */
|
|
|
|
AtEOXact_GUC(false, save_nestlevel);
|
|
|
|
|
|
|
|
/* Restore userid and security context */
|
|
|
|
SetUserIdAndSecContext(save_userid, save_sec_context);
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-16 00:48:19 +02:00
|
|
|
}
|
|
|
|
|
2009-12-07 06:22:23 +01:00
|
|
|
/*
|
|
|
|
* IndexCheckExclusion - verify that a new exclusion constraint is satisfied
|
|
|
|
*
|
|
|
|
* When creating an exclusion constraint, we first build the index normally
|
|
|
|
* and then rescan the heap to check for conflicts. We assume that we only
|
Use an MVCC snapshot, rather than SnapshotNow, for catalog scans.
SnapshotNow scans have the undesirable property that, in the face of
concurrent updates, the scan can fail to see either the old or the new
versions of the row. In many cases, we work around this by requiring
DDL operations to hold AccessExclusiveLock on the object being
modified; in some cases, the existing locking is inadequate and random
failures occur as a result. This commit doesn't change anything
related to locking, but will hopefully pave the way to allowing lock
strength reductions in the future.
The major issue has held us back from making this change in the past
is that taking an MVCC snapshot is significantly more expensive than
using a static special snapshot such as SnapshotNow. However, testing
of various worst-case scenarios reveals that this problem is not
severe except under fairly extreme workloads. To mitigate those
problems, we avoid retaking the MVCC snapshot for each new scan;
instead, we take a new snapshot only when invalidation messages have
been processed. The catcache machinery already requires that
invalidation messages be sent before releasing the related heavyweight
lock; else other backends might rely on locally-cached data rather
than scanning the catalog at all. Thus, making snapshot reuse
dependent on the same guarantees shouldn't break anything that wasn't
already subtly broken.
Patch by me. Review by Michael Paquier and Andres Freund.
2013-07-02 15:47:01 +02:00
|
|
|
* need to validate tuples that are live according to an up-to-date snapshot,
|
|
|
|
* and that these were correctly indexed even in the presence of broken HOT
|
|
|
|
* chains. This should be OK since we are holding at least ShareLock on the
|
|
|
|
* table, meaning there can be no uncommitted updates from other transactions.
|
2009-12-07 06:22:23 +01:00
|
|
|
* (Note: that wouldn't necessarily work for system catalogs, since many
|
|
|
|
* operations release write lock early on the system catalogs.)
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
IndexCheckExclusion(Relation heapRelation,
|
|
|
|
Relation indexRelation,
|
|
|
|
IndexInfo *indexInfo)
|
|
|
|
{
|
tableam: Add and use scan APIs.
To allow table accesses to not be directly dependent on heap, several
new abstractions are needed. Specifically:
1) Heap scans need to be generalized into table scans. Do this by
introducing TableScanDesc, which will be the "base class" for
individual AMs. This contains the AM independent fields from
HeapScanDesc.
The previous heap_{beginscan,rescan,endscan} et al. have been
replaced with a table_ version.
There's no direct replacement for heap_getnext(), as that returned
a HeapTuple, which is undesirable for other AMs. Instead there's
table_scan_getnextslot(). But note that heap_getnext() lives on,
it's still used widely to access catalog tables.
This is achieved by new scan_begin, scan_end, scan_rescan,
scan_getnextslot callbacks.
2) The portion of parallel scans that's shared between backends need
to be able to do so without the user doing per-AM work. To achieve
that new parallelscan_{estimate, initialize, reinitialize}
callbacks are introduced, which operate on a new
ParallelTableScanDesc, which again can be subclassed by AMs.
As it is likely that several AMs are going to be block oriented,
block oriented callbacks that can be shared between such AMs are
provided and used by heap. table_block_parallelscan_{estimate,
initialize, reinitialize} as callbacks, and
table_block_parallelscan_{nextpage, init} for use in AMs. These
operate on a ParallelBlockTableScanDesc.
3) Index scans need to be able to access tables to return a tuple, and
there needs to be state across individual accesses to the heap to
store state like buffers. That's now handled by introducing a
sort-of-scan IndexFetchTable, which again is intended to be
subclassed by individual AMs (for heap IndexFetchHeap).
The relevant callbacks for an AM are index_fetch_{end, begin,
reset} to create the necessary state, and index_fetch_tuple to
retrieve an indexed tuple. Note that index_fetch_tuple
implementations need to be smarter than just blindly fetching the
tuples for AMs that have optimizations similar to heap's HOT - the
currently alive tuple in the update chain needs to be fetched if
appropriate.
Similar to table_scan_getnextslot(), it's undesirable to continue
to return HeapTuples. Thus index_fetch_heap (might want to rename
that later) now accepts a slot as an argument. Core code doesn't
have a lot of call sites performing index scans without going
through the systable_* API (in contrast to loads of heap_getnext
calls and working directly with HeapTuples).
Index scans now store the result of a search in
IndexScanDesc->xs_heaptid, rather than xs_ctup->t_self. As the
target is not generally a HeapTuple anymore that seems cleaner.
To be able to sensibly adapt code to use the above, two further
callbacks have been introduced:
a) slot_callbacks returns a TupleTableSlotOps* suitable for creating
slots capable of holding a tuple of the AMs
type. table_slot_callbacks() and table_slot_create() are based
upon that, but have additional logic to deal with views, foreign
tables, etc.
While this change could have been done separately, nearly all the
call sites that needed to be adapted for the rest of this commit
also would have been needed to be adapted for
table_slot_callbacks(), making separation not worthwhile.
b) tuple_satisfies_snapshot checks whether the tuple in a slot is
currently visible according to a snapshot. That's required as a few
places now don't have a buffer + HeapTuple around, but a
slot (which in heap's case internally has that information).
Additionally a few infrastructure changes were needed:
I) SysScanDesc, as used by systable_{beginscan, getnext} et al. now
internally uses a slot to keep track of tuples. While
systable_getnext() still returns HeapTuples, and will so for the
foreseeable future, the index API (see 1) above) now only deals with
slots.
The remainder, and largest part, of this commit is then adjusting all
scans in postgres to use the new APIs.
Author: Andres Freund, Haribabu Kommi, Alvaro Herrera
Discussion:
https://postgr.es/m/20180703070645.wchpu5muyto5n647@alap3.anarazel.de
https://postgr.es/m/20160812231527.GA690404@alvherre.pgsql
2019-03-11 20:46:41 +01:00
|
|
|
TableScanDesc scan;
|
2009-12-07 06:22:23 +01:00
|
|
|
Datum values[INDEX_MAX_KEYS];
|
|
|
|
bool isnull[INDEX_MAX_KEYS];
|
Faster expression evaluation and targetlist projection.
This replaces the old, recursive tree-walk based evaluation, with
non-recursive, opcode dispatch based, expression evaluation.
Projection is now implemented as part of expression evaluation.
This both leads to significant performance improvements, and makes
future just-in-time compilation of expressions easier.
The speed gains primarily come from:
- non-recursive implementation reduces stack usage / overhead
- simple sub-expressions are implemented with a single jump, without
function calls
- sharing some state between different sub-expressions
- reduced amount of indirect/hard to predict memory accesses by laying
out operation metadata sequentially; including the avoidance of
nearly all of the previously used linked lists
- more code has been moved to expression initialization, avoiding
constant re-checks at evaluation time
Future just-in-time compilation (JIT) has become easier, as
demonstrated by released patches intended to be merged in a later
release, for primarily two reasons: Firstly, due to a stricter split
between expression initialization and evaluation, less code has to be
handled by the JIT. Secondly, due to the non-recursive nature of the
generated "instructions", less performance-critical code-paths can
easily be shared between interpreted and compiled evaluation.
The new framework allows for significant future optimizations. E.g.:
- basic infrastructure to later reduce the per executor-startup
overhead of expression evaluation, by caching state in prepared
statements. That'd be helpful in OLTPish scenarios where
initialization overhead is measurable.
- optimizing the generated "code". A number of proposals for potential
work has already been made.
- optimizing the interpreter. Similarly a number of proposals have
been made here too.
The move of logic into the expression initialization step leads to some
backward-incompatible changes:
- Function permission checks are now done during expression
initialization, whereas previously they were done during
execution. In edge cases this can lead to errors being raised that
previously wouldn't have been, e.g. a NULL array being coerced to a
different array type previously didn't perform checks.
- The set of domain constraints to be checked, is now evaluated once
during expression initialization, previously it was re-built
every time a domain check was evaluated. For normal queries this
doesn't change much, but e.g. for plpgsql functions, which cache
ExprStates, the old set could stick around longer. The behavior
in this area might still change.
Author: Andres Freund, with significant changes by Tom Lane,
changes by Heikki Linnakangas
Reviewed-By: Tom Lane, Heikki Linnakangas
Discussion: https://postgr.es/m/20161206034955.bh33paeralxbtluv@alap3.anarazel.de
2017-03-14 23:45:36 +01:00
|
|
|
ExprState *predicate;
|
2009-12-07 06:22:23 +01:00
|
|
|
TupleTableSlot *slot;
|
|
|
|
EState *estate;
|
|
|
|
ExprContext *econtext;
|
Use an MVCC snapshot, rather than SnapshotNow, for catalog scans.
SnapshotNow scans have the undesirable property that, in the face of
concurrent updates, the scan can fail to see either the old or the new
versions of the row. In many cases, we work around this by requiring
DDL operations to hold AccessExclusiveLock on the object being
modified; in some cases, the existing locking is inadequate and random
failures occur as a result. This commit doesn't change anything
related to locking, but will hopefully pave the way to allowing lock
strength reductions in the future.
The major issue that has held us back from making this change in the past
is that taking an MVCC snapshot is significantly more expensive than
using a static special snapshot such as SnapshotNow. However, testing
of various worst-case scenarios reveals that this problem is not
severe except under fairly extreme workloads. To mitigate those
problems, we avoid retaking the MVCC snapshot for each new scan;
instead, we take a new snapshot only when invalidation messages have
been processed. The catcache machinery already requires that
invalidation messages be sent before releasing the related heavyweight
lock; else other backends might rely on locally-cached data rather
than scanning the catalog at all. Thus, making snapshot reuse
dependent on the same guarantees shouldn't break anything that wasn't
already subtly broken.
Patch by me. Review by Michael Paquier and Andres Freund.
2013-07-02 15:47:01 +02:00
|
|
|
Snapshot snapshot;
|
2009-12-07 06:22:23 +01:00
|
|
|
|
2011-06-06 04:30:04 +02:00
|
|
|
/*
|
|
|
|
* If we are reindexing the target index, mark it as no longer being
|
2011-06-09 20:32:50 +02:00
|
|
|
* reindexed, to forestall an Assert in index_beginscan when we try to use
|
|
|
|
* the index for probes. This is OK because the index is now fully valid.
|
2011-06-06 04:30:04 +02:00
|
|
|
*/
|
|
|
|
if (ReindexIsCurrentlyProcessingIndex(RelationGetRelid(indexRelation)))
|
|
|
|
ResetReindexProcessing();
|
|
|
|
|
2009-12-07 06:22:23 +01:00
|
|
|
/*
|
|
|
|
* Need an EState for evaluation of index expressions and partial-index
|
2014-05-06 18:12:18 +02:00
|
|
|
* predicates. Also a slot to hold the current tuple.
|
2009-12-07 06:22:23 +01:00
|
|
|
*/
|
|
|
|
estate = CreateExecutorState();
|
|
|
|
econtext = GetPerTupleExprContext(estate);
|
tableam: Add and use scan APIs.
To allow table accesses to not be directly dependent on heap, several
new abstractions are needed. Specifically:
1) Heap scans need to be generalized into table scans. Do this by
introducing TableScanDesc, which will be the "base class" for
individual AMs. This contains the AM independent fields from
HeapScanDesc.
The previous heap_{beginscan,rescan,endscan} et al. have been
replaced with a table_ version.
There's no direct replacement for heap_getnext(), as that returned
a HeapTuple, which is undesirable for other AMs. Instead there's
table_scan_getnextslot(). But note that heap_getnext() lives on,
it's still used widely to access catalog tables.
This is achieved by new scan_begin, scan_end, scan_rescan,
scan_getnextslot callbacks.
2) The portion of parallel scans that's shared between backends need
to be able to do so without the user doing per-AM work. To achieve
that new parallelscan_{estimate, initialize, reinitialize}
callbacks are introduced, which operate on a new
ParallelTableScanDesc, which again can be subclassed by AMs.
As it is likely that several AMs are going to be block oriented,
block oriented callbacks that can be shared between such AMs are
provided and used by heap. table_block_parallelscan_{estimate,
initialize, reinitialize} as callbacks, and
table_block_parallelscan_{nextpage, init} for use in AMs. These
operate on a ParallelBlockTableScanDesc.
3) Index scans need to be able to access tables to return a tuple, and
there needs to be state across individual accesses to the heap to
store state like buffers. That's now handled by introducing a
sort-of-scan IndexFetchTable, which again is intended to be
subclassed by individual AMs (for heap IndexFetchHeap).
The relevant callbacks for an AM are index_fetch_{end, begin,
reset} to create the necessary state, and index_fetch_tuple to
retrieve an indexed tuple. Note that index_fetch_tuple
implementations need to be smarter than just blindly fetching the
tuples for AMs that have optimizations similar to heap's HOT - the
currently alive tuple in the update chain needs to be fetched if
appropriate.
Similar to table_scan_getnextslot(), it's undesirable to continue
to return HeapTuples. Thus index_fetch_heap (might want to rename
that later) now accepts a slot as an argument. Core code doesn't
have a lot of call sites performing index scans without going
through the systable_* API (in contrast to loads of heap_getnext
calls and working directly with HeapTuples).
Index scans now store the result of a search in
IndexScanDesc->xs_heaptid, rather than xs_ctup->t_self. As the
target is not generally a HeapTuple anymore that seems cleaner.
To be able to sensibly adapt code to use the above, two further
callbacks have been introduced:
a) slot_callbacks returns a TupleTableSlotOps* suitable for creating
slots capable of holding a tuple of the AMs
type. table_slot_callbacks() and table_slot_create() are based
upon that, but have additional logic to deal with views, foreign
tables, etc.
While this change could have been done separately, nearly all the
call sites that needed to be adapted for the rest of this commit
also would have been needed to be adapted for
table_slot_callbacks(), making separation not worthwhile.
b) tuple_satisfies_snapshot checks whether the tuple in a slot is
currently visible according to a snapshot. That's required as a few
places now don't have a buffer + HeapTuple around, but a
slot (which in heap's case internally has that information).
Additionally a few infrastructure changes were needed:
I) SysScanDesc, as used by systable_{beginscan, getnext} et al. now
internally uses a slot to keep track of tuples. While
systable_getnext() still returns HeapTuples, and will so for the
foreseeable future, the index API (see 1) above) now only deals with
slots.
The remainder, and largest part, of this commit is then adjusting all
scans in postgres to use the new APIs.
Author: Andres Freund, Haribabu Kommi, Alvaro Herrera
Discussion:
https://postgr.es/m/20180703070645.wchpu5muyto5n647@alap3.anarazel.de
https://postgr.es/m/20160812231527.GA690404@alvherre.pgsql
2019-03-11 20:46:41 +01:00
|
|
|
slot = table_slot_create(heapRelation, NULL);
|
2009-12-07 06:22:23 +01:00
|
|
|
|
|
|
|
/* Arrange for econtext's scan tuple to be the tuple under test */
|
|
|
|
econtext->ecxt_scantuple = slot;
|
|
|
|
|
|
|
|
/* Set up execution state for predicate, if any. */
|
Faster expression evaluation and targetlist projection.
This replaces the old, recursive tree-walk based evaluation, with
non-recursive, opcode dispatch based, expression evaluation.
Projection is now implemented as part of expression evaluation.
This both leads to significant performance improvements, and makes
future just-in-time compilation of expressions easier.
The speed gains primarily come from:
- non-recursive implementation reduces stack usage / overhead
- simple sub-expressions are implemented with a single jump, without
function calls
- sharing some state between different sub-expressions
- reduced amount of indirect/hard to predict memory accesses by laying
out operation metadata sequentially; including the avoidance of
nearly all of the previously used linked lists
- more code has been moved to expression initialization, avoiding
constant re-checks at evaluation time
Future just-in-time compilation (JIT) has become easier, as
demonstrated by released patches intended to be merged in a later
release, for primarily two reasons: Firstly, due to a stricter split
between expression initialization and evaluation, less code has to be
handled by the JIT. Secondly, due to the non-recursive nature of the
generated "instructions", less performance-critical code-paths can
easily be shared between interpreted and compiled evaluation.
The new framework allows for significant future optimizations. E.g.:
- basic infrastructure to later reduce the per executor-startup
overhead of expression evaluation, by caching state in prepared
statements. That'd be helpful in OLTPish scenarios where
initialization overhead is measurable.
- optimizing the generated "code". A number of proposals for potential
work has already been made.
- optimizing the interpreter. Similarly a number of proposals have
been made here too.
The move of logic into the expression initialization step leads to some
backward-incompatible changes:
- Function permission checks are now done during expression
initialization, whereas previously they were done during
execution. In edge cases this can lead to errors being raised that
previously wouldn't have been, e.g. a NULL array being coerced to a
different array type previously didn't perform checks.
- The set of domain constraints to be checked, is now evaluated once
during expression initialization, previously it was re-built
every time a domain check was evaluated. For normal queries this
doesn't change much, but e.g. for plpgsql functions, which cache
ExprStates, the old set could stick around longer. The behavior
in this area might still change.
Author: Andres Freund, with significant changes by Tom Lane,
changes by Heikki Linnakangas
Reviewed-By: Tom Lane, Heikki Linnakangas
Discussion: https://postgr.es/m/20161206034955.bh33paeralxbtluv@alap3.anarazel.de
2017-03-14 23:45:36 +01:00
|
|
|
predicate = ExecPrepareQual(indexInfo->ii_Predicate, estate);
|
2009-12-07 06:22:23 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Scan all live tuples in the base relation.
|
|
|
|
*/
|
Use an MVCC snapshot, rather than SnapshotNow, for catalog scans.
SnapshotNow scans have the undesirable property that, in the face of
concurrent updates, the scan can fail to see either the old or the new
versions of the row. In many cases, we work around this by requiring
DDL operations to hold AccessExclusiveLock on the object being
modified; in some cases, the existing locking is inadequate and random
failures occur as a result. This commit doesn't change anything
related to locking, but will hopefully pave the way to allowing lock
strength reductions in the future.
The major issue that has held us back from making this change in the past
is that taking an MVCC snapshot is significantly more expensive than
using a static special snapshot such as SnapshotNow. However, testing
of various worst-case scenarios reveals that this problem is not
severe except under fairly extreme workloads. To mitigate those
problems, we avoid retaking the MVCC snapshot for each new scan;
instead, we take a new snapshot only when invalidation messages have
been processed. The catcache machinery already requires that
invalidation messages be sent before releasing the related heavyweight
lock; else other backends might rely on locally-cached data rather
than scanning the catalog at all. Thus, making snapshot reuse
dependent on the same guarantees shouldn't break anything that wasn't
already subtly broken.
Patch by me. Review by Michael Paquier and Andres Freund.
2013-07-02 15:47:01 +02:00
|
|
|
snapshot = RegisterSnapshot(GetLatestSnapshot());
|
tableam: Add and use scan APIs.
To allow table accesses to not be directly dependent on heap, several
new abstractions are needed. Specifically:
1) Heap scans need to be generalized into table scans. Do this by
introducing TableScanDesc, which will be the "base class" for
individual AMs. This contains the AM independent fields from
HeapScanDesc.
The previous heap_{beginscan,rescan,endscan} et al. have been
replaced with a table_ version.
There's no direct replacement for heap_getnext(), as that returned
a HeapTuple, which is undesirable for other AMs. Instead there's
table_scan_getnextslot(). But note that heap_getnext() lives on,
it's still used widely to access catalog tables.
This is achieved by new scan_begin, scan_end, scan_rescan,
scan_getnextslot callbacks.
2) The portion of parallel scans that's shared between backends need
to be able to do so without the user doing per-AM work. To achieve
that new parallelscan_{estimate, initialize, reinitialize}
callbacks are introduced, which operate on a new
ParallelTableScanDesc, which again can be subclassed by AMs.
As it is likely that several AMs are going to be block oriented,
block oriented callbacks that can be shared between such AMs are
provided and used by heap. table_block_parallelscan_{estimate,
initialize, reinitialize} as callbacks, and
table_block_parallelscan_{nextpage, init} for use in AMs. These
operate on a ParallelBlockTableScanDesc.
3) Index scans need to be able to access tables to return a tuple, and
there needs to be state across individual accesses to the heap to
store state like buffers. That's now handled by introducing a
sort-of-scan IndexFetchTable, which again is intended to be
subclassed by individual AMs (for heap IndexFetchHeap).
The relevant callbacks for an AM are index_fetch_{end, begin,
reset} to create the necessary state, and index_fetch_tuple to
retrieve an indexed tuple. Note that index_fetch_tuple
implementations need to be smarter than just blindly fetching the
tuples for AMs that have optimizations similar to heap's HOT - the
currently alive tuple in the update chain needs to be fetched if
appropriate.
Similar to table_scan_getnextslot(), it's undesirable to continue
to return HeapTuples. Thus index_fetch_heap (might want to rename
that later) now accepts a slot as an argument. Core code doesn't
have a lot of call sites performing index scans without going
through the systable_* API (in contrast to loads of heap_getnext
calls and working directly with HeapTuples).
Index scans now store the result of a search in
IndexScanDesc->xs_heaptid, rather than xs_ctup->t_self. As the
target is not generally a HeapTuple anymore that seems cleaner.
To be able to sensibly adapt code to use the above, two further
callbacks have been introduced:
a) slot_callbacks returns a TupleTableSlotOps* suitable for creating
slots capable of holding a tuple of the AMs
type. table_slot_callbacks() and table_slot_create() are based
upon that, but have additional logic to deal with views, foreign
tables, etc.
While this change could have been done separately, nearly all the
call sites that needed to be adapted for the rest of this commit
also would have been needed to be adapted for
table_slot_callbacks(), making separation not worthwhile.
b) tuple_satisfies_snapshot checks whether the tuple in a slot is
currently visible according to a snapshot. That's required as a few
places now don't have a buffer + HeapTuple around, but a
slot (which in heap's case internally has that information).
Additionally a few infrastructure changes were needed:
I) SysScanDesc, as used by systable_{beginscan, getnext} et al. now
internally uses a slot to keep track of tuples. While
systable_getnext() still returns HeapTuples, and will so for the
foreseeable future, the index API (see 1) above) now only deals with
slots.
The remainder, and largest part, of this commit is then adjusting all
scans in postgres to use the new APIs.
Author: Andres Freund, Haribabu Kommi, Alvaro Herrera
Discussion:
https://postgr.es/m/20180703070645.wchpu5muyto5n647@alap3.anarazel.de
https://postgr.es/m/20160812231527.GA690404@alvherre.pgsql
2019-03-11 20:46:41 +01:00
|
|
|
scan = table_beginscan_strat(heapRelation, /* relation */
|
|
|
|
snapshot, /* snapshot */
|
|
|
|
0, /* number of keys */
|
|
|
|
NULL, /* scan key */
|
|
|
|
true, /* buffer access strategy OK */
|
|
|
|
true); /* syncscan OK */
|
|
|
|
|
|
|
|
while (table_scan_getnextslot(scan, ForwardScanDirection, slot))
|
2009-12-07 06:22:23 +01:00
|
|
|
{
|
|
|
|
CHECK_FOR_INTERRUPTS();
|
|
|
|
|
|
|
|
/*
|
|
|
|
* In a partial index, ignore tuples that don't satisfy the predicate.
|
|
|
|
*/
|
Faster expression evaluation and targetlist projection.
This replaces the old, recursive tree-walk based evaluation, with
non-recursive, opcode dispatch based, expression evaluation.
Projection is now implemented as part of expression evaluation.
This both leads to significant performance improvements, and makes
future just-in-time compilation of expressions easier.
The speed gains primarily come from:
- non-recursive implementation reduces stack usage / overhead
- simple sub-expressions are implemented with a single jump, without
function calls
- sharing some state between different sub-expressions
- reduced amount of indirect/hard to predict memory accesses by laying
out operation metadata sequentially; including the avoidance of
nearly all of the previously used linked lists
- more code has been moved to expression initialization, avoiding
constant re-checks at evaluation time
Future just-in-time compilation (JIT) has become easier, as
demonstrated by released patches intended to be merged in a later
release, for primarily two reasons: Firstly, due to a stricter split
between expression initialization and evaluation, less code has to be
handled by the JIT. Secondly, due to the non-recursive nature of the
generated "instructions", less performance-critical code-paths can
easily be shared between interpreted and compiled evaluation.
The new framework allows for significant future optimizations. E.g.:
- basic infrastructure to later reduce the per executor-startup
overhead of expression evaluation, by caching state in prepared
statements. That'd be helpful in OLTPish scenarios where
initialization overhead is measurable.
- optimizing the generated "code". A number of proposals for potential
work has already been made.
- optimizing the interpreter. Similarly a number of proposals have
been made here too.
The move of logic into the expression initialization step leads to some
backward-incompatible changes:
- Function permission checks are now done during expression
initialization, whereas previously they were done during
execution. In edge cases this can lead to errors being raised that
previously wouldn't have been, e.g. a NULL array being coerced to a
different array type previously didn't perform checks.
- The set of domain constraints to be checked, is now evaluated once
during expression initialization, previously it was re-built
every time a domain check was evaluated. For normal queries this
doesn't change much, but e.g. for plpgsql functions, which cache
ExprStates, the old set could stick around longer. The behavior
in this area might still change.
Author: Andres Freund, with significant changes by Tom Lane,
changes by Heikki Linnakangas
Reviewed-By: Tom Lane, Heikki Linnakangas
Discussion: https://postgr.es/m/20161206034955.bh33paeralxbtluv@alap3.anarazel.de
2017-03-14 23:45:36 +01:00
|
|
|
if (predicate != NULL)
|
2009-12-07 06:22:23 +01:00
|
|
|
{
|
Faster expression evaluation and targetlist projection.
This replaces the old, recursive tree-walk based evaluation, with
non-recursive, opcode dispatch based, expression evaluation.
Projection is now implemented as part of expression evaluation.
This both leads to significant performance improvements, and makes
future just-in-time compilation of expressions easier.
The speed gains primarily come from:
- non-recursive implementation reduces stack usage / overhead
- simple sub-expressions are implemented with a single jump, without
function calls
- sharing some state between different sub-expressions
- reduced amount of indirect/hard to predict memory accesses by laying
out operation metadata sequentially; including the avoidance of
nearly all of the previously used linked lists
- more code has been moved to expression initialization, avoiding
constant re-checks at evaluation time
Future just-in-time compilation (JIT) has become easier, as
demonstrated by released patches intended to be merged in a later
release, for primarily two reasons: Firstly, due to a stricter split
between expression initialization and evaluation, less code has to be
handled by the JIT. Secondly, due to the non-recursive nature of the
generated "instructions", less performance-critical code-paths can
easily be shared between interpreted and compiled evaluation.
The new framework allows for significant future optimizations. E.g.:
- basic infrastructure to later reduce the per-executor-startup
overhead of expression evaluation, by caching state in prepared
statements. That'd be helpful in OLTPish scenarios where
initialization overhead is measurable.
- optimizing the generated "code". A number of proposals for potential
  work have already been made.
- optimizing the interpreter. Similarly a number of proposals have
been made here too.
The move of logic into the expression initialization step leads to some
backward-incompatible changes:
- Function permission checks are now done during expression
initialization, whereas previously they were done during
execution. In edge cases this can lead to errors being raised that
previously wouldn't have been, e.g. a NULL array being coerced to a
different array type previously didn't perform checks.
- The set of domain constraints to be checked, is now evaluated once
during expression initialization, previously it was re-built
every time a domain check was evaluated. For normal queries this
  doesn't change much, but e.g. for plpgsql functions, which cache
  ExprStates, the old set could stick around longer. This behavior
  might still change.
Author: Andres Freund, with significant changes by Tom Lane,
changes by Heikki Linnakangas
Reviewed-By: Tom Lane, Heikki Linnakangas
Discussion: https://postgr.es/m/20161206034955.bh33paeralxbtluv@alap3.anarazel.de
2017-03-14 23:45:36 +01:00
|
|
|
if (!ExecQual(predicate, econtext))
|
2009-12-07 06:22:23 +01:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Extract index column values, including computing expressions.
|
|
|
|
*/
|
|
|
|
FormIndexDatum(indexInfo,
|
|
|
|
slot,
|
|
|
|
estate,
|
|
|
|
values,
|
|
|
|
isnull);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check that this tuple has no conflicts.
|
|
|
|
*/
|
|
|
|
check_exclusion_constraint(heapRelation,
|
|
|
|
indexRelation, indexInfo,
|
tableam: Add and use scan APIs.
To allow table accesses to be not directly dependent on heap, several
new abstractions are needed. Specifically:
1) Heap scans need to be generalized into table scans. Do this by
introducing TableScanDesc, which will be the "base class" for
individual AMs. This contains the AM independent fields from
HeapScanDesc.
The previous heap_{beginscan,rescan,endscan} et al. have been
replaced with a table_ version.
There's no direct replacement for heap_getnext(), as that returned
   a HeapTuple, which is undesirable for other AMs. Instead there's
table_scan_getnextslot(). But note that heap_getnext() lives on,
it's still used widely to access catalog tables.
This is achieved by new scan_begin, scan_end, scan_rescan,
scan_getnextslot callbacks.
2) The portion of parallel scans that's shared between backends need
to be able to do so without the user doing per-AM work. To achieve
that new parallelscan_{estimate, initialize, reinitialize}
callbacks are introduced, which operate on a new
ParallelTableScanDesc, which again can be subclassed by AMs.
As it is likely that several AMs are going to be block oriented,
block oriented callbacks that can be shared between such AMs are
provided and used by heap. table_block_parallelscan_{estimate,
   initialize, reinitialize} as callbacks, and
table_block_parallelscan_{nextpage, init} for use in AMs. These
operate on a ParallelBlockTableScanDesc.
3) Index scans need to be able to access tables to return a tuple, and
there needs to be state across individual accesses to the heap to
store state like buffers. That's now handled by introducing a
sort-of-scan IndexFetchTable, which again is intended to be
subclassed by individual AMs (for heap IndexFetchHeap).
The relevant callbacks for an AM are index_fetch_{end, begin,
reset} to create the necessary state, and index_fetch_tuple to
retrieve an indexed tuple. Note that index_fetch_tuple
implementations need to be smarter than just blindly fetching the
tuples for AMs that have optimizations similar to heap's HOT - the
currently alive tuple in the update chain needs to be fetched if
appropriate.
Similar to table_scan_getnextslot(), it's undesirable to continue
to return HeapTuples. Thus index_fetch_heap (might want to rename
that later) now accepts a slot as an argument. Core code doesn't
have a lot of call sites performing index scans without going
through the systable_* API (in contrast to loads of heap_getnext
calls and working directly with HeapTuples).
Index scans now store the result of a search in
IndexScanDesc->xs_heaptid, rather than xs_ctup->t_self. As the
target is not generally a HeapTuple anymore that seems cleaner.
To be able to sensibly adapt code to use the above, two further
callbacks have been introduced:
a) slot_callbacks returns a TupleTableSlotOps* suitable for creating
slots capable of holding a tuple of the AMs
type. table_slot_callbacks() and table_slot_create() are based
upon that, but have additional logic to deal with views, foreign
tables, etc.
While this change could have been done separately, nearly all the
call sites that needed to be adapted for the rest of this commit
also would have been needed to be adapted for
table_slot_callbacks(), making separation not worthwhile.
b) tuple_satisfies_snapshot checks whether the tuple in a slot is
currently visible according to a snapshot. That's required as a few
places now don't have a buffer + HeapTuple around, but a
slot (which in heap's case internally has that information).
Additionally a few infrastructure changes were needed:
I) SysScanDesc, as used by systable_{beginscan, getnext} et al. now
internally uses a slot to keep track of tuples. While
systable_getnext() still returns HeapTuples, and will so for the
foreseeable future, the index API (see 1) above) now only deals with
slots.
The remainder, and largest part, of this commit is then adjusting all
scans in postgres to use the new APIs.
Author: Andres Freund, Haribabu Kommi, Alvaro Herrera
Discussion:
https://postgr.es/m/20180703070645.wchpu5muyto5n647@alap3.anarazel.de
https://postgr.es/m/20160812231527.GA690404@alvherre.pgsql
2019-03-11 20:46:41 +01:00
|
|
|
&(slot->tts_tid), values, isnull,
|
Add support for INSERT ... ON CONFLICT DO NOTHING/UPDATE.
The newly added ON CONFLICT clause allows to specify an alternative to
raising a unique or exclusion constraint violation error when inserting.
ON CONFLICT refers to constraints that can either be specified using a
inference clause (by specifying the columns of a unique constraint) or
by naming a unique or exclusion constraint. DO NOTHING avoids the
constraint violation, without touching the pre-existing row. DO UPDATE
SET ... [WHERE ...] updates the pre-existing tuple, and has access to
both the tuple proposed for insertion and the existing tuple; the
optional WHERE clause can be used to prevent an update from being
executed. The UPDATE SET and WHERE clauses have access to the tuple
proposed for insertion using the "magic" EXCLUDED alias, and to the
pre-existing tuple using the table name or its alias.
This feature is often referred to as upsert.
This is implemented using a new infrastructure called "speculative
insertion". It is an optimistic variant of regular insertion that first
does a pre-check for existing tuples and then attempts an insert. If a
violating tuple was inserted concurrently, the speculatively inserted
tuple is deleted and a new attempt is made. If the pre-check finds a
matching tuple the alternative DO NOTHING or DO UPDATE action is taken.
If the insertion succeeds without detecting a conflict, the tuple is
deemed inserted.
To handle the possible ambiguity between the excluded alias and a table
named excluded, and for convenience with long relation names, INSERT
INTO now can alias its target table.
Bumps catversion as stored rules change.
Author: Peter Geoghegan, with significant contributions from Heikki
Linnakangas and Andres Freund. Testing infrastructure by Jeff Janes.
Reviewed-By: Heikki Linnakangas, Andres Freund, Robert Haas, Simon Riggs,
Dean Rasheed, Stephen Frost and many others.
2015-05-08 05:31:36 +02:00
|
|
|
estate, true);
|
tableam: Add and use scan APIs.
To allow table accesses to be not directly dependent on heap, several
new abstractions are needed. Specifically:
1) Heap scans need to be generalized into table scans. Do this by
introducing TableScanDesc, which will be the "base class" for
individual AMs. This contains the AM independent fields from
HeapScanDesc.
The previous heap_{beginscan,rescan,endscan} et al. have been
replaced with a table_ version.
There's no direct replacement for heap_getnext(), as that returned
   a HeapTuple, which is undesirable for other AMs. Instead there's
table_scan_getnextslot(). But note that heap_getnext() lives on,
it's still used widely to access catalog tables.
This is achieved by new scan_begin, scan_end, scan_rescan,
scan_getnextslot callbacks.
2) The portion of parallel scans that's shared between backends need
to be able to do so without the user doing per-AM work. To achieve
that new parallelscan_{estimate, initialize, reinitialize}
callbacks are introduced, which operate on a new
ParallelTableScanDesc, which again can be subclassed by AMs.
As it is likely that several AMs are going to be block oriented,
block oriented callbacks that can be shared between such AMs are
provided and used by heap. table_block_parallelscan_{estimate,
   initialize, reinitialize} as callbacks, and
table_block_parallelscan_{nextpage, init} for use in AMs. These
operate on a ParallelBlockTableScanDesc.
3) Index scans need to be able to access tables to return a tuple, and
there needs to be state across individual accesses to the heap to
store state like buffers. That's now handled by introducing a
sort-of-scan IndexFetchTable, which again is intended to be
subclassed by individual AMs (for heap IndexFetchHeap).
The relevant callbacks for an AM are index_fetch_{end, begin,
reset} to create the necessary state, and index_fetch_tuple to
retrieve an indexed tuple. Note that index_fetch_tuple
implementations need to be smarter than just blindly fetching the
tuples for AMs that have optimizations similar to heap's HOT - the
currently alive tuple in the update chain needs to be fetched if
appropriate.
Similar to table_scan_getnextslot(), it's undesirable to continue
to return HeapTuples. Thus index_fetch_heap (might want to rename
that later) now accepts a slot as an argument. Core code doesn't
have a lot of call sites performing index scans without going
through the systable_* API (in contrast to loads of heap_getnext
calls and working directly with HeapTuples).
Index scans now store the result of a search in
IndexScanDesc->xs_heaptid, rather than xs_ctup->t_self. As the
target is not generally a HeapTuple anymore that seems cleaner.
To be able to sensibly adapt code to use the above, two further
callbacks have been introduced:
a) slot_callbacks returns a TupleTableSlotOps* suitable for creating
slots capable of holding a tuple of the AMs
type. table_slot_callbacks() and table_slot_create() are based
upon that, but have additional logic to deal with views, foreign
tables, etc.
While this change could have been done separately, nearly all the
call sites that needed to be adapted for the rest of this commit
also would have been needed to be adapted for
table_slot_callbacks(), making separation not worthwhile.
b) tuple_satisfies_snapshot checks whether the tuple in a slot is
currently visible according to a snapshot. That's required as a few
places now don't have a buffer + HeapTuple around, but a
slot (which in heap's case internally has that information).
Additionally a few infrastructure changes were needed:
I) SysScanDesc, as used by systable_{beginscan, getnext} et al. now
internally uses a slot to keep track of tuples. While
systable_getnext() still returns HeapTuples, and will so for the
foreseeable future, the index API (see 1) above) now only deals with
slots.
The remainder, and largest part, of this commit is then adjusting all
scans in postgres to use the new APIs.
Author: Andres Freund, Haribabu Kommi, Alvaro Herrera
Discussion:
https://postgr.es/m/20180703070645.wchpu5muyto5n647@alap3.anarazel.de
https://postgr.es/m/20160812231527.GA690404@alvherre.pgsql
2019-03-11 20:46:41 +01:00
|
|
|
|
|
|
|
MemoryContextReset(econtext->ecxt_per_tuple_memory);
|
2009-12-07 06:22:23 +01:00
|
|
|
}
|
|
|
|
|
tableam: Add and use scan APIs.
To allow table accesses to be not directly dependent on heap, several
new abstractions are needed. Specifically:
1) Heap scans need to be generalized into table scans. Do this by
introducing TableScanDesc, which will be the "base class" for
individual AMs. This contains the AM independent fields from
HeapScanDesc.
The previous heap_{beginscan,rescan,endscan} et al. have been
replaced with a table_ version.
There's no direct replacement for heap_getnext(), as that returned
   a HeapTuple, which is undesirable for other AMs. Instead there's
table_scan_getnextslot(). But note that heap_getnext() lives on,
it's still used widely to access catalog tables.
This is achieved by new scan_begin, scan_end, scan_rescan,
scan_getnextslot callbacks.
2) The portion of parallel scans that's shared between backends need
to be able to do so without the user doing per-AM work. To achieve
that new parallelscan_{estimate, initialize, reinitialize}
callbacks are introduced, which operate on a new
ParallelTableScanDesc, which again can be subclassed by AMs.
As it is likely that several AMs are going to be block oriented,
block oriented callbacks that can be shared between such AMs are
provided and used by heap. table_block_parallelscan_{estimate,
   initialize, reinitialize} as callbacks, and
table_block_parallelscan_{nextpage, init} for use in AMs. These
operate on a ParallelBlockTableScanDesc.
3) Index scans need to be able to access tables to return a tuple, and
there needs to be state across individual accesses to the heap to
store state like buffers. That's now handled by introducing a
sort-of-scan IndexFetchTable, which again is intended to be
subclassed by individual AMs (for heap IndexFetchHeap).
The relevant callbacks for an AM are index_fetch_{end, begin,
reset} to create the necessary state, and index_fetch_tuple to
retrieve an indexed tuple. Note that index_fetch_tuple
implementations need to be smarter than just blindly fetching the
tuples for AMs that have optimizations similar to heap's HOT - the
currently alive tuple in the update chain needs to be fetched if
appropriate.
Similar to table_scan_getnextslot(), it's undesirable to continue
to return HeapTuples. Thus index_fetch_heap (might want to rename
that later) now accepts a slot as an argument. Core code doesn't
have a lot of call sites performing index scans without going
through the systable_* API (in contrast to loads of heap_getnext
calls and working directly with HeapTuples).
Index scans now store the result of a search in
IndexScanDesc->xs_heaptid, rather than xs_ctup->t_self. As the
target is not generally a HeapTuple anymore that seems cleaner.
To be able to sensibly adapt code to use the above, two further
callbacks have been introduced:
a) slot_callbacks returns a TupleTableSlotOps* suitable for creating
slots capable of holding a tuple of the AMs
type. table_slot_callbacks() and table_slot_create() are based
upon that, but have additional logic to deal with views, foreign
tables, etc.
While this change could have been done separately, nearly all the
call sites that needed to be adapted for the rest of this commit
also would have been needed to be adapted for
table_slot_callbacks(), making separation not worthwhile.
b) tuple_satisfies_snapshot checks whether the tuple in a slot is
currently visible according to a snapshot. That's required as a few
places now don't have a buffer + HeapTuple around, but a
slot (which in heap's case internally has that information).
Additionally a few infrastructure changes were needed:
I) SysScanDesc, as used by systable_{beginscan, getnext} et al. now
internally uses a slot to keep track of tuples. While
systable_getnext() still returns HeapTuples, and will so for the
foreseeable future, the index API (see 1) above) now only deals with
slots.
The remainder, and largest part, of this commit is then adjusting all
scans in postgres to use the new APIs.
Author: Andres Freund, Haribabu Kommi, Alvaro Herrera
Discussion:
https://postgr.es/m/20180703070645.wchpu5muyto5n647@alap3.anarazel.de
https://postgr.es/m/20160812231527.GA690404@alvherre.pgsql
2019-03-11 20:46:41 +01:00
|
|
|
table_endscan(scan);
|
Use an MVCC snapshot, rather than SnapshotNow, for catalog scans.
SnapshotNow scans have the undesirable property that, in the face of
concurrent updates, the scan can fail to see either the old or the new
versions of the row. In many cases, we work around this by requiring
DDL operations to hold AccessExclusiveLock on the object being
modified; in some cases, the existing locking is inadequate and random
failures occur as a result. This commit doesn't change anything
related to locking, but will hopefully pave the way to allowing lock
strength reductions in the future.
The major issue that has held us back from making this change in the past
is that taking an MVCC snapshot is significantly more expensive than
using a static special snapshot such as SnapshotNow. However, testing
of various worst-case scenarios reveals that this problem is not
severe except under fairly extreme workloads. To mitigate those
problems, we avoid retaking the MVCC snapshot for each new scan;
instead, we take a new snapshot only when invalidation messages have
been processed. The catcache machinery already requires that
invalidation messages be sent before releasing the related heavyweight
lock; else other backends might rely on locally-cached data rather
than scanning the catalog at all. Thus, making snapshot reuse
dependent on the same guarantees shouldn't break anything that wasn't
already subtly broken.
Patch by me. Review by Michael Paquier and Andres Freund.
2013-07-02 15:47:01 +02:00
|
|
|
UnregisterSnapshot(snapshot);
|
2009-12-07 06:22:23 +01:00
|
|
|
|
|
|
|
ExecDropSingleTupleTableSlot(slot);
|
|
|
|
|
|
|
|
FreeExecutorState(estate);
|
|
|
|
|
|
|
|
/* These may have been pointing to the now-gone estate */
|
|
|
|
indexInfo->ii_ExpressionsState = NIL;
|
Faster expression evaluation and targetlist projection.
This replaces the old, recursive tree-walk based evaluation, with
non-recursive, opcode dispatch based, expression evaluation.
Projection is now implemented as part of expression evaluation.
This both leads to significant performance improvements, and makes
future just-in-time compilation of expressions easier.
The speed gains primarily come from:
- non-recursive implementation reduces stack usage / overhead
- simple sub-expressions are implemented with a single jump, without
function calls
- sharing some state between different sub-expressions
- reduced amount of indirect/hard to predict memory accesses by laying
out operation metadata sequentially; including the avoidance of
nearly all of the previously used linked lists
- more code has been moved to expression initialization, avoiding
constant re-checks at evaluation time
Future just-in-time compilation (JIT) has become easier, as
demonstrated by released patches intended to be merged in a later
release, for primarily two reasons: Firstly, due to a stricter split
between expression initialization and evaluation, less code has to be
handled by the JIT. Secondly, due to the non-recursive nature of the
generated "instructions", less performance-critical code-paths can
easily be shared between interpreted and compiled evaluation.
The new framework allows for significant future optimizations. E.g.:
- basic infrastructure to later reduce the per-executor-startup
overhead of expression evaluation, by caching state in prepared
statements. That'd be helpful in OLTPish scenarios where
initialization overhead is measurable.
- optimizing the generated "code". A number of proposals for potential
  work have already been made.
- optimizing the interpreter. Similarly a number of proposals have
been made here too.
The move of logic into the expression initialization step leads to some
backward-incompatible changes:
- Function permission checks are now done during expression
initialization, whereas previously they were done during
execution. In edge cases this can lead to errors being raised that
previously wouldn't have been, e.g. a NULL array being coerced to a
different array type previously didn't perform checks.
- The set of domain constraints to be checked, is now evaluated once
during expression initialization, previously it was re-built
every time a domain check was evaluated. For normal queries this
  doesn't change much, but e.g. for plpgsql functions, which cache
  ExprStates, the old set could stick around longer. This behavior
  might still change.
Author: Andres Freund, with significant changes by Tom Lane,
changes by Heikki Linnakangas
Reviewed-By: Tom Lane, Heikki Linnakangas
Discussion: https://postgr.es/m/20161206034955.bh33paeralxbtluv@alap3.anarazel.de
2017-03-14 23:45:36 +01:00
|
|
|
indexInfo->ii_PredicateState = NULL;
|
2009-12-07 06:22:23 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2006-08-25 06:06:58 +02:00
|
|
|
/*
|
|
|
|
* validate_index - support code for concurrent index builds
|
|
|
|
*
|
2007-09-20 19:56:33 +02:00
|
|
|
* We do a concurrent index build by first inserting the catalog entry for the
|
|
|
|
* index via index_create(), marking it not indisready and not indisvalid.
|
|
|
|
* Then we commit our transaction and start a new one, then we wait for all
|
2014-05-06 18:12:18 +02:00
|
|
|
* transactions that could have been modifying the table to terminate. Now
|
2007-09-20 19:56:33 +02:00
|
|
|
* we know that any subsequently-started transactions will see the index and
|
|
|
|
* honor its constraints on HOT updates; so while existing HOT-chains might
|
|
|
|
* be broken with respect to the index, no currently live tuple will have an
|
2014-05-06 18:12:18 +02:00
|
|
|
* incompatible HOT update done to it. We now build the index normally via
|
2007-09-20 19:56:33 +02:00
|
|
|
* index_build(), while holding a weak lock that allows concurrent
|
2006-08-25 06:06:58 +02:00
|
|
|
* insert/update/delete. Also, we index only tuples that are valid
|
2019-03-28 03:59:06 +01:00
|
|
|
* as of the start of the scan (see table_index_build_scan), whereas a normal
|
2006-08-25 06:06:58 +02:00
|
|
|
* build takes care to include recently-dead tuples. This is OK because
|
|
|
|
* we won't mark the index valid until all transactions that might be able
|
|
|
|
* to see those tuples are gone. The reason for doing that is to avoid
|
|
|
|
* bogus unique-index failures due to concurrent UPDATEs (we might see
|
|
|
|
* different versions of the same row as being valid when we pass over them,
|
|
|
|
* if we used HeapTupleSatisfiesVacuum). This leaves us with an index that
|
|
|
|
* does not contain any tuples added to the table while we built the index.
|
|
|
|
*
|
2007-09-20 19:56:33 +02:00
|
|
|
* Next, we mark the index "indisready" (but still not "indisvalid") and
|
|
|
|
* commit the second transaction and start a third. Again we wait for all
|
2014-05-06 18:12:18 +02:00
|
|
|
* transactions that could have been modifying the table to terminate. Now
|
2007-09-20 19:56:33 +02:00
|
|
|
* we know that any subsequently-started transactions will see the index and
|
2006-08-25 06:06:58 +02:00
|
|
|
* insert their new tuples into it. We then take a new reference snapshot
|
|
|
|
* which is passed to validate_index(). Any tuples that are valid according
|
|
|
|
* to this snap, but are not in the index, must be added to the index.
|
|
|
|
* (Any tuples committed live after the snap will be inserted into the
|
2014-05-06 18:12:18 +02:00
|
|
|
* index by their originating transaction. Any tuples committed dead before
|
2006-08-25 06:06:58 +02:00
|
|
|
* the snap need not be indexed, because we will wait out all transactions
|
|
|
|
* that might care about them before we mark the index valid.)
|
|
|
|
*
|
|
|
|
* validate_index() works by first gathering all the TIDs currently in the
|
|
|
|
* index, using a bulkdelete callback that just stores the TIDs and doesn't
|
|
|
|
* ever say "delete it". (This should be faster than a plain indexscan;
|
|
|
|
* also, not all index AMs support full-index indexscan.) Then we sort the
|
|
|
|
* TIDs, and finally scan the table doing a "merge join" against the TID list
|
2014-05-06 18:12:18 +02:00
|
|
|
* to see which tuples are missing from the index. Thus we will ensure that
|
2006-08-25 06:06:58 +02:00
|
|
|
* all tuples valid according to the reference snapshot are in the index.
|
|
|
|
*
|
|
|
|
* Building a unique index this way is tricky: we might try to insert a
|
|
|
|
* tuple that is already dead or is in process of being deleted, and we
|
|
|
|
* mustn't have a uniqueness failure against an updated version of the same
|
2007-09-20 19:56:33 +02:00
|
|
|
* row. We could try to check the tuple to see if it's already dead and tell
|
2006-08-25 06:06:58 +02:00
|
|
|
* index_insert() not to do the uniqueness check, but that still leaves us
|
|
|
|
* with a race condition against an in-progress update. To handle that,
|
|
|
|
* we expect the index AM to recheck liveness of the to-be-inserted tuple
|
|
|
|
* before it declares a uniqueness error.
|
|
|
|
*
|
|
|
|
* After completing validate_index(), we wait until all transactions that
|
|
|
|
* were alive at the time of the reference snapshot are gone; this is
|
2010-09-11 20:38:58 +02:00
|
|
|
* necessary to be sure there are none left with a transaction snapshot
|
2006-08-25 06:06:58 +02:00
|
|
|
* older than the reference (and hence possibly able to see tuples we did
|
2014-05-06 18:12:18 +02:00
|
|
|
* not index). Then we mark the index "indisvalid" and commit. Subsequent
|
2007-09-20 19:56:33 +02:00
|
|
|
* transactions will be able to use it for queries.
|
2006-08-25 06:06:58 +02:00
|
|
|
*
|
|
|
|
* Doing two full table scans is a brute-force strategy. We could try to be
|
|
|
|
* cleverer, eg storing new tuples in a special area of the table (perhaps
|
|
|
|
* making the table append-only by setting use_fsm). However that would
|
|
|
|
* add yet more locking issues.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
validate_index(Oid heapId, Oid indexId, Snapshot snapshot)
|
|
|
|
{
|
2006-10-04 02:30:14 +02:00
|
|
|
Relation heapRelation,
|
|
|
|
indexRelation;
|
|
|
|
IndexInfo *indexInfo;
|
2006-08-25 06:06:58 +02:00
|
|
|
IndexVacuumInfo ivinfo;
|
2019-03-28 03:59:06 +01:00
|
|
|
ValidateIndexState state;
|
2008-01-03 22:23:15 +01:00
|
|
|
Oid save_userid;
|
Prevent indirect security attacks via changing session-local state within
an allegedly immutable index function. It was previously recognized that
we had to prevent such a function from executing SET/RESET ROLE/SESSION
AUTHORIZATION, or it could trivially obtain the privileges of the session
user. However, since there is in general no privilege checking for changes
of session-local state, it is also possible for such a function to change
settings in a way that might subvert later operations in the same session.
Examples include changing search_path to cause an unexpected function to
be called, or replacing an existing prepared statement with another one
that will execute a function of the attacker's choosing.
The present patch secures VACUUM, ANALYZE, and CREATE INDEX/REINDEX against
these threats, which are the same places previously deemed to need protection
against the SET ROLE issue. GUC changes are still allowed, since there are
many useful cases for that, but we prevent security problems by forcing a
rollback of any GUC change after completing the operation. Other cases are
handled by throwing an error if any change is attempted; these include temp
table creation, closing a cursor, and creating or deleting a prepared
statement. (In 7.4, the infrastructure to roll back GUC changes doesn't
exist, so we settle for rejecting changes of "search_path" in these contexts.)
Original report and patch by Gurjeet Singh, additional analysis by
Tom Lane.
Security: CVE-2009-4136
2009-12-09 22:57:51 +01:00
|
|
|
int save_sec_context;
|
|
|
|
int save_nestlevel;
|
2006-08-25 06:06:58 +02:00
|
|
|
|
Report progress of CREATE INDEX operations
This uses the progress reporting infrastructure added by c16dc1aca5e0,
adding support for CREATE INDEX and CREATE INDEX CONCURRENTLY.
There are two pieces to this: one is index-AM-agnostic, and the other is
AM-specific. The latter is fairly elaborate for btrees, including
reportage for parallel index builds and the separate phases that btree
index creation uses; other index AMs, which are much simpler in their
building procedures, have simplistic reporting only, but that seems
sufficient, at least for non-concurrent builds.
The index-AM-agnostic part is fairly complete, providing insight into
the CONCURRENTLY wait phases as well as block-based progress during the
index validation table scan. (The index validation index scan requires
patching each AM, which has not been included here.)
Reviewers: Rahila Syed, Pavan Deolasee, Tatsuro Yamada
Discussion: https://postgr.es/m/20181220220022.mg63bhk26zdpvmcj@alvherre.pgsql
2019-04-02 20:18:08 +02:00
|
|
|
{
|
|
|
|
const int index[] = {
|
|
|
|
PROGRESS_CREATEIDX_PHASE,
|
|
|
|
PROGRESS_CREATEIDX_TUPLES_DONE,
|
|
|
|
PROGRESS_CREATEIDX_TUPLES_TOTAL,
|
|
|
|
PROGRESS_SCAN_BLOCKS_DONE,
|
|
|
|
PROGRESS_SCAN_BLOCKS_TOTAL
|
|
|
|
};
|
2019-05-22 18:55:34 +02:00
|
|
|
const int64 val[] = {
|
Report progress of CREATE INDEX operations
This uses the progress reporting infrastructure added by c16dc1aca5e0,
adding support for CREATE INDEX and CREATE INDEX CONCURRENTLY.
There are two pieces to this: one is index-AM-agnostic, and the other is
AM-specific. The latter is fairly elaborate for btrees, including
reportage for parallel index builds and the separate phases that btree
index creation uses; other index AMs, which are much simpler in their
building procedures, have simplistic reporting only, but that seems
sufficient, at least for non-concurrent builds.
The index-AM-agnostic part is fairly complete, providing insight into
the CONCURRENTLY wait phases as well as block-based progress during the
index validation table scan. (The index validation index scan requires
patching each AM, which has not been included here.)
Reviewers: Rahila Syed, Pavan Deolasee, Tatsuro Yamada
Discussion: https://postgr.es/m/20181220220022.mg63bhk26zdpvmcj@alvherre.pgsql
2019-04-02 20:18:08 +02:00
|
|
|
PROGRESS_CREATEIDX_PHASE_VALIDATE_IDXSCAN,
|
|
|
|
0, 0, 0, 0
|
|
|
|
};
|
2019-05-22 18:55:34 +02:00
|
|
|
|
Report progress of CREATE INDEX operations
This uses the progress reporting infrastructure added by c16dc1aca5e0,
adding support for CREATE INDEX and CREATE INDEX CONCURRENTLY.
There are two pieces to this: one is index-AM-agnostic, and the other is
AM-specific. The latter is fairly elaborate for btrees, including
reportage for parallel index builds and the separate phases that btree
index creation uses; other index AMs, which are much simpler in their
building procedures, have simplistic reporting only, but that seems
sufficient, at least for non-concurrent builds.
The index-AM-agnostic part is fairly complete, providing insight into
the CONCURRENTLY wait phases as well as block-based progress during the
index validation table scan. (The index validation index scan requires
patching each AM, which has not been included here.)
Reviewers: Rahila Syed, Pavan Deolasee, Tatsuro Yamada
Discussion: https://postgr.es/m/20181220220022.mg63bhk26zdpvmcj@alvherre.pgsql
2019-04-02 20:18:08 +02:00
|
|
|
pgstat_progress_update_multi_param(5, index, val);
|
|
|
|
}
|
|
|
|
|
2006-08-25 06:06:58 +02:00
|
|
|
/* Open and lock the parent heap relation */
|
2019-01-21 19:32:19 +01:00
|
|
|
heapRelation = table_open(heapId, ShareUpdateExclusiveLock);
|
2006-08-25 06:06:58 +02:00
|
|
|
/* And the target index relation */
|
|
|
|
indexRelation = index_open(indexId, RowExclusiveLock);
|
|
|
|
|
|
|
|
/*
|
2014-05-06 18:12:18 +02:00
|
|
|
* Fetch info needed for index_insert. (You might think this should be
|
2006-10-04 02:30:14 +02:00
|
|
|
* passed in from DefineIndex, but its copy is long gone due to having
|
|
|
|
* been built in a previous transaction.)
|
2006-08-25 06:06:58 +02:00
|
|
|
*/
|
|
|
|
indexInfo = BuildIndexInfo(indexRelation);
|
|
|
|
|
|
|
|
/* mark build is concurrent just for consistency */
|
|
|
|
indexInfo->ii_Concurrent = true;
|
|
|
|
|
2008-01-03 22:23:15 +01:00
|
|
|
/*
|
2009-06-11 16:49:15 +02:00
|
|
|
* Switch to the table owner's userid, so that any index functions are run
|
Prevent indirect security attacks via changing session-local state within
an allegedly immutable index function. It was previously recognized that
we had to prevent such a function from executing SET/RESET ROLE/SESSION
AUTHORIZATION, or it could trivially obtain the privileges of the session
user. However, since there is in general no privilege checking for changes
of session-local state, it is also possible for such a function to change
settings in a way that might subvert later operations in the same session.
Examples include changing search_path to cause an unexpected function to
be called, or replacing an existing prepared statement with another one
that will execute a function of the attacker's choosing.
The present patch secures VACUUM, ANALYZE, and CREATE INDEX/REINDEX against
these threats, which are the same places previously deemed to need protection
against the SET ROLE issue. GUC changes are still allowed, since there are
many useful cases for that, but we prevent security problems by forcing a
rollback of any GUC change after completing the operation. Other cases are
handled by throwing an error if any change is attempted; these include temp
table creation, closing a cursor, and creating or deleting a prepared
statement. (In 7.4, the infrastructure to roll back GUC changes doesn't
exist, so we settle for rejecting changes of "search_path" in these contexts.)
Original report and patch by Gurjeet Singh, additional analysis by
Tom Lane.
Security: CVE-2009-4136
2009-12-09 22:57:51 +01:00
|
|
|
* as that user. Also lock down security-restricted operations and
|
|
|
|
* arrange to make GUC variable changes local to this command.
|
2008-01-03 22:23:15 +01:00
|
|
|
*/
|
Prevent indirect security attacks via changing session-local state within
an allegedly immutable index function. It was previously recognized that
we had to prevent such a function from executing SET/RESET ROLE/SESSION
AUTHORIZATION, or it could trivially obtain the privileges of the session
user. However, since there is in general no privilege checking for changes
of session-local state, it is also possible for such a function to change
settings in a way that might subvert later operations in the same session.
Examples include changing search_path to cause an unexpected function to
be called, or replacing an existing prepared statement with another one
that will execute a function of the attacker's choosing.
The present patch secures VACUUM, ANALYZE, and CREATE INDEX/REINDEX against
these threats, which are the same places previously deemed to need protection
against the SET ROLE issue. GUC changes are still allowed, since there are
many useful cases for that, but we prevent security problems by forcing a
rollback of any GUC change after completing the operation. Other cases are
handled by throwing an error if any change is attempted; these include temp
table creation, closing a cursor, and creating or deleting a prepared
statement. (In 7.4, the infrastructure to roll back GUC changes doesn't
exist, so we settle for rejecting changes of "search_path" in these contexts.)
Original report and patch by Gurjeet Singh, additional analysis by
Tom Lane.
Security: CVE-2009-4136
2009-12-09 22:57:51 +01:00
|
|
|
GetUserIdAndSecContext(&save_userid, &save_sec_context);
|
|
|
|
SetUserIdAndSecContext(heapRelation->rd_rel->relowner,
|
|
|
|
save_sec_context | SECURITY_RESTRICTED_OPERATION);
|
|
|
|
save_nestlevel = NewGUCNestLevel();
|
2008-01-03 22:23:15 +01:00
|
|
|
|
2006-08-25 06:06:58 +02:00
|
|
|
/*
|
|
|
|
* Scan the index and gather up all the TIDs into a tuplesort object.
|
|
|
|
*/
|
|
|
|
ivinfo.index = indexRelation;
|
2009-03-24 21:17:18 +01:00
|
|
|
ivinfo.analyze_only = false;
|
Report progress of CREATE INDEX operations
This uses the progress reporting infrastructure added by c16dc1aca5e0,
adding support for CREATE INDEX and CREATE INDEX CONCURRENTLY.
There are two pieces to this: one is index-AM-agnostic, and the other is
AM-specific. The latter is fairly elaborate for btrees, including
reportage for parallel index builds and the separate phases that btree
index creation uses; other index AMs, which are much simpler in their
building procedures, have simplistic reporting only, but that seems
sufficient, at least for non-concurrent builds.
The index-AM-agnostic part is fairly complete, providing insight into
the CONCURRENTLY wait phases as well as block-based progress during the
index validation table scan. (The index validation index scan requires
patching each AM, which has not been included here.)
Reviewers: Rahila Syed, Pavan Deolasee, Tatsuro Yamada
Discussion: https://postgr.es/m/20181220220022.mg63bhk26zdpvmcj@alvherre.pgsql
2019-04-02 20:18:08 +02:00
|
|
|
ivinfo.report_progress = true;
|
2009-06-07 00:13:52 +02:00
|
|
|
ivinfo.estimated_count = true;
|
2006-08-25 06:06:58 +02:00
|
|
|
ivinfo.message_level = DEBUG2;
|
2009-06-07 00:13:52 +02:00
|
|
|
ivinfo.num_heap_tuples = heapRelation->rd_rel->reltuples;
|
2007-05-30 22:12:03 +02:00
|
|
|
ivinfo.strategy = NULL;
|
2006-08-25 06:06:58 +02:00
|
|
|
|
2015-12-16 21:23:45 +01:00
|
|
|
/*
|
|
|
|
* Encode TIDs as int8 values for the sort, rather than directly sorting
|
|
|
|
* item pointers. This can be significantly faster, primarily because TID
|
|
|
|
* is a pass-by-reference type on all platforms, whereas int8 is
|
|
|
|
* pass-by-value on most platforms.
|
|
|
|
*/
|
|
|
|
state.tuplesort = tuplesort_begin_datum(INT8OID, Int8LessOperator,
|
2011-04-22 23:43:18 +02:00
|
|
|
InvalidOid, false,
|
2006-08-25 06:06:58 +02:00
|
|
|
maintenance_work_mem,
|
Support parallel btree index builds.
To make this work, tuplesort.c and logtape.c must also support
parallelism, so this patch adds that infrastructure and then applies
it to the particular case of parallel btree index builds. Testing
to date shows that this can often be 2-3x faster than a serial
index build.
The model for deciding how many workers to use is fairly primitive
at present, but it's better than not having the feature. We can
refine it as we get more experience.
Peter Geoghegan with some help from Rushabh Lathia. While Heikki
Linnakangas is not an author of this patch, he wrote other patches
without which this feature would not have been possible, and
therefore the release notes should possibly credit him as an author
of this feature. Reviewed by Claudio Freire, Heikki Linnakangas,
Thomas Munro, Tels, Amit Kapila, me.
Discussion: http://postgr.es/m/CAM3SWZQKM=Pzc=CAHzRixKjp2eO5Q0Jg1SoFQqeXFQ647JiwqQ@mail.gmail.com
Discussion: http://postgr.es/m/CAH2-Wz=AxWqDoVvGU7dq856S4r6sJAj6DBn7VMtigkB33N5eyg@mail.gmail.com
2018-02-02 19:25:55 +01:00
|
|
|
NULL, false);
|
2006-08-25 06:06:58 +02:00
|
|
|
state.htups = state.itups = state.tups_inserted = 0;
|
|
|
|
|
Report progress of CREATE INDEX operations
This uses the progress reporting infrastructure added by c16dc1aca5e0,
adding support for CREATE INDEX and CREATE INDEX CONCURRENTLY.
There are two pieces to this: one is index-AM-agnostic, and the other is
AM-specific. The latter is fairly elaborate for btrees, including
reportage for parallel index builds and the separate phases that btree
index creation uses; other index AMs, which are much simpler in their
building procedures, have simplistic reporting only, but that seems
sufficient, at least for non-concurrent builds.
The index-AM-agnostic part is fairly complete, providing insight into
the CONCURRENTLY wait phases as well as block-based progress during the
index validation table scan. (The index validation index scan requires
patching each AM, which has not been included here.)
Reviewers: Rahila Syed, Pavan Deolasee, Tatsuro Yamada
Discussion: https://postgr.es/m/20181220220022.mg63bhk26zdpvmcj@alvherre.pgsql
2019-04-02 20:18:08 +02:00
|
|
|
/* ambulkdelete updates progress metrics */
|
2006-08-25 06:06:58 +02:00
|
|
|
(void) index_bulk_delete(&ivinfo, NULL,
|
|
|
|
validate_index_callback, (void *) &state);
|
|
|
|
|
|
|
|
/* Execute the sort */
|
Report progress of CREATE INDEX operations
This uses the progress reporting infrastructure added by c16dc1aca5e0,
adding support for CREATE INDEX and CREATE INDEX CONCURRENTLY.
There are two pieces to this: one is index-AM-agnostic, and the other is
AM-specific. The latter is fairly elaborate for btrees, including
reportage for parallel index builds and the separate phases that btree
index creation uses; other index AMs, which are much simpler in their
building procedures, have simplistic reporting only, but that seems
sufficient, at least for non-concurrent builds.
The index-AM-agnostic part is fairly complete, providing insight into
the CONCURRENTLY wait phases as well as block-based progress during the
index validation table scan. (The index validation index scan requires
patching each AM, which has not been included here.)
Reviewers: Rahila Syed, Pavan Deolasee, Tatsuro Yamada
Discussion: https://postgr.es/m/20181220220022.mg63bhk26zdpvmcj@alvherre.pgsql
2019-04-02 20:18:08 +02:00
|
|
|
{
|
|
|
|
const int index[] = {
|
|
|
|
PROGRESS_CREATEIDX_PHASE,
|
|
|
|
PROGRESS_SCAN_BLOCKS_DONE,
|
|
|
|
PROGRESS_SCAN_BLOCKS_TOTAL
|
|
|
|
};
|
2019-05-22 18:55:34 +02:00
|
|
|
const int64 val[] = {
|
Report progress of CREATE INDEX operations
This uses the progress reporting infrastructure added by c16dc1aca5e0,
adding support for CREATE INDEX and CREATE INDEX CONCURRENTLY.
There are two pieces to this: one is index-AM-agnostic, and the other is
AM-specific. The latter is fairly elaborate for btrees, including
reportage for parallel index builds and the separate phases that btree
index creation uses; other index AMs, which are much simpler in their
building procedures, have simplistic reporting only, but that seems
sufficient, at least for non-concurrent builds.
The index-AM-agnostic part is fairly complete, providing insight into
the CONCURRENTLY wait phases as well as block-based progress during the
index validation table scan. (The index validation index scan requires
patching each AM, which has not been included here.)
Reviewers: Rahila Syed, Pavan Deolasee, Tatsuro Yamada
Discussion: https://postgr.es/m/20181220220022.mg63bhk26zdpvmcj@alvherre.pgsql
2019-04-02 20:18:08 +02:00
|
|
|
PROGRESS_CREATEIDX_PHASE_VALIDATE_SORT,
|
|
|
|
0, 0
|
|
|
|
};
|
|
|
|
|
|
|
|
pgstat_progress_update_multi_param(3, index, val);
|
|
|
|
}
|
2006-08-25 06:06:58 +02:00
|
|
|
tuplesort_performsort(state.tuplesort);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Now scan the heap and "merge" it with the index
|
|
|
|
*/
|
Report progress of CREATE INDEX operations
This uses the progress reporting infrastructure added by c16dc1aca5e0,
adding support for CREATE INDEX and CREATE INDEX CONCURRENTLY.
There are two pieces to this: one is index-AM-agnostic, and the other is
AM-specific. The latter is fairly elaborate for btrees, including
reportage for parallel index builds and the separate phases that btree
index creation uses; other index AMs, which are much simpler in their
building procedures, have simplistic reporting only, but that seems
sufficient, at least for non-concurrent builds.
The index-AM-agnostic part is fairly complete, providing insight into
the CONCURRENTLY wait phases as well as block-based progress during the
index validation table scan. (The index validation index scan requires
patching each AM, which has not been included here.)
Reviewers: Rahila Syed, Pavan Deolasee, Tatsuro Yamada
Discussion: https://postgr.es/m/20181220220022.mg63bhk26zdpvmcj@alvherre.pgsql
2019-04-02 20:18:08 +02:00
|
|
|
pgstat_progress_update_param(PROGRESS_CREATEIDX_PHASE,
|
|
|
|
PROGRESS_CREATEIDX_PHASE_VALIDATE_TABLESCAN);
|
2019-03-28 03:59:06 +01:00
|
|
|
table_index_validate_scan(heapRelation,
|
|
|
|
indexRelation,
|
|
|
|
indexInfo,
|
|
|
|
snapshot,
|
|
|
|
&state);
|
2006-08-25 06:06:58 +02:00
|
|
|
|
|
|
|
/* Done with tuplesort object */
|
|
|
|
tuplesort_end(state.tuplesort);
|
|
|
|
|
|
|
|
elog(DEBUG2,
|
|
|
|
"validate_index found %.0f heap tuples, %.0f index tuples; inserted %.0f missing tuples",
|
|
|
|
state.htups, state.itups, state.tups_inserted);
|
|
|
|
|
Prevent indirect security attacks via changing session-local state within
an allegedly immutable index function. It was previously recognized that
we had to prevent such a function from executing SET/RESET ROLE/SESSION
AUTHORIZATION, or it could trivially obtain the privileges of the session
user. However, since there is in general no privilege checking for changes
of session-local state, it is also possible for such a function to change
settings in a way that might subvert later operations in the same session.
Examples include changing search_path to cause an unexpected function to
be called, or replacing an existing prepared statement with another one
that will execute a function of the attacker's choosing.
The present patch secures VACUUM, ANALYZE, and CREATE INDEX/REINDEX against
these threats, which are the same places previously deemed to need protection
against the SET ROLE issue. GUC changes are still allowed, since there are
many useful cases for that, but we prevent security problems by forcing a
rollback of any GUC change after completing the operation. Other cases are
handled by throwing an error if any change is attempted; these include temp
table creation, closing a cursor, and creating or deleting a prepared
statement. (In 7.4, the infrastructure to roll back GUC changes doesn't
exist, so we settle for rejecting changes of "search_path" in these contexts.)
Original report and patch by Gurjeet Singh, additional analysis by
Tom Lane.
Security: CVE-2009-4136
2009-12-09 22:57:51 +01:00
|
|
|
/* Roll back any GUC changes executed by index functions */
|
|
|
|
AtEOXact_GUC(false, save_nestlevel);
|
|
|
|
|
|
|
|
/* Restore userid and security context */
|
|
|
|
SetUserIdAndSecContext(save_userid, save_sec_context);
|
2008-01-03 22:23:15 +01:00
|
|
|
|
2006-08-25 06:06:58 +02:00
|
|
|
/* Close rels, but keep locks */
|
|
|
|
index_close(indexRelation, NoLock);
|
2019-01-21 19:32:19 +01:00
|
|
|
table_close(heapRelation, NoLock);
|
2006-08-25 06:06:58 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* validate_index_callback - bulkdelete callback to collect the index TIDs
|
|
|
|
*/
|
|
|
|
static bool
|
|
|
|
validate_index_callback(ItemPointer itemptr, void *opaque)
|
|
|
|
{
|
2019-03-28 03:59:06 +01:00
|
|
|
ValidateIndexState *state = (ValidateIndexState *) opaque;
|
2015-12-16 21:23:45 +01:00
|
|
|
int64 encoded = itemptr_encode(itemptr);
|
2006-08-25 06:06:58 +02:00
|
|
|
|
2015-12-16 21:23:45 +01:00
|
|
|
tuplesort_putdatum(state->tuplesort, Int64GetDatum(encoded), false);
|
2006-08-25 06:06:58 +02:00
|
|
|
state->itups += 1;
|
|
|
|
return false; /* never actually delete anything */
|
|
|
|
}
|
|
|
|
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
/*
|
|
|
|
* index_set_state_flags - adjust pg_index state flags
|
|
|
|
*
|
|
|
|
* This is used during CREATE/DROP INDEX CONCURRENTLY to adjust the pg_index
|
2013-08-01 16:46:19 +02:00
|
|
|
* flags that denote the index's state. Because the update is not
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
* transactional and will not roll back on error, this must only be used as
|
|
|
|
* the last step in a transaction that has not made any transactional catalog
|
|
|
|
* updates!
|
|
|
|
*
|
|
|
|
* Note that heap_inplace_update does send a cache inval message for the
|
|
|
|
* tuple, so other sessions will hear about the update as soon as we commit.
|
2013-08-01 16:46:19 +02:00
|
|
|
*
|
|
|
|
* NB: In releases prior to PostgreSQL 9.4, the use of a non-transactional
|
|
|
|
* update here would have been unsafe; now that MVCC rules apply even for
|
|
|
|
* system catalog scans, we could potentially use a transactional update here
|
|
|
|
* instead.
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
*/
|
|
|
|
void
|
|
|
|
index_set_state_flags(Oid indexId, IndexStateFlagsAction action)
|
|
|
|
{
|
|
|
|
Relation pg_index;
|
|
|
|
HeapTuple indexTuple;
|
|
|
|
Form_pg_index indexForm;
|
|
|
|
|
|
|
|
/* Assert that current xact hasn't done any transactional updates */
|
|
|
|
Assert(GetTopTransactionIdIfAny() == InvalidTransactionId);
|
|
|
|
|
|
|
|
/* Open pg_index and fetch a writable copy of the index's tuple */
|
2019-01-21 19:32:19 +01:00
|
|
|
pg_index = table_open(IndexRelationId, RowExclusiveLock);
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
|
|
|
|
indexTuple = SearchSysCacheCopy1(INDEXRELID,
|
|
|
|
ObjectIdGetDatum(indexId));
|
|
|
|
if (!HeapTupleIsValid(indexTuple))
|
|
|
|
elog(ERROR, "cache lookup failed for index %u", indexId);
|
|
|
|
indexForm = (Form_pg_index) GETSTRUCT(indexTuple);
|
|
|
|
|
|
|
|
/* Perform the requested state change on the copy */
|
|
|
|
switch (action)
|
|
|
|
{
|
|
|
|
case INDEX_CREATE_SET_READY:
|
|
|
|
/* Set indisready during a CREATE INDEX CONCURRENTLY sequence */
|
|
|
|
Assert(indexForm->indislive);
|
|
|
|
Assert(!indexForm->indisready);
|
|
|
|
Assert(!indexForm->indisvalid);
|
|
|
|
indexForm->indisready = true;
|
|
|
|
break;
|
|
|
|
case INDEX_CREATE_SET_VALID:
|
|
|
|
/* Set indisvalid during a CREATE INDEX CONCURRENTLY sequence */
|
|
|
|
Assert(indexForm->indislive);
|
|
|
|
Assert(indexForm->indisready);
|
|
|
|
Assert(!indexForm->indisvalid);
|
|
|
|
indexForm->indisvalid = true;
|
|
|
|
break;
|
|
|
|
case INDEX_DROP_CLEAR_VALID:
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Clear indisvalid during a DROP INDEX CONCURRENTLY sequence
|
|
|
|
*
|
|
|
|
* If indisready == true we leave it set so the index still gets
|
|
|
|
* maintained by active transactions. We only need to ensure that
|
|
|
|
* indisvalid is false. (We don't assert that either is initially
|
|
|
|
* true, though, since we want to be able to retry a DROP INDEX
|
|
|
|
* CONCURRENTLY that failed partway through.)
|
|
|
|
*
|
|
|
|
* Note: the CLUSTER logic assumes that indisclustered cannot be
|
|
|
|
* set on any invalid index, so clear that flag too.
|
|
|
|
*/
|
|
|
|
indexForm->indisvalid = false;
|
|
|
|
indexForm->indisclustered = false;
|
|
|
|
break;
|
|
|
|
case INDEX_DROP_SET_DEAD:
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Clear indisready/indislive during DROP INDEX CONCURRENTLY
|
|
|
|
*
|
|
|
|
* We clear both indisready and indislive, because we not only
|
|
|
|
* want to stop updates, we want to prevent sessions from touching
|
|
|
|
* the index at all.
|
|
|
|
*/
|
|
|
|
Assert(!indexForm->indisvalid);
|
|
|
|
indexForm->indisready = false;
|
|
|
|
indexForm->indislive = false;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* ... and write it back in-place */
|
|
|
|
heap_inplace_update(pg_index, indexTuple);
|
|
|
|
|
2019-01-21 19:32:19 +01:00
|
|
|
table_close(pg_index, RowExclusiveLock);
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolasee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
1999-11-21 21:01:10 +01:00
|
|
|
/*
|
|
|
|
* IndexGetRelation: given an index's relation OID, get the OID of the
|
2014-05-06 18:12:18 +02:00
|
|
|
* relation it is an index on. Uses the system cache.
|
1999-11-21 21:01:10 +01:00
|
|
|
*/
|
Improve table locking behavior in the face of current DDL.
In the previous coding, callers were faced with an awkward choice:
look up the name, do permissions checks, and then lock the table; or
look up the name, lock the table, and then do permissions checks.
The first choice was wrong because the results of the name lookup
and permissions checks might be out-of-date by the time the table
lock was acquired, while the second allowed a user with no privileges
to interfere with access to a table by users who do have privileges
(e.g. if a malicious backend queues up for an AccessExclusiveLock on
a table on which AccessShareLock is already held, further attempts
to access the table will be blocked until the AccessExclusiveLock
is obtained and the malicious backend's transaction rolls back).
To fix, allow callers of RangeVarGetRelid() to pass a callback which
gets executed after performing the name lookup but before acquiring
the relation lock. If the name lookup is retried (because
invalidation messages are received), the callback will be re-executed
as well, so we get the best of both worlds. RangeVarGetRelid() is
renamed to RangeVarGetRelidExtended(); callers not wishing to supply
a callback can continue to invoke it as RangeVarGetRelid(), which is
now a macro. Since the only one caller that uses nowait = true now
passes a callback anyway, the RangeVarGetRelid() macro defaults nowait
as well. The callback can also be used for supplemental locking - for
example, REINDEX INDEX needs to acquire the table lock before the index
lock to reduce deadlock possibilities.
There's a lot more work to be done here to fix all the cases where this
can be a problem, but this commit provides the general infrastructure
and fixes the following specific cases: REINDEX INDEX, REINDEX TABLE,
LOCK TABLE, and DROP TABLE/INDEX/SEQUENCE/VIEW/FOREIGN TABLE.
Per discussion with Noah Misch and Alvaro Herrera.
2011-11-30 16:12:27 +01:00
|
|
|
Oid
|
|
|
|
IndexGetRelation(Oid indexId, bool missing_ok)
|
1999-11-21 21:01:10 +01:00
|
|
|
{
|
|
|
|
HeapTuple tuple;
|
|
|
|
Form_pg_index index;
|
2000-11-16 23:30:52 +01:00
|
|
|
Oid result;
|
1999-11-21 21:01:10 +01:00
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
tuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(indexId));
|
1999-11-21 21:01:10 +01:00
|
|
|
if (!HeapTupleIsValid(tuple))
|
Improve table locking behavior in the face of current DDL.
In the previous coding, callers were faced with an awkward choice:
look up the name, do permissions checks, and then lock the table; or
look up the name, lock the table, and then do permissions checks.
The first choice was wrong because the results of the name lookup
and permissions checks might be out-of-date by the time the table
lock was acquired, while the second allowed a user with no privileges
to interfere with access to a table by users who do have privileges
(e.g. if a malicious backend queues up for an AccessExclusiveLock on
a table on which AccessShareLock is already held, further attempts
to access the table will be blocked until the AccessExclusiveLock
is obtained and the malicious backend's transaction rolls back).
To fix, allow callers of RangeVarGetRelid() to pass a callback which
gets executed after performing the name lookup but before acquiring
the relation lock. If the name lookup is retried (because
invalidation messages are received), the callback will be re-executed
as well, so we get the best of both worlds. RangeVarGetRelid() is
renamed to RangeVarGetRelidExtended(); callers not wishing to supply
a callback can continue to invoke it as RangeVarGetRelid(), which is
now a macro. Since the only one caller that uses nowait = true now
passes a callback anyway, the RangeVarGetRelid() macro defaults nowait
as well. The callback can also be used for supplemental locking - for
example, REINDEX INDEX needs to acquire the table lock before the index
lock to reduce deadlock possibilities.
There's a lot more work to be done here to fix all the cases where this
can be a problem, but this commit provides the general infrastructure
and fixes the following specific cases: REINDEX INDEX, REINDEX TABLE,
LOCK TABLE, and and DROP TABLE/INDEX/SEQUENCE/VIEW/FOREIGN TABLE.
Per discussion with Noah Misch and Alvaro Herrera.
2011-11-30 16:12:27 +01:00
|
|
|
{
|
|
|
|
if (missing_ok)
|
|
|
|
return InvalidOid;
|
2003-07-21 03:59:11 +02:00
|
|
|
elog(ERROR, "cache lookup failed for index %u", indexId);
|
Improve table locking behavior in the face of current DDL.
In the previous coding, callers were faced with an awkward choice:
look up the name, do permissions checks, and then lock the table; or
look up the name, lock the table, and then do permissions checks.
The first choice was wrong because the results of the name lookup
and permissions checks might be out-of-date by the time the table
lock was acquired, while the second allowed a user with no privileges
to interfere with access to a table by users who do have privileges
(e.g. if a malicious backend queues up for an AccessExclusiveLock on
a table on which AccessShareLock is already held, further attempts
to access the table will be blocked until the AccessExclusiveLock
is obtained and the malicious backend's transaction rolls back).
To fix, allow callers of RangeVarGetRelid() to pass a callback which
gets executed after performing the name lookup but before acquiring
the relation lock. If the name lookup is retried (because
invalidation messages are received), the callback will be re-executed
as well, so we get the best of both worlds. RangeVarGetRelid() is
renamed to RangeVarGetRelidExtended(); callers not wishing to supply
a callback can continue to invoke it as RangeVarGetRelid(), which is
now a macro. Since the only one caller that uses nowait = true now
passes a callback anyway, the RangeVarGetRelid() macro defaults nowait
as well. The callback can also be used for supplemental locking - for
example, REINDEX INDEX needs to acquire the table lock before the index
lock to reduce deadlock possibilities.
There's a lot more work to be done here to fix all the cases where this
can be a problem, but this commit provides the general infrastructure
and fixes the following specific cases: REINDEX INDEX, REINDEX TABLE,
LOCK TABLE, and and DROP TABLE/INDEX/SEQUENCE/VIEW/FOREIGN TABLE.
Per discussion with Noah Misch and Alvaro Herrera.
2011-11-30 16:12:27 +01:00
|
|
|
}
|
1999-11-21 21:01:10 +01:00
|
|
|
index = (Form_pg_index) GETSTRUCT(tuple);
|
|
|
|
Assert(index->indexrelid == indexId);
|
|
|
|
|
2000-11-16 23:30:52 +01:00
|
|
|
result = index->indrelid;
|
|
|
|
ReleaseSysCache(tuple);
|
|
|
|
return result;
|
1999-11-21 21:01:10 +01:00
|
|
|
}
|
|
|
|
|
2003-09-24 20:54:02 +02:00
|
|
|
/*
|
|
|
|
* reindex_index - This routine is used to recreate a single index
|
2000-02-18 10:30:20 +01:00
|
|
|
*/
|
2003-09-24 20:54:02 +02:00
|
|
|
void
|
2015-05-15 13:09:57 +02:00
|
|
|
reindex_index(Oid indexId, bool skip_constraint_checks, char persistence,
|
2015-05-24 03:35:49 +02:00
|
|
|
int options)
|
2000-02-18 10:30:20 +01:00
|
|
|
{
|
2000-04-12 19:17:23 +02:00
|
|
|
Relation iRel,
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolasee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
heapRelation;
|
2001-10-07 01:21:45 +02:00
|
|
|
Oid heapId;
|
2009-03-27 16:57:11 +01:00
|
|
|
IndexInfo *indexInfo;
|
2010-02-07 23:40:33 +01:00
|
|
|
volatile bool skipped_constraint = false;
|
2015-05-15 13:09:57 +02:00
|
|
|
PGRUsage ru0;
|
2019-09-13 19:51:13 +02:00
|
|
|
bool progress = (options & REINDEXOPT_REPORT_PROGRESS) != 0;
|
2015-05-15 13:09:57 +02:00
|
|
|
|
|
|
|
pg_rusage_init(&ru0);
|
2000-02-18 10:30:20 +01:00
|
|
|
|
2001-11-20 03:46:13 +01:00
|
|
|
/*
|
2014-05-06 18:12:18 +02:00
|
|
|
* Open and lock the parent heap relation. ShareLock is sufficient since
|
2005-10-15 04:49:52 +02:00
|
|
|
* we only need to be sure no schema or data changes are going on.
|
2004-10-01 19:11:50 +02:00
|
|
|
*/
|
Improve table locking behavior in the face of current DDL.
In the previous coding, callers were faced with an awkward choice:
look up the name, do permissions checks, and then lock the table; or
look up the name, lock the table, and then do permissions checks.
The first choice was wrong because the results of the name lookup
and permissions checks might be out-of-date by the time the table
lock was acquired, while the second allowed a user with no privileges
to interfere with access to a table by users who do have privileges
(e.g. if a malicious backend queues up for an AccessExclusiveLock on
a table on which AccessShareLock is already held, further attempts
to access the table will be blocked until the AccessExclusiveLock
is obtained and the malicious backend's transaction rolls back).
To fix, allow callers of RangeVarGetRelid() to pass a callback which
gets executed after performing the name lookup but before acquiring
the relation lock. If the name lookup is retried (because
invalidation messages are received), the callback will be re-executed
as well, so we get the best of both worlds. RangeVarGetRelid() is
renamed to RangeVarGetRelidExtended(); callers not wishing to supply
a callback can continue to invoke it as RangeVarGetRelid(), which is
now a macro. Since the only one caller that uses nowait = true now
passes a callback anyway, the RangeVarGetRelid() macro defaults nowait
as well. The callback can also be used for supplemental locking - for
example, REINDEX INDEX needs to acquire the table lock before the index
lock to reduce deadlock possibilities.
There's a lot more work to be done here to fix all the cases where this
can be a problem, but this commit provides the general infrastructure
and fixes the following specific cases: REINDEX INDEX, REINDEX TABLE,
LOCK TABLE, and DROP TABLE/INDEX/SEQUENCE/VIEW/FOREIGN TABLE.
Per discussion with Noah Misch and Alvaro Herrera.
2011-11-30 16:12:27 +01:00
|
|
|
heapId = IndexGetRelation(indexId, false);
|
2019-01-21 19:32:19 +01:00
|
|
|
heapRelation = table_open(heapId, ShareLock);
|
2004-10-01 19:11:50 +02:00
|
|
|
|
2019-09-13 19:51:13 +02:00
|
|
|
if (progress)
|
|
|
|
{
|
|
|
|
pgstat_progress_start_command(PROGRESS_COMMAND_CREATE_INDEX,
|
|
|
|
heapId);
|
|
|
|
pgstat_progress_update_param(PROGRESS_CREATEIDX_COMMAND,
|
|
|
|
PROGRESS_CREATEIDX_COMMAND_REINDEX);
|
|
|
|
pgstat_progress_update_param(PROGRESS_CREATEIDX_INDEX_OID,
|
|
|
|
indexId);
|
|
|
|
}
|
2019-04-07 11:30:14 +02:00
|
|
|
|
2004-10-01 19:11:50 +02:00
|
|
|
/*
|
2005-10-15 04:49:52 +02:00
|
|
|
* Open the target index relation and get an exclusive lock on it, to
|
|
|
|
* ensure that no one else is touching this particular index.
|
2001-11-20 03:46:13 +01:00
|
|
|
*/
|
2006-07-31 22:09:10 +02:00
|
|
|
iRel = index_open(indexId, AccessExclusiveLock);
|
2001-11-20 03:46:13 +01:00
|
|
|
|
2019-09-13 19:51:13 +02:00
|
|
|
if (progress)
|
|
|
|
pgstat_progress_update_param(PROGRESS_CREATEIDX_ACCESS_METHOD_OID,
|
|
|
|
iRel->rd_rel->relam);
|
2019-04-07 11:30:14 +02:00
|
|
|
|
Local partitioned indexes
When CREATE INDEX is run on a partitioned table, create catalog entries
for an index on the partitioned table (which is just a placeholder since
the table proper has no data of its own), and recurse to create actual
indexes on the existing partitions; create them in future partitions
also.
As a convenience gadget, if the new index definition matches some
existing index in partitions, these are picked up and used instead of
creating new ones. Whichever way these indexes come about, they become
attached to the index on the parent table and are dropped alongside it,
and cannot be dropped on isolation unless they are detached first.
To support pg_dump'ing these indexes, add commands
CREATE INDEX ON ONLY <table>
(which creates the index on the parent partitioned table, without
recursing) and
ALTER INDEX ATTACH PARTITION
(which is used after the indexes have been created individually on each
partition, to attach them to the parent index). These reconstruct prior
database state exactly.
Reviewed-by: (in alphabetical order) Peter Eisentraut, Robert Haas, Amit
Langote, Jesper Pedersen, Simon Riggs, David Rowley
Discussion: https://postgr.es/m/20171113170646.gzweigyrgg6pwsg4@alvherre.pgsql
2018-01-19 15:49:22 +01:00
|
|
|
/*
|
|
|
|
* The case of reindexing partitioned tables and indexes is handled
|
|
|
|
* differently by upper layers, so this case shouldn't arise.
|
|
|
|
*/
|
|
|
|
if (iRel->rd_rel->relkind == RELKIND_PARTITIONED_INDEX)
|
|
|
|
elog(ERROR, "unsupported relation kind for index \"%s\"",
|
|
|
|
RelationGetRelationName(iRel));
|
|
|
|
|
2008-01-30 20:46:48 +01:00
|
|
|
/*
|
|
|
|
* Don't allow reindex on temp tables of other backends ... their local
|
|
|
|
* buffer manager is not going to cope.
|
|
|
|
*/
|
2009-04-01 00:12:48 +02:00
|
|
|
if (RELATION_IS_OTHER_TEMP(iRel))
|
2008-01-30 20:46:48 +01:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
Phase 3 of pgindent updates.
Don't move parenthesized lines to the left, even if that means they
flow past the right margin.
By default, BSD indent lines up statement continuation lines that are
within parentheses so that they start just to the right of the preceding
left parenthesis. However, traditionally, if that resulted in the
continuation line extending to the right of the desired right margin,
then indent would push it left just far enough to not overrun the margin,
if it could do so without making the continuation line start to the left of
the current statement indent. That makes for a weird mix of indentations
unless one has been completely rigid about never violating the 80-column
limit.
This behavior has been pretty universally panned by Postgres developers.
Hence, disable it with indent's new -lpl switch, so that parenthesized
lines are always lined up with the preceding left paren.
This patch is much less interesting than the first round of indent
changes, but also bulkier, so I thought it best to separate the effects.
Discussion: https://postgr.es/m/E1dAmxK-0006EE-1r@gemulon.postgresql.org
Discussion: https://postgr.es/m/30527.1495162840@sss.pgh.pa.us
2017-06-21 21:35:54 +02:00
|
|
|
errmsg("cannot reindex temporary tables of other sessions")));
|
2008-01-30 20:46:48 +01:00
|
|
|
|
2020-03-10 07:38:17 +01:00
|
|
|
/*
|
|
|
|
* Don't allow reindex of an invalid index on TOAST table. This is a
|
|
|
|
* leftover from a failed REINDEX CONCURRENTLY, and if rebuilt it would
|
|
|
|
* not be possible to drop it anymore.
|
|
|
|
*/
|
|
|
|
if (IsToastNamespace(RelationGetNamespace(iRel)) &&
|
|
|
|
!get_index_isvalid(indexId))
|
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
|
|
|
errmsg("cannot reindex invalid index on TOAST table")));
|
|
|
|
|
2008-01-30 20:46:48 +01:00
|
|
|
/*
|
2009-06-11 16:49:15 +02:00
|
|
|
* Also check for active uses of the index in the current transaction; we
|
|
|
|
* don't want to reindex underneath an open indexscan.
|
2008-01-30 20:46:48 +01:00
|
|
|
*/
|
|
|
|
CheckTableNotInUse(iRel, "REINDEX INDEX");
|
|
|
|
|
2011-06-08 12:47:21 +02:00
|
|
|
/*
|
|
|
|
* All predicate locks on the index are about to be made invalid. Promote
|
|
|
|
* them to relation locks on the heap.
|
|
|
|
*/
|
|
|
|
TransferPredicateLocksToHeapRelation(iRel);
|
|
|
|
|
Fix potential assertion failure when reindexing a pg_class index.
When reindexing individual indexes on pg_class it was possible to
either trigger an assertion failure:
TRAP: FailedAssertion("!(!ReindexIsProcessingIndex(((index)->rd_id)))
That's because reindex_index() called SetReindexProcessing() - which
enables asserts ensuring no index insertions happen into the index
- before calling RelationSetNewRelfilenode(). That is not correct for
indexes on pg_class, because RelationSetNewRelfilenode() updates the
relevant pg_class row, which needs to update the indexes.
The are two reasons this wasn't noticed earlier. Firstly the bug
doesn't trigger when reindexing all of pg_class, as reindex_relation
has code "hiding" all yet-to-be-reindexed indexes. Secondly, the bug
only triggers when the update to pg_class doesn't turn out to be a
HOT update - otherwise there's no index insertion to trigger the
bug. Most of the time there's enough space, making this bug hard to
trigger.
To fix, move RelationSetNewRelfilenode() to before the
SetReindexProcessing() (and, together with some other code, to outside
of the PG_TRY()).
To make sure the error checking intended by SetReindexProcessing() is
more robust, modify CatalogIndexInsert() to check
ReindexIsProcessingIndex() even when the update is a HOT update.
Also add a few regression tests for REINDEXing of system catalogs.
The last two improvements would have prevented some of the issues
fixed in 5c1560606dc4c from being introduced in the first place.
Reported-By: Michael Paquier
Diagnosed-By: Tom Lane and Andres Freund
Author: Andres Freund
Reviewed-By: Tom Lane
Discussion: https://postgr.es/m/20190418011430.GA19133@paquier.xyz
Backpatch: 9.4-, the bug is present in all branches
2019-04-30 04:42:04 +02:00
|
|
|
/* Fetch info needed for index_build */
|
|
|
|
indexInfo = BuildIndexInfo(iRel);
|
|
|
|
|
|
|
|
/* If requested, skip checking uniqueness/exclusion constraints */
|
|
|
|
if (skip_constraint_checks)
|
|
|
|
{
|
|
|
|
if (indexInfo->ii_Unique || indexInfo->ii_ExclusionOps != NULL)
|
|
|
|
skipped_constraint = true;
|
|
|
|
indexInfo->ii_Unique = false;
|
|
|
|
indexInfo->ii_ExclusionOps = NULL;
|
|
|
|
indexInfo->ii_ExclusionProcs = NULL;
|
|
|
|
indexInfo->ii_ExclusionStrats = NULL;
|
|
|
|
}
|
|
|
|
|
2020-04-21 21:58:42 +02:00
|
|
|
/* Suppress use of the target index while rebuilding it */
|
|
|
|
SetReindexProcessing(heapId, indexId);
|
2004-08-01 19:32:22 +02:00
|
|
|
|
2020-04-21 21:58:42 +02:00
|
|
|
/* Create a new physical relation for the index */
|
|
|
|
RelationSetNewRelfilenode(iRel, persistence);
|
Fix reindexing of pg_class indexes some more.
Commits 3dbb317d3 et al failed under CLOBBER_CACHE_ALWAYS testing.
Investigation showed that to reindex pg_class_oid_index, we must
suppress accesses to the index (via SetReindexProcessing) before we call
RelationSetNewRelfilenode, or at least before we do CommandCounterIncrement
therein; otherwise, relcache reloads happening within the CCI may try to
fetch pg_class rows using the index's new relfilenode value, which is as
yet an empty file.
Of course, the point of 3dbb317d3 was that that ordering didn't work
either, because then RelationSetNewRelfilenode's own update of the index's
pg_class row cannot access the index, should it need to.
There are various ways we might have got around that, but Andres Freund
came up with a brilliant solution: for a mapped index, we can really just
skip the pg_class update altogether. The only fields it was actually
changing were relpages etc, but it was just setting them to zeroes which
is useless make-work. (Correct new values will be installed at the end
of index build.) All pg_class indexes are mapped and probably always will
be, so this eliminates the problem by removing work rather than adding it,
always a pleasant outcome. Having taught RelationSetNewRelfilenode to do
it that way, we can revert the code reordering in reindex_index. (But
I left the moved setup code where it was; there seems no reason why it
has to run without use of the old index. If you're trying to fix a
busted pg_class index, you'll have had to disable system index use
altogether to get this far.)
Moreover, this means we don't need RelationSetIndexList at all, because
reindex_relation's hacking to make "REINDEX TABLE pg_class" work is
likewise now unnecessary. We'll leave that code in place in the back
branches, but a follow-on patch will remove it in HEAD.
In passing, do some minor cleanup for commit 5c1560606 (in HEAD only),
notably removing a duplicate newrnode assignment.
Patch by me, using a core idea due to Andres Freund. Back-patch to all
supported branches, as 3dbb317d3 was.
Discussion: https://postgr.es/m/28926.1556664156@sss.pgh.pa.us
2019-05-03 01:11:28 +02:00
|
|
|
|
2020-04-21 21:58:42 +02:00
|
|
|
/* Initialize the index and rebuild */
|
|
|
|
/* Note: we do not need to re-establish pkey setting */
|
|
|
|
index_build(heapRelation, iRel, indexInfo, true, true);
|
|
|
|
|
|
|
|
/* Re-allow use of target index */
|
|
|
|
ResetReindexProcessing();
|
2006-05-11 01:18:39 +02:00
|
|
|
|
2006-08-25 06:06:58 +02:00
|
|
|
/*
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolasee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
* If the index is marked invalid/not-ready/dead (ie, it's from a failed
|
|
|
|
* CREATE INDEX CONCURRENTLY, or a DROP INDEX CONCURRENTLY failed midway),
|
|
|
|
* and we didn't skip a uniqueness check, we can now mark it valid. This
|
|
|
|
* allows REINDEX to be used to clean up in such cases.
|
2009-03-27 16:57:11 +01:00
|
|
|
*
|
|
|
|
* We can also reset indcheckxmin, because we have now done a
|
|
|
|
* non-concurrent index build, *except* in the case where index_build
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
* found some still-broken HOT chains. If it did, and we don't have to
|
|
|
|
* change any of the other flags, we just leave indcheckxmin alone (note
|
|
|
|
* that index_build won't have changed it, because this is a reindex).
|
|
|
|
* This is okay and desirable because not updating the tuple leaves the
|
|
|
|
* index's usability horizon (recorded as the tuple's xmin value) the same
|
|
|
|
* as it was.
|
2011-04-20 00:50:56 +02:00
|
|
|
*
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
* But, if the index was invalid/not-ready/dead and there were broken HOT
|
|
|
|
* chains, we had better force indcheckxmin true, because the normal
|
|
|
|
* argument that the HOT chains couldn't conflict with the index is
|
|
|
|
* suspect for an invalid index. (A conflict is definitely possible if
|
2014-05-06 18:12:18 +02:00
|
|
|
* the index was dead. It probably shouldn't happen otherwise, but let's
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
* be conservative.) In this case advancing the usability horizon is
|
|
|
|
* appropriate.
|
|
|
|
*
|
|
|
|
* Another reason for avoiding unnecessary updates here is that while
|
|
|
|
* reindexing pg_index itself, we must not try to update tuples in it.
|
|
|
|
* pg_index's indexes should always have these flags in their clean state,
|
|
|
|
* so that won't happen.
|
2016-06-10 16:25:31 +02:00
|
|
|
*
|
|
|
|
* If early pruning/vacuuming is enabled for the heap relation, the
|
|
|
|
* usability horizon must be advanced to the current transaction on every
|
|
|
|
* build or rebuild. pg_index is OK in this regard because catalog tables
|
|
|
|
* are not subject to early cleanup.
|
2006-08-25 06:06:58 +02:00
|
|
|
*/
|
2011-04-19 22:55:34 +02:00
|
|
|
if (!skipped_constraint)
|
2010-02-07 23:40:33 +01:00
|
|
|
{
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
Relation pg_index;
|
|
|
|
HeapTuple indexTuple;
|
|
|
|
Form_pg_index indexForm;
|
|
|
|
bool index_bad;
|
2016-06-10 18:24:01 +02:00
|
|
|
bool early_pruning_enabled = EarlyPruningEnabled(heapRelation);
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
|
2019-01-21 19:32:19 +01:00
|
|
|
pg_index = table_open(IndexRelationId, RowExclusiveLock);
|
2006-08-25 06:06:58 +02:00
|
|
|
|
2010-02-14 19:42:19 +01:00
|
|
|
indexTuple = SearchSysCacheCopy1(INDEXRELID,
|
|
|
|
ObjectIdGetDatum(indexId));
|
2010-02-07 23:40:33 +01:00
|
|
|
if (!HeapTupleIsValid(indexTuple))
|
|
|
|
elog(ERROR, "cache lookup failed for index %u", indexId);
|
|
|
|
indexForm = (Form_pg_index) GETSTRUCT(indexTuple);
|
2006-08-25 06:06:58 +02:00
|
|
|
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
index_bad = (!indexForm->indisvalid ||
|
|
|
|
!indexForm->indisready ||
|
|
|
|
!indexForm->indislive);
|
|
|
|
if (index_bad ||
|
2016-06-10 16:25:31 +02:00
|
|
|
(indexForm->indcheckxmin && !indexInfo->ii_BrokenHotChain) ||
|
2016-06-10 18:24:01 +02:00
|
|
|
early_pruning_enabled)
|
2010-02-07 23:40:33 +01:00
|
|
|
{
|
2016-06-10 18:24:01 +02:00
|
|
|
if (!indexInfo->ii_BrokenHotChain && !early_pruning_enabled)
|
2010-02-07 23:40:33 +01:00
|
|
|
indexForm->indcheckxmin = false;
|
2016-06-10 18:24:01 +02:00
|
|
|
else if (index_bad || early_pruning_enabled)
|
2011-04-21 01:01:20 +02:00
|
|
|
indexForm->indcheckxmin = true;
|
|
|
|
indexForm->indisvalid = true;
|
|
|
|
indexForm->indisready = true;
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
indexForm->indislive = true;
|
2017-01-31 22:42:24 +01:00
|
|
|
CatalogTupleUpdate(pg_index, &indexTuple->t_self, indexTuple);
|
Fix assorted bugs in CREATE/DROP INDEX CONCURRENTLY.
Commit 8cb53654dbdb4c386369eb988062d0bbb6de725e, which introduced DROP
INDEX CONCURRENTLY, managed to break CREATE INDEX CONCURRENTLY via a poor
choice of catalog state representation. The pg_index state for an index
that's reached the final pre-drop stage was the same as the state for an
index just created by CREATE INDEX CONCURRENTLY. This meant that the
(necessary) change to make RelationGetIndexList ignore about-to-die indexes
also made it ignore freshly-created indexes; which is catastrophic because
the latter do need to be considered in HOT-safety decisions. Failure to
do so leads to incorrect index entries and subsequently wrong results from
queries depending on the concurrently-created index.
To fix, add an additional boolean column "indislive" to pg_index, so that
the freshly-created and about-to-die states can be distinguished. (This
change obviously is only possible in HEAD. This patch will need to be
back-patched, but in 9.2 we'll use a kluge consisting of overloading the
formerly-impossible state of indisvalid = true and indisready = false.)
In addition, change CREATE/DROP INDEX CONCURRENTLY so that the pg_index
flag changes they make without exclusive lock on the index are made via
heap_inplace_update() rather than a normal transactional update. The
latter is not very safe because moving the pg_index tuple could result in
concurrent SnapshotNow scans finding it twice or not at all, thus possibly
resulting in index corruption. This is a pre-existing bug in CREATE INDEX
CONCURRENTLY, which was copied into the DROP code.
In addition, fix various places in the code that ought to check to make
sure that the indexes they are manipulating are valid and/or ready as
appropriate. These represent bugs that have existed since 8.2, since
a failed CREATE INDEX CONCURRENTLY could leave a corrupt or invalid
index behind, and we ought not try to do anything that might fail with
such an index.
Also fix RelationReloadIndexInfo to ensure it copies all the pg_index
columns that are allowed to change after initial creation. Previously we
could have been left with stale values of some fields in an index relcache
entry. It's not clear whether this actually had any user-visible
consequences, but it's at least a bug waiting to happen.
In addition, do some code and docs review for DROP INDEX CONCURRENTLY;
some cosmetic code cleanup but mostly addition and revision of comments.
This will need to be back-patched, but in a noticeably different form,
so I'm committing it to HEAD before working on the back-patch.
Problem reported by Amit Kapila, diagnosis by Pavan Deolassee,
fix by Tom Lane and Andres Freund.
2012-11-29 03:25:27 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Invalidate the relcache for the table, so that after we commit
|
|
|
|
* all sessions will refresh the table's index list. This ensures
|
|
|
|
* that if anyone misses seeing the pg_index row during this
|
|
|
|
* update, they'll refresh their list before attempting any update
|
|
|
|
* on the table.
|
|
|
|
*/
|
|
|
|
CacheInvalidateRelcache(heapRelation);
|
2010-02-07 23:40:33 +01:00
|
|
|
}
|
|
|
|
|
2019-01-21 19:32:19 +01:00
|
|
|
table_close(pg_index, RowExclusiveLock);
|
2006-08-25 06:06:58 +02:00
|
|
|
}
|
|
|
|
|
2015-05-15 13:09:57 +02:00
|
|
|
/* Log what we did */
|
|
|
|
if (options & REINDEXOPT_VERBOSE)
|
|
|
|
ereport(INFO,
|
|
|
|
(errmsg("index \"%s\" was reindexed",
|
|
|
|
get_rel_name(indexId)),
|
2017-06-04 17:41:16 +02:00
|
|
|
errdetail_internal("%s",
|
2017-06-13 19:05:59 +02:00
|
|
|
pg_rusage_show(&ru0))));
|
2015-05-15 13:09:57 +02:00
|
|
|
|
2019-09-13 19:51:13 +02:00
|
|
|
if (progress)
|
|
|
|
pgstat_progress_end_command();
|
2019-04-07 11:30:14 +02:00
|
|
|
|
2006-05-11 01:18:39 +02:00
|
|
|
/* Close rels, but keep locks */
|
2006-07-31 22:09:10 +02:00
|
|
|
index_close(iRel, NoLock);
|
2019-01-21 19:32:19 +01:00
|
|
|
table_close(heapRelation, NoLock);
|
2000-02-18 10:30:20 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2003-09-24 20:54:02 +02:00
|
|
|
* reindex_relation - This routine is used to recreate all indexes
|
2004-05-08 02:34:49 +02:00
|
|
|
* of a relation (and optionally its toast relation too, if any).
|
2002-09-23 02:42:48 +02:00
|
|
|
*
|
2011-04-16 23:26:41 +02:00
|
|
|
* "flags" is a bitmask that can include any combination of these bits:
|
2010-02-07 21:48:13 +01:00
|
|
|
*
|
2011-04-16 23:26:41 +02:00
|
|
|
* REINDEX_REL_PROCESS_TOAST: if true, process the toast table too (if any).
|
|
|
|
*
|
|
|
|
* REINDEX_REL_SUPPRESS_INDEX_USE: if true, the relation was just completely
|
2011-01-21 04:44:10 +01:00
|
|
|
* rebuilt by an operation such as VACUUM FULL or CLUSTER, and therefore its
|
|
|
|
* indexes are inconsistent with it. This makes things tricky if the relation
|
|
|
|
* is a system catalog that we might consult during the reindexing. To deal
|
|
|
|
* with that case, we mark all of the indexes as pending rebuild so that they
|
|
|
|
* won't be trusted until rebuilt. The caller is required to call us *without*
|
2011-04-16 23:26:41 +02:00
|
|
|
* having made the rebuilt table visible by doing CommandCounterIncrement;
|
2011-01-21 04:44:10 +01:00
|
|
|
* we'll do CCI after having collected the index list. (This way we can still
|
|
|
|
* use catalog indexes while collecting the list.)
|
|
|
|
*
|
2011-04-16 23:26:41 +02:00
|
|
|
* REINDEX_REL_CHECK_CONSTRAINTS: if true, recheck unique and exclusion
|
|
|
|
* constraint conditions, else don't. To avoid deadlocks, VACUUM FULL or
|
|
|
|
* CLUSTER on a system catalog must omit this flag. REINDEX should be used to
|
|
|
|
* rebuild an index if constraint inconsistency is suspected. For optimal
|
|
|
|
* performance, other callers should include the flag only after transforming
|
|
|
|
* the data in a manner that risks a change in constraint validity.
|
2010-02-07 23:40:33 +01:00
|
|
|
*
|
2014-11-15 05:19:49 +01:00
|
|
|
* REINDEX_REL_FORCE_INDEXES_UNLOGGED: if true, set the persistence of the
|
|
|
|
* rebuilt indexes to unlogged.
|
|
|
|
*
|
2015-04-08 13:55:43 +02:00
|
|
|
* REINDEX_REL_FORCE_INDEXES_PERMANENT: if true, set the persistence of the
|
2014-11-15 05:19:49 +01:00
|
|
|
* rebuilt indexes to permanent.
|
|
|
|
*
|
2011-04-16 23:26:41 +02:00
|
|
|
* Returns true if any indexes were rebuilt (including toast table's index
|
2014-05-06 18:12:18 +02:00
|
|
|
* when relevant). Note that a CommandCounterIncrement will occur after each
|
2011-04-16 23:26:41 +02:00
|
|
|
* index rebuild.
|
2000-02-18 10:30:20 +01:00
|
|
|
*/
|
|
|
|
bool
|
2015-05-15 13:09:57 +02:00
|
|
|
reindex_relation(Oid relid, int flags, int options)
|
2000-02-18 10:30:20 +01:00
|
|
|
{
|
2001-03-22 05:01:46 +01:00
|
|
|
Relation rel;
|
2003-09-24 20:54:02 +02:00
|
|
|
Oid toast_relid;
|
2010-02-07 21:48:13 +01:00
|
|
|
List *indexIds;
|
2020-04-21 21:58:42 +02:00
|
|
|
char persistence;
|
2003-09-24 20:54:02 +02:00
|
|
|
bool result;
|
2020-04-21 21:58:42 +02:00
|
|
|
ListCell *indexId;
|
2019-03-25 15:59:04 +01:00
|
|
|
int i;
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2001-08-18 01:50:00 +02:00
|
|
|
/*
|
2014-05-06 18:12:18 +02:00
|
|
|
* Open and lock the relation. ShareLock is sufficient since we only need
|
2011-07-09 04:19:30 +02:00
|
|
|
* to prevent schema and data changes in it. The lock level used here
|
|
|
|
* should match ReindexTable().
|
2001-08-18 01:50:00 +02:00
|
|
|
*/
|
2019-01-21 19:32:19 +01:00
|
|
|
rel = table_open(relid, ShareLock);
|
2001-10-25 07:50:21 +02:00
|
|
|
|
Local partitioned indexes
When CREATE INDEX is run on a partitioned table, create catalog entries
for an index on the partitioned table (which is just a placeholder since
the table proper has no data of its own), and recurse to create actual
indexes on the existing partitions; create them in future partitions
also.
As a convenience gadget, if the new index definition matches some
existing index in partitions, these are picked up and used instead of
creating new ones. Whichever way these indexes come about, they become
attached to the index on the parent table and are dropped alongside it,
and cannot be dropped on isolation unless they are detached first.
To support pg_dump'ing these indexes, add commands
CREATE INDEX ON ONLY <table>
(which creates the index on the parent partitioned table, without
recursing) and
ALTER INDEX ATTACH PARTITION
(which is used after the indexes have been created individually on each
partition, to attach them to the parent index). These reconstruct prior
database state exactly.
Reviewed-by: (in alphabetical order) Peter Eisentraut, Robert Haas, Amit
Langote, Jesper Pedersen, Simon Riggs, David Rowley
Discussion: https://postgr.es/m/20171113170646.gzweigyrgg6pwsg4@alvherre.pgsql
2018-01-19 15:49:22 +01:00
|
|
|
/*
|
|
|
|
* This may be useful when implemented someday; but that day is not today.
|
|
|
|
* For now, avoid erroring out when called in a multi-table context
|
|
|
|
* (REINDEX SCHEMA) and happen to come across a partitioned table. The
|
|
|
|
* partitions may be reindexed on their own anyway.
|
|
|
|
*/
|
|
|
|
if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
|
|
|
|
{
|
|
|
|
ereport(WARNING,
|
|
|
|
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
|
|
|
errmsg("REINDEX of partitioned tables is not yet implemented, skipping \"%s\"",
|
|
|
|
RelationGetRelationName(rel))));
|
2019-01-21 19:32:19 +01:00
|
|
|
table_close(rel, ShareLock);
|
Local partitioned indexes
When CREATE INDEX is run on a partitioned table, create catalog entries
for an index on the partitioned table (which is just a placeholder since
the table proper has no data of its own), and recurse to create actual
indexes on the existing partitions; create them in future partitions
also.
As a convenience gadget, if the new index definition matches some
existing index in partitions, these are picked up and used instead of
creating new ones. Whichever way these indexes come about, they become
attached to the index on the parent table and are dropped alongside it,
and cannot be dropped on isolation unless they are detached first.
To support pg_dump'ing these indexes, add commands
CREATE INDEX ON ONLY <table>
(which creates the index on the parent partitioned table, without
recursing) and
ALTER INDEX ATTACH PARTITION
(which is used after the indexes have been created individually on each
partition, to attach them to the parent index). These reconstruct prior
database state exactly.
Reviewed-by: (in alphabetical order) Peter Eisentraut, Robert Haas, Amit
Langote, Jesper Pedersen, Simon Riggs, David Rowley
Discussion: https://postgr.es/m/20171113170646.gzweigyrgg6pwsg4@alvherre.pgsql
2018-01-19 15:49:22 +01:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2003-09-24 20:54:02 +02:00
|
|
|
toast_relid = rel->rd_rel->reltoastrelid;
|
|
|
|
|
2000-12-08 07:17:58 +01:00
|
|
|
/*
|
2003-09-24 20:54:02 +02:00
|
|
|
* Get the list of index OIDs for this relation. (We trust to the
|
|
|
|
* relcache to get this with a sequential scan if ignoring system
|
|
|
|
* indexes.)
|
2001-03-22 05:01:46 +01:00
|
|
|
*/
|
2003-09-24 20:54:02 +02:00
|
|
|
indexIds = RelationGetIndexList(rel);
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2020-04-21 21:58:42 +02:00
|
|
|
if (flags & REINDEX_REL_SUPPRESS_INDEX_USE)
|
2001-02-23 10:26:14 +01:00
|
|
|
{
|
2020-04-21 21:58:42 +02:00
|
|
|
/* Suppress use of all the indexes until they are rebuilt */
|
|
|
|
SetReindexPending(indexIds);
|
2001-10-25 07:50:21 +02:00
|
|
|
|
2020-04-21 21:58:42 +02:00
|
|
|
/*
|
|
|
|
* Make the new heap contents visible --- now things might be
|
|
|
|
* inconsistent!
|
|
|
|
*/
|
|
|
|
CommandCounterIncrement();
|
|
|
|
}
|
2002-09-23 02:42:48 +02:00
|
|
|
|
2020-04-21 21:58:42 +02:00
|
|
|
/*
|
|
|
|
* Compute persistence of indexes: same as that of owning rel, unless
|
|
|
|
* caller specified otherwise.
|
|
|
|
*/
|
|
|
|
if (flags & REINDEX_REL_FORCE_INDEXES_UNLOGGED)
|
|
|
|
persistence = RELPERSISTENCE_UNLOGGED;
|
|
|
|
else if (flags & REINDEX_REL_FORCE_INDEXES_PERMANENT)
|
|
|
|
persistence = RELPERSISTENCE_PERMANENT;
|
|
|
|
else
|
|
|
|
persistence = rel->rd_rel->relpersistence;
|
|
|
|
|
|
|
|
/* Reindex all the indexes. */
|
|
|
|
i = 1;
|
|
|
|
foreach(indexId, indexIds)
|
|
|
|
{
|
|
|
|
Oid indexOid = lfirst_oid(indexId);
|
|
|
|
Oid indexNamespaceId = get_rel_namespace(indexOid);
|
2003-09-24 20:54:02 +02:00
|
|
|
|
2014-11-15 05:19:49 +01:00
|
|
|
/*
|
2020-04-21 21:58:42 +02:00
|
|
|
* Skip any invalid indexes on a TOAST table. These can only be
|
|
|
|
* duplicate leftovers from a failed REINDEX CONCURRENTLY, and if
|
|
|
|
* rebuilt it would not be possible to drop them anymore.
|
2014-11-15 05:19:49 +01:00
|
|
|
*/
|
2020-04-21 21:58:42 +02:00
|
|
|
if (IsToastNamespace(indexNamespaceId) &&
|
|
|
|
!get_index_isvalid(indexOid))
|
2010-02-07 21:48:13 +01:00
|
|
|
{
|
2020-04-21 21:58:42 +02:00
|
|
|
ereport(WARNING,
|
|
|
|
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
|
|
|
errmsg("cannot reindex invalid index \"%s.%s\" on TOAST table, skipping",
|
|
|
|
get_namespace_name(indexNamespaceId),
|
|
|
|
get_rel_name(indexOid))));
|
|
|
|
continue;
|
|
|
|
}
|
2010-02-07 21:48:13 +01:00
|
|
|
|
2020-04-21 21:58:42 +02:00
|
|
|
reindex_index(indexOid, !(flags & REINDEX_REL_CHECK_CONSTRAINTS),
|
|
|
|
persistence, options);
|
2010-02-07 21:48:13 +01:00
|
|
|
|
2020-04-21 21:58:42 +02:00
|
|
|
CommandCounterIncrement();
|
2010-02-07 21:48:13 +01:00
|
|
|
|
2020-04-21 21:58:42 +02:00
|
|
|
/* Index should no longer be in the pending list */
|
|
|
|
Assert(!ReindexIsProcessingIndex(indexOid));
|
2010-02-07 21:48:13 +01:00
|
|
|
|
2020-04-21 21:58:42 +02:00
|
|
|
/* Set index rebuild count */
|
|
|
|
pgstat_progress_update_param(PROGRESS_CLUSTER_INDEX_REBUILD_COUNT,
|
|
|
|
i);
|
|
|
|
i++;
|
2010-02-07 21:48:13 +01:00
|
|
|
}
|
2003-09-23 03:51:09 +02:00
|
|
|
|
2002-09-23 02:42:48 +02:00
|
|
|
/*
|
2003-09-24 20:54:02 +02:00
|
|
|
* Close rel, but continue to hold the lock.
|
2002-09-23 02:42:48 +02:00
|
|
|
*/
|
2019-01-21 19:32:19 +01:00
|
|
|
table_close(rel, NoLock);
|
2002-09-23 02:42:48 +02:00
|
|
|
|
2003-09-24 20:54:02 +02:00
|
|
|
result = (indexIds != NIL);
|
2001-03-22 05:01:46 +01:00
|
|
|
|
2003-09-24 20:54:02 +02:00
|
|
|
/*
|
2005-10-15 04:49:52 +02:00
|
|
|
* If the relation has a secondary toast rel, reindex that too while we
|
2020-06-15 19:14:40 +02:00
|
|
|
* still hold the lock on the main table.
|
2003-09-24 20:54:02 +02:00
|
|
|
*/
|
2011-04-16 23:26:41 +02:00
|
|
|
if ((flags & REINDEX_REL_PROCESS_TOAST) && OidIsValid(toast_relid))
|
2015-05-15 13:09:57 +02:00
|
|
|
result |= reindex_relation(toast_relid, flags, options);
|
2000-12-08 07:17:58 +01:00
|
|
|
|
2003-09-24 20:54:02 +02:00
|
|
|
return result;
|
2000-02-18 10:30:20 +01:00
|
|
|
}
|
2010-02-07 21:48:13 +01:00
|
|
|
|
|
|
|
|
|
|
|
/* ----------------------------------------------------------------
 *		System index reindexing support
 *
 * When we are busy reindexing a system index, this code provides support
 * for preventing catalog lookups from using that index.  We also make use
 * of this to catch attempted uses of user indexes during reindexing of
 * those indexes.  This information is propagated to parallel workers;
 * attempting to change it during a parallel operation is not permitted.
 * ----------------------------------------------------------------
 */

/* OID of the heap whose index is actively being rebuilt, or InvalidOid */
static Oid	currentlyReindexedHeap = InvalidOid;
/* OID of the index actively being rebuilt, or InvalidOid */
static Oid	currentlyReindexedIndex = InvalidOid;
/* OIDs of indexes awaiting rebuild in the current REINDEX operation */
static List *pendingReindexedIndexes = NIL;
/* transaction nest level at which the above state was established */
static int	reindexingNestLevel = 0;
|
2010-02-07 21:48:13 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* ReindexIsProcessingHeap
|
|
|
|
* True if heap specified by OID is currently being reindexed.
|
|
|
|
*/
|
|
|
|
bool
|
|
|
|
ReindexIsProcessingHeap(Oid heapOid)
|
|
|
|
{
|
|
|
|
return heapOid == currentlyReindexedHeap;
|
|
|
|
}
|
|
|
|
|
2011-06-06 04:30:04 +02:00
|
|
|
/*
|
|
|
|
* ReindexIsCurrentlyProcessingIndex
|
|
|
|
* True if index specified by OID is currently being reindexed.
|
|
|
|
*/
|
|
|
|
static bool
|
|
|
|
ReindexIsCurrentlyProcessingIndex(Oid indexOid)
|
|
|
|
{
|
|
|
|
return indexOid == currentlyReindexedIndex;
|
|
|
|
}
|
|
|
|
|
2010-02-07 21:48:13 +01:00
|
|
|
/*
|
|
|
|
* ReindexIsProcessingIndex
|
|
|
|
* True if index specified by OID is currently being reindexed,
|
|
|
|
* or should be treated as invalid because it is awaiting reindex.
|
|
|
|
*/
|
|
|
|
bool
|
|
|
|
ReindexIsProcessingIndex(Oid indexOid)
|
|
|
|
{
|
|
|
|
return indexOid == currentlyReindexedIndex ||
|
|
|
|
list_member_oid(pendingReindexedIndexes, indexOid);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* SetReindexProcessing
|
|
|
|
* Set flag that specified heap/index are being reindexed.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
SetReindexProcessing(Oid heapOid, Oid indexOid)
|
|
|
|
{
|
|
|
|
Assert(OidIsValid(heapOid) && OidIsValid(indexOid));
|
|
|
|
/* Reindexing is not re-entrant. */
|
|
|
|
if (OidIsValid(currentlyReindexedHeap))
|
|
|
|
elog(ERROR, "cannot reindex while reindexing");
|
|
|
|
currentlyReindexedHeap = heapOid;
|
|
|
|
currentlyReindexedIndex = indexOid;
|
2011-06-06 04:30:04 +02:00
|
|
|
/* Index is no longer "pending" reindex. */
|
|
|
|
RemoveReindexPending(indexOid);
|
2020-04-21 21:58:42 +02:00
|
|
|
/* This may have been set already, but in case it isn't, do so now. */
|
|
|
|
reindexingNestLevel = GetCurrentTransactionNestLevel();
|
2010-02-07 21:48:13 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* ResetReindexProcessing
|
|
|
|
* Unset reindexing status.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
ResetReindexProcessing(void)
|
|
|
|
{
|
|
|
|
currentlyReindexedHeap = InvalidOid;
|
|
|
|
currentlyReindexedIndex = InvalidOid;
|
2020-04-21 21:58:42 +02:00
|
|
|
/* reindexingNestLevel remains set till end of (sub)transaction */
|
2010-02-07 21:48:13 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* SetReindexPending
|
|
|
|
* Mark the given indexes as pending reindex.
|
|
|
|
*
|
2020-04-21 21:58:42 +02:00
|
|
|
* NB: we assume that the current memory context stays valid throughout.
|
2010-02-07 21:48:13 +01:00
|
|
|
*/
|
|
|
|
static void
|
|
|
|
SetReindexPending(List *indexes)
|
|
|
|
{
|
|
|
|
/* Reindexing is not re-entrant. */
|
|
|
|
if (pendingReindexedIndexes)
|
|
|
|
elog(ERROR, "cannot reindex while reindexing");
|
2018-01-19 13:48:44 +01:00
|
|
|
if (IsInParallelMode())
|
|
|
|
elog(ERROR, "cannot modify reindex state during a parallel operation");
|
2010-02-07 21:48:13 +01:00
|
|
|
pendingReindexedIndexes = list_copy(indexes);
|
2020-04-21 21:58:42 +02:00
|
|
|
reindexingNestLevel = GetCurrentTransactionNestLevel();
|
2010-02-07 21:48:13 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* RemoveReindexPending
|
|
|
|
* Remove the given index from the pending list.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
RemoveReindexPending(Oid indexOid)
|
|
|
|
{
|
2018-01-19 13:48:44 +01:00
|
|
|
if (IsInParallelMode())
|
|
|
|
elog(ERROR, "cannot modify reindex state during a parallel operation");
|
2010-02-07 21:48:13 +01:00
|
|
|
pendingReindexedIndexes = list_delete_oid(pendingReindexedIndexes,
|
|
|
|
indexOid);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2020-04-21 21:58:42 +02:00
|
|
|
* ResetReindexState
|
|
|
|
* Clear all reindexing state during (sub)transaction abort.
|
2010-02-07 21:48:13 +01:00
|
|
|
*/
|
2020-04-21 21:58:42 +02:00
|
|
|
void
|
|
|
|
ResetReindexState(int nestLevel)
|
2010-02-07 21:48:13 +01:00
|
|
|
{
|
2020-04-21 21:58:42 +02:00
|
|
|
/*
|
|
|
|
* Because reindexing is not re-entrant, we don't need to cope with nested
|
|
|
|
* reindexing states. We just need to avoid messing up the outer-level
|
|
|
|
* state in case a subtransaction fails within a REINDEX. So checking the
|
|
|
|
* current nest level against that of the reindex operation is sufficient.
|
|
|
|
*/
|
|
|
|
if (reindexingNestLevel >= nestLevel)
|
|
|
|
{
|
|
|
|
currentlyReindexedHeap = InvalidOid;
|
|
|
|
currentlyReindexedIndex = InvalidOid;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We needn't try to release the contents of pendingReindexedIndexes;
|
|
|
|
* that list should be in a transaction-lifespan context, so it will
|
|
|
|
* go away automatically.
|
|
|
|
*/
|
|
|
|
pendingReindexedIndexes = NIL;
|
|
|
|
|
|
|
|
reindexingNestLevel = 0;
|
|
|
|
}
|
2010-02-07 21:48:13 +01:00
|
|
|
}
|
2018-01-19 13:48:44 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* EstimateReindexStateSpace
|
|
|
|
* Estimate space needed to pass reindex state to parallel workers.
|
|
|
|
*/
|
2018-02-19 18:07:44 +01:00
|
|
|
Size
|
2018-01-19 13:48:44 +01:00
|
|
|
EstimateReindexStateSpace(void)
|
|
|
|
{
|
|
|
|
return offsetof(SerializedReindexState, pendingReindexedIndexes)
|
|
|
|
+ mul_size(sizeof(Oid), list_length(pendingReindexedIndexes));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* SerializeReindexState
|
|
|
|
* Serialize reindex state for parallel workers.
|
|
|
|
*/
|
2018-02-19 18:07:44 +01:00
|
|
|
void
|
2018-01-19 13:48:44 +01:00
|
|
|
SerializeReindexState(Size maxsize, char *start_address)
|
|
|
|
{
|
|
|
|
SerializedReindexState *sistate = (SerializedReindexState *) start_address;
|
|
|
|
int c = 0;
|
|
|
|
ListCell *lc;
|
|
|
|
|
|
|
|
sistate->currentlyReindexedHeap = currentlyReindexedHeap;
|
|
|
|
sistate->currentlyReindexedIndex = currentlyReindexedIndex;
|
|
|
|
sistate->numPendingReindexedIndexes = list_length(pendingReindexedIndexes);
|
|
|
|
foreach(lc, pendingReindexedIndexes)
|
|
|
|
sistate->pendingReindexedIndexes[c++] = lfirst_oid(lc);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* RestoreReindexState
|
|
|
|
* Restore reindex state in a parallel worker.
|
|
|
|
*/
|
2018-02-19 18:07:44 +01:00
|
|
|
void
|
2018-01-19 13:48:44 +01:00
|
|
|
RestoreReindexState(void *reindexstate)
|
|
|
|
{
|
|
|
|
SerializedReindexState *sistate = (SerializedReindexState *) reindexstate;
|
|
|
|
int c = 0;
|
2018-04-26 20:47:16 +02:00
|
|
|
MemoryContext oldcontext;
|
2018-01-19 13:48:44 +01:00
|
|
|
|
|
|
|
currentlyReindexedHeap = sistate->currentlyReindexedHeap;
|
|
|
|
currentlyReindexedIndex = sistate->currentlyReindexedIndex;
|
|
|
|
|
|
|
|
Assert(pendingReindexedIndexes == NIL);
|
|
|
|
oldcontext = MemoryContextSwitchTo(TopMemoryContext);
|
|
|
|
for (c = 0; c < sistate->numPendingReindexedIndexes; ++c)
|
|
|
|
pendingReindexedIndexes =
|
|
|
|
lappend_oid(pendingReindexedIndexes,
|
|
|
|
sistate->pendingReindexedIndexes[c]);
|
|
|
|
MemoryContextSwitchTo(oldcontext);
|
2020-04-21 21:58:42 +02:00
|
|
|
|
|
|
|
/* Note the worker has its own transaction nesting level */
|
|
|
|
reindexingNestLevel = GetCurrentTransactionNestLevel();
|
2018-01-19 13:48:44 +01:00
|
|
|
}
|