/*-------------------------------------------------------------------------
 *
 * matview.c
 *	  materialized view support
 *
 * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/commands/matview.c
 *
 *-------------------------------------------------------------------------
 */
|
|
|
|
#include "postgres.h"
|
|
|
|
|
2013-05-06 19:26:51 +02:00
|
|
|
#include "access/htup_details.h"
|
2013-03-04 01:23:31 +01:00
|
|
|
#include "access/multixact.h"
|
|
|
|
#include "access/xact.h"
|
2014-11-06 12:52:08 +01:00
|
|
|
#include "access/xlog.h"
|
2013-03-04 01:23:31 +01:00
|
|
|
#include "catalog/catalog.h"
|
2013-05-06 19:26:51 +02:00
|
|
|
#include "catalog/indexing.h"
|
2013-03-04 01:23:31 +01:00
|
|
|
#include "catalog/namespace.h"
|
2018-03-19 23:49:53 +01:00
|
|
|
#include "catalog/pg_am.h"
|
|
|
|
#include "catalog/pg_opclass.h"
|
2013-07-16 19:55:44 +02:00
|
|
|
#include "catalog/pg_operator.h"
|
2013-03-04 01:23:31 +01:00
|
|
|
#include "commands/cluster.h"
|
|
|
|
#include "commands/matview.h"
|
|
|
|
#include "commands/tablecmds.h"
|
2013-07-16 19:55:44 +02:00
|
|
|
#include "commands/tablespace.h"
|
2013-03-04 01:23:31 +01:00
|
|
|
#include "executor/executor.h"
|
2013-07-16 19:55:44 +02:00
|
|
|
#include "executor/spi.h"
|
2013-03-04 01:23:31 +01:00
|
|
|
#include "miscadmin.h"
|
2013-07-16 19:55:44 +02:00
|
|
|
#include "parser/parse_relation.h"
|
2017-03-18 22:49:06 +01:00
|
|
|
#include "pgstat.h"
|
2013-03-04 01:23:31 +01:00
|
|
|
#include "rewrite/rewriteHandler.h"
|
2013-11-05 22:36:33 +01:00
|
|
|
#include "storage/lmgr.h"
|
2013-03-04 01:23:31 +01:00
|
|
|
#include "storage/smgr.h"
|
|
|
|
#include "tcop/tcopprot.h"
|
2013-07-16 19:55:44 +02:00
|
|
|
#include "utils/builtins.h"
|
|
|
|
#include "utils/lsyscache.h"
|
2013-05-06 19:26:51 +02:00
|
|
|
#include "utils/rel.h"
|
2013-03-04 01:23:31 +01:00
|
|
|
#include "utils/snapmgr.h"
|
2013-05-06 19:26:51 +02:00
|
|
|
#include "utils/syscache.h"
|
2013-03-04 01:23:31 +01:00
|
|
|
|
|
|
|
|
|
|
|
typedef struct
|
|
|
|
{
|
|
|
|
DestReceiver pub; /* publicly-known function pointers */
|
|
|
|
Oid transientoid; /* OID of new heap into which to store */
|
|
|
|
/* These fields are filled by transientrel_startup: */
|
|
|
|
Relation transientrel; /* relation to write to */
|
|
|
|
CommandId output_cid; /* cmin to insert in output tuples */
|
|
|
|
int hi_options; /* heap_insert performance options */
|
|
|
|
BulkInsertState bistate; /* bulk insert state */
|
|
|
|
} DR_transientrel;
|
|
|
|
|
2013-07-16 19:55:44 +02:00
|
|
|
static int matview_maintenance_depth = 0;
|
|
|
|
|
2013-03-04 01:23:31 +01:00
|
|
|
static void transientrel_startup(DestReceiver *self, int operation, TupleDesc typeinfo);
|
2016-06-06 20:52:58 +02:00
|
|
|
static bool transientrel_receive(TupleTableSlot *slot, DestReceiver *self);
|
2013-03-04 01:23:31 +01:00
|
|
|
static void transientrel_shutdown(DestReceiver *self);
|
|
|
|
static void transientrel_destroy(DestReceiver *self);
|
2017-03-18 22:49:06 +01:00
|
|
|
static uint64 refresh_matview_datafill(DestReceiver *dest, Query *query,
|
2014-08-26 16:56:26 +02:00
|
|
|
const char *queryString);
|
2013-07-16 19:55:44 +02:00
|
|
|
static char *make_temptable_name_n(char *tempname, int n);
|
2014-08-26 16:56:26 +02:00
|
|
|
static void refresh_by_match_merge(Oid matviewOid, Oid tempOid, Oid relowner,
|
2015-05-24 03:35:49 +02:00
|
|
|
int save_sec_context);
|
2014-11-15 05:19:49 +01:00
|
|
|
static void refresh_by_heap_swap(Oid matviewOid, Oid OIDNewHeap, char relpersistence);
|
2018-03-19 23:49:53 +01:00
|
|
|
static bool is_usable_unique_index(Relation indexRel);
|
2013-07-16 19:55:44 +02:00
|
|
|
static void OpenMatViewIncrementalMaintenance(void);
|
|
|
|
static void CloseMatViewIncrementalMaintenance(void);
|
2013-03-04 01:23:31 +01:00
|
|
|
|
|
|
|
/*
|
2013-05-06 19:26:51 +02:00
|
|
|
* SetMatViewPopulatedState
|
|
|
|
* Mark a materialized view as populated, or not.
|
2013-03-04 01:23:31 +01:00
|
|
|
*
|
|
|
|
* NOTE: caller must be holding an appropriate lock on the relation.
|
|
|
|
*/
|
|
|
|
void
|
2013-05-06 19:26:51 +02:00
|
|
|
SetMatViewPopulatedState(Relation relation, bool newstate)
|
2013-03-04 01:23:31 +01:00
|
|
|
{
|
2013-05-06 19:26:51 +02:00
|
|
|
Relation pgrel;
|
|
|
|
HeapTuple tuple;
|
2013-03-04 01:23:31 +01:00
|
|
|
|
|
|
|
Assert(relation->rd_rel->relkind == RELKIND_MATVIEW);
|
2013-03-07 00:15:34 +01:00
|
|
|
|
2013-05-06 19:26:51 +02:00
|
|
|
/*
|
|
|
|
* Update relation's pg_class entry. Crucial side-effect: other backends
|
|
|
|
* (and this one too!) are sent SI message to make them rebuild relcache
|
|
|
|
* entries.
|
|
|
|
*/
|
|
|
|
pgrel = heap_open(RelationRelationId, RowExclusiveLock);
|
|
|
|
tuple = SearchSysCacheCopy1(RELOID,
|
|
|
|
ObjectIdGetDatum(RelationGetRelid(relation)));
|
|
|
|
if (!HeapTupleIsValid(tuple))
|
|
|
|
elog(ERROR, "cache lookup failed for relation %u",
|
|
|
|
RelationGetRelid(relation));
|
2013-03-07 00:15:34 +01:00
|
|
|
|
2013-05-06 19:26:51 +02:00
|
|
|
((Form_pg_class) GETSTRUCT(tuple))->relispopulated = newstate;
|
2013-03-22 14:54:07 +01:00
|
|
|
|
2017-01-31 22:42:24 +01:00
|
|
|
CatalogTupleUpdate(pgrel, &tuple->t_self, tuple);
|
2013-03-04 01:23:31 +01:00
|
|
|
|
2013-05-06 19:26:51 +02:00
|
|
|
heap_freetuple(tuple);
|
|
|
|
heap_close(pgrel, RowExclusiveLock);
|
2013-03-04 01:23:31 +01:00
|
|
|
|
2013-05-06 19:26:51 +02:00
|
|
|
/*
|
|
|
|
* Advance command counter to make the updated pg_class row locally
|
|
|
|
* visible.
|
|
|
|
*/
|
|
|
|
CommandCounterIncrement();
|
2013-03-04 01:23:31 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* ExecRefreshMatView -- execute a REFRESH MATERIALIZED VIEW command
|
|
|
|
*
|
|
|
|
* This refreshes the materialized view by creating a new table and swapping
|
|
|
|
* the relfilenodes of the new table and the old materialized view, so the OID
|
|
|
|
* of the original materialized view is preserved. Thus we do not lose GRANT
|
|
|
|
* nor references to this materialized view.
|
|
|
|
*
|
|
|
|
* If WITH NO DATA was specified, this is effectively like a TRUNCATE;
|
|
|
|
* otherwise it is like a TRUNCATE followed by an INSERT using the SELECT
|
|
|
|
* statement associated with the materialized view. The statement node's
|
2013-05-06 19:26:51 +02:00
|
|
|
* skipData field shows whether the clause was used.
|
2013-03-04 01:23:31 +01:00
|
|
|
*
|
|
|
|
* Indexes are rebuilt too, via REINDEX. Since we are effectively bulk-loading
|
|
|
|
* the new heap, it's better to create the indexes afterwards than to fill them
|
|
|
|
* incrementally while we load.
|
|
|
|
*
|
2013-05-06 19:26:51 +02:00
|
|
|
* The matview's "populated" state is changed based on whether the contents
|
|
|
|
* reflect the result set of the materialized view's query.
|
2013-03-04 01:23:31 +01:00
|
|
|
*/
|
Change many routines to return ObjectAddress rather than OID
The changed routines are mostly those that can be directly called by
ProcessUtilitySlow; the intention is to make the affected object
information more precise, in support for future event trigger changes.
Originally it was envisioned that the OID of the affected object would
be enough, and in most cases that is correct, but upon actually
implementing the event trigger changes it turned out that ObjectAddress
is more widely useful.
Additionally, some command execution routines grew an output argument
that's an object address which provides further info about the executed
command. To wit:
* for ALTER DOMAIN / ADD CONSTRAINT, it corresponds to the address of
the new constraint
* for ALTER OBJECT / SET SCHEMA, it corresponds to the address of the
schema that originally contained the object.
* for ALTER EXTENSION {ADD, DROP} OBJECT, it corresponds to the address
of the object added to or dropped from the extension.
There's no user-visible change in this commit, and no functional change
either.
Discussion: 20150218213255.GC6717@tamriel.snowman.net
Reviewed-By: Stephen Frost, Andres Freund
2015-03-03 18:10:50 +01:00
|
|
|
ObjectAddress
|
2013-03-04 01:23:31 +01:00
|
|
|
ExecRefreshMatView(RefreshMatViewStmt *stmt, const char *queryString,
|
2013-05-29 22:58:43 +02:00
|
|
|
ParamListInfo params, char *completionTag)
|
2013-03-04 01:23:31 +01:00
|
|
|
{
|
|
|
|
Oid matviewOid;
|
|
|
|
Relation matviewRel;
|
|
|
|
RewriteRule *rule;
|
|
|
|
List *actions;
|
|
|
|
Query *dataQuery;
|
|
|
|
Oid tableSpace;
|
2014-08-26 16:56:26 +02:00
|
|
|
Oid relowner;
|
2013-03-04 01:23:31 +01:00
|
|
|
Oid OIDNewHeap;
|
|
|
|
DestReceiver *dest;
|
2017-03-18 22:49:06 +01:00
|
|
|
uint64 processed = 0;
|
2013-07-16 19:55:44 +02:00
|
|
|
bool concurrent;
|
|
|
|
LOCKMODE lockmode;
|
2014-08-22 20:27:00 +02:00
|
|
|
char relpersistence;
|
2014-08-26 16:56:26 +02:00
|
|
|
Oid save_userid;
|
|
|
|
int save_sec_context;
|
|
|
|
int save_nestlevel;
|
Change many routines to return ObjectAddress rather than OID
The changed routines are mostly those that can be directly called by
ProcessUtilitySlow; the intention is to make the affected object
information more precise, in support for future event trigger changes.
Originally it was envisioned that the OID of the affected object would
be enough, and in most cases that is correct, but upon actually
implementing the event trigger changes it turned out that ObjectAddress
is more widely useful.
Additionally, some command execution routines grew an output argument
that's an object address which provides further info about the executed
command. To wit:
* for ALTER DOMAIN / ADD CONSTRAINT, it corresponds to the address of
the new constraint
* for ALTER OBJECT / SET SCHEMA, it corresponds to the address of the
schema that originally contained the object.
* for ALTER EXTENSION {ADD, DROP} OBJECT, it corresponds to the address
of the object added to or dropped from the extension.
There's no user-visible change in this commit, and no functional change
either.
Discussion: 20150218213255.GC6717@tamriel.snowman.net
Reviewed-By: Stephen Frost, Andres Freund
2015-03-03 18:10:50 +01:00
|
|
|
ObjectAddress address;
|
2013-07-16 19:55:44 +02:00
|
|
|
|
|
|
|
/* Determine strength of lock needed. */
|
|
|
|
concurrent = stmt->concurrent;
|
|
|
|
lockmode = concurrent ? ExclusiveLock : AccessExclusiveLock;
|
2013-03-04 01:23:31 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Get a lock until end of transaction.
|
|
|
|
*/
|
|
|
|
matviewOid = RangeVarGetRelidExtended(stmt->relation,
|
2018-03-31 01:33:42 +02:00
|
|
|
lockmode, 0,
|
2013-05-29 22:58:43 +02:00
|
|
|
RangeVarCallbackOwnsTable, NULL);
|
2013-03-04 01:23:31 +01:00
|
|
|
matviewRel = heap_open(matviewOid, NoLock);
|
|
|
|
|
|
|
|
/* Make sure it is a materialized view. */
|
|
|
|
if (matviewRel->rd_rel->relkind != RELKIND_MATVIEW)
|
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
|
|
|
errmsg("\"%s\" is not a materialized view",
|
|
|
|
RelationGetRelationName(matviewRel))));
|
|
|
|
|
2013-07-16 19:55:44 +02:00
|
|
|
/* Check that CONCURRENTLY is not specified if not populated. */
|
|
|
|
if (concurrent && !RelationIsPopulated(matviewRel))
|
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
|
|
|
errmsg("CONCURRENTLY cannot be used when the materialized view is not populated")));
|
|
|
|
|
|
|
|
/* Check that conflicting options have not been specified. */
|
|
|
|
if (concurrent && stmt->skipData)
|
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_SYNTAX_ERROR),
|
|
|
|
errmsg("CONCURRENTLY and WITH NO DATA options cannot be used together")));
|
|
|
|
|
|
|
|
/* We don't allow an oid column for a materialized view. */
|
2013-03-04 01:23:31 +01:00
|
|
|
Assert(!matviewRel->rd_rel->relhasoids);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check that everything is correct for a refresh. Problems at this point
|
|
|
|
* are internal errors, so elog is sufficient.
|
|
|
|
*/
|
|
|
|
if (matviewRel->rd_rel->relhasrules == false ||
|
|
|
|
matviewRel->rd_rules->numLocks < 1)
|
|
|
|
elog(ERROR,
|
|
|
|
"materialized view \"%s\" is missing rewrite information",
|
|
|
|
RelationGetRelationName(matviewRel));
|
|
|
|
|
|
|
|
if (matviewRel->rd_rules->numLocks > 1)
|
|
|
|
elog(ERROR,
|
|
|
|
"materialized view \"%s\" has too many rules",
|
|
|
|
RelationGetRelationName(matviewRel));
|
|
|
|
|
|
|
|
rule = matviewRel->rd_rules->rules[0];
|
|
|
|
if (rule->event != CMD_SELECT || !(rule->isInstead))
|
|
|
|
elog(ERROR,
|
|
|
|
"the rule for materialized view \"%s\" is not a SELECT INSTEAD OF rule",
|
|
|
|
RelationGetRelationName(matviewRel));
|
|
|
|
|
|
|
|
actions = rule->actions;
|
|
|
|
if (list_length(actions) != 1)
|
|
|
|
elog(ERROR,
|
|
|
|
"the rule for materialized view \"%s\" is not a single action",
|
|
|
|
RelationGetRelationName(matviewRel));
|
|
|
|
|
Make concurrent refresh check early that there is a unique index on matview.
In REFRESH MATERIALIZED VIEW command, CONCURRENTLY option is only
allowed if there is at least one unique index with no WHERE clause on
one or more columns of the matview. Previously, concurrent refresh
checked the existence of a unique index on the matview after filling
the data to new snapshot, i.e., after calling refresh_matview_datafill().
So, when there was no unique index, we could need to wait a long time
before we detected that and got the error. It was a waste of time.
To eliminate such wasting time, this commit changes concurrent refresh
so that it checks the existence of a unique index at the beginning of
the refresh operation, i.e., before starting any time-consuming jobs.
If CONCURRENTLY option is not allowed due to lack of a unique index,
concurrent refresh can immediately detect it and emit an error.
Author: Masahiko Sawada
Reviewed-by: Michael Paquier, Fujii Masao
2016-02-15 18:15:44 +01:00
|
|
|
/*
|
2016-06-10 00:02:36 +02:00
|
|
|
* Check that there is a unique index with no WHERE clause on one or more
|
|
|
|
* columns of the materialized view if CONCURRENTLY is specified.
|
Make concurrent refresh check early that there is a unique index on matview.
In REFRESH MATERIALIZED VIEW command, CONCURRENTLY option is only
allowed if there is at least one unique index with no WHERE clause on
one or more columns of the matview. Previously, concurrent refresh
checked the existence of a unique index on the matview after filling
the data to new snapshot, i.e., after calling refresh_matview_datafill().
So, when there was no unique index, we could need to wait a long time
before we detected that and got the error. It was a waste of time.
To eliminate such wasting time, this commit changes concurrent refresh
so that it checks the existence of a unique index at the beginning of
the refresh operation, i.e., before starting any time-consuming jobs.
If CONCURRENTLY option is not allowed due to lack of a unique index,
concurrent refresh can immediately detect it and emit an error.
Author: Masahiko Sawada
Reviewed-by: Michael Paquier, Fujii Masao
2016-02-15 18:15:44 +01:00
|
|
|
*/
|
|
|
|
if (concurrent)
|
|
|
|
{
|
2016-06-10 00:02:36 +02:00
|
|
|
List *indexoidlist = RelationGetIndexList(matviewRel);
|
|
|
|
ListCell *indexoidscan;
|
Make concurrent refresh check early that there is a unique index on matview.
In REFRESH MATERIALIZED VIEW command, CONCURRENTLY option is only
allowed if there is at least one unique index with no WHERE clause on
one or more columns of the matview. Previously, concurrent refresh
checked the existence of a unique index on the matview after filling
the data to new snapshot, i.e., after calling refresh_matview_datafill().
So, when there was no unique index, we could need to wait a long time
before we detected that and got the error. It was a waste of time.
To eliminate such wasting time, this commit changes concurrent refresh
so that it checks the existence of a unique index at the beginning of
the refresh operation, i.e., before starting any time-consuming jobs.
If CONCURRENTLY option is not allowed due to lack of a unique index,
concurrent refresh can immediately detect it and emit an error.
Author: Masahiko Sawada
Reviewed-by: Michael Paquier, Fujii Masao
2016-02-15 18:15:44 +01:00
|
|
|
bool hasUniqueIndex = false;
|
|
|
|
|
|
|
|
foreach(indexoidscan, indexoidlist)
|
|
|
|
{
|
|
|
|
Oid indexoid = lfirst_oid(indexoidscan);
|
|
|
|
Relation indexRel;
|
|
|
|
|
|
|
|
indexRel = index_open(indexoid, AccessShareLock);
|
2018-03-19 23:49:53 +01:00
|
|
|
hasUniqueIndex = is_usable_unique_index(indexRel);
|
Make concurrent refresh check early that there is a unique index on matview.
In REFRESH MATERIALIZED VIEW command, CONCURRENTLY option is only
allowed if there is at least one unique index with no WHERE clause on
one or more columns of the matview. Previously, concurrent refresh
checked the existence of a unique index on the matview after filling
the data to new snapshot, i.e., after calling refresh_matview_datafill().
So, when there was no unique index, we could need to wait a long time
before we detected that and got the error. It was a waste of time.
To eliminate such wasting time, this commit changes concurrent refresh
so that it checks the existence of a unique index at the beginning of
the refresh operation, i.e., before starting any time-consuming jobs.
If CONCURRENTLY option is not allowed due to lack of a unique index,
concurrent refresh can immediately detect it and emit an error.
Author: Masahiko Sawada
Reviewed-by: Michael Paquier, Fujii Masao
2016-02-15 18:15:44 +01:00
|
|
|
index_close(indexRel, AccessShareLock);
|
2018-03-19 23:49:53 +01:00
|
|
|
if (hasUniqueIndex)
|
|
|
|
break;
|
Make concurrent refresh check early that there is a unique index on matview.
In REFRESH MATERIALIZED VIEW command, CONCURRENTLY option is only
allowed if there is at least one unique index with no WHERE clause on
one or more columns of the matview. Previously, concurrent refresh
checked the existence of a unique index on the matview after filling
the data to new snapshot, i.e., after calling refresh_matview_datafill().
So, when there was no unique index, we could need to wait a long time
before we detected that and got the error. It was a waste of time.
To eliminate such wasting time, this commit changes concurrent refresh
so that it checks the existence of a unique index at the beginning of
the refresh operation, i.e., before starting any time-consuming jobs.
If CONCURRENTLY option is not allowed due to lack of a unique index,
concurrent refresh can immediately detect it and emit an error.
Author: Masahiko Sawada
Reviewed-by: Michael Paquier, Fujii Masao
2016-02-15 18:15:44 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
list_free(indexoidlist);
|
|
|
|
|
|
|
|
if (!hasUniqueIndex)
|
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
|
Phase 3 of pgindent updates.
Don't move parenthesized lines to the left, even if that means they
flow past the right margin.
By default, BSD indent lines up statement continuation lines that are
within parentheses so that they start just to the right of the preceding
left parenthesis. However, traditionally, if that resulted in the
continuation line extending to the right of the desired right margin,
then indent would push it left just far enough to not overrun the margin,
if it could do so without making the continuation line start to the left of
the current statement indent. That makes for a weird mix of indentations
unless one has been completely rigid about never violating the 80-column
limit.
This behavior has been pretty universally panned by Postgres developers.
Hence, disable it with indent's new -lpl switch, so that parenthesized
lines are always lined up with the preceding left paren.
This patch is much less interesting than the first round of indent
changes, but also bulkier, so I thought it best to separate the effects.
Discussion: https://postgr.es/m/E1dAmxK-0006EE-1r@gemulon.postgresql.org
Discussion: https://postgr.es/m/30527.1495162840@sss.pgh.pa.us
2017-06-21 21:35:54 +02:00
|
|
|
errmsg("cannot refresh materialized view \"%s\" concurrently",
|
|
|
|
quote_qualified_identifier(get_namespace_name(RelationGetNamespace(matviewRel)),
|
|
|
|
RelationGetRelationName(matviewRel))),
|
Make concurrent refresh check early that there is a unique index on matview.
In REFRESH MATERIALIZED VIEW command, CONCURRENTLY option is only
allowed if there is at least one unique index with no WHERE clause on
one or more columns of the matview. Previously, concurrent refresh
checked the existence of a unique index on the matview after filling
the data to new snapshot, i.e., after calling refresh_matview_datafill().
So, when there was no unique index, we could need to wait a long time
before we detected that and got the error. It was a waste of time.
To eliminate such wasting time, this commit changes concurrent refresh
so that it checks the existence of a unique index at the beginning of
the refresh operation, i.e., before starting any time-consuming jobs.
If CONCURRENTLY option is not allowed due to lack of a unique index,
concurrent refresh can immediately detect it and emit an error.
Author: Masahiko Sawada
Reviewed-by: Michael Paquier, Fujii Masao
2016-02-15 18:15:44 +01:00
|
|
|
errhint("Create a unique index with no WHERE clause on one or more columns of the materialized view.")));
|
|
|
|
}
|
|
|
|
|
2013-03-04 01:23:31 +01:00
|
|
|
/*
|
|
|
|
* The stored query was rewritten at the time of the MV definition, but
|
|
|
|
* has not been scribbled on by the planner.
|
|
|
|
*/
|
Improve castNode notation by introducing list-extraction-specific variants.
This extends the castNode() notation introduced by commit 5bcab1114 to
provide, in one step, extraction of a list cell's pointer and coercion to
a concrete node type. For example, "lfirst_node(Foo, lc)" is the same
as "castNode(Foo, lfirst(lc))". Almost half of the uses of castNode
that have appeared so far include a list extraction call, so this is
pretty widely useful, and it saves a few more keystrokes compared to the
old way.
As with the previous patch, back-patch the addition of these macros to
pg_list.h, so that the notation will be available when back-patching.
Patch by me, after an idea of Andrew Gierth's.
Discussion: https://postgr.es/m/14197.1491841216@sss.pgh.pa.us
2017-04-10 19:51:29 +02:00
|
|
|
dataQuery = linitial_node(Query, actions);
|
2013-03-04 01:23:31 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Check for active uses of the relation in the current transaction, such
|
|
|
|
* as open scans.
|
|
|
|
*
|
|
|
|
* NB: We count on this to protect us against problems with refreshing the
|
|
|
|
* data using HEAP_INSERT_FROZEN.
|
|
|
|
*/
|
|
|
|
CheckTableNotInUse(matviewRel, "REFRESH MATERIALIZED VIEW");
|
|
|
|
|
2013-05-06 19:26:51 +02:00
|
|
|
/*
|
|
|
|
* Tentatively mark the matview as populated or not (this will roll back
|
|
|
|
* if we fail later).
|
|
|
|
*/
|
|
|
|
SetMatViewPopulatedState(matviewRel, !stmt->skipData);
|
|
|
|
|
2014-08-26 16:56:26 +02:00
|
|
|
relowner = matviewRel->rd_rel->relowner;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Switch to the owner's userid, so that any functions are run as that
|
|
|
|
* user. Also arrange to make GUC variable changes local to this command.
|
|
|
|
* Don't lock it down too tight to create a temporary table just yet. We
|
|
|
|
* will switch modes when we are about to execute user code.
|
|
|
|
*/
|
|
|
|
GetUserIdAndSecContext(&save_userid, &save_sec_context);
|
|
|
|
SetUserIdAndSecContext(relowner,
|
|
|
|
save_sec_context | SECURITY_LOCAL_USERID_CHANGE);
|
|
|
|
save_nestlevel = NewGUCNestLevel();
|
|
|
|
|
2013-07-16 19:55:44 +02:00
|
|
|
/* Concurrent refresh builds new data in temp tablespace, and does diff. */
|
|
|
|
if (concurrent)
|
2014-08-22 20:27:00 +02:00
|
|
|
{
|
2013-07-16 19:55:44 +02:00
|
|
|
tableSpace = GetDefaultTablespace(RELPERSISTENCE_TEMP);
|
2014-08-22 20:27:00 +02:00
|
|
|
relpersistence = RELPERSISTENCE_TEMP;
|
|
|
|
}
|
2013-07-16 19:55:44 +02:00
|
|
|
else
|
2014-08-22 20:27:00 +02:00
|
|
|
{
|
2013-07-16 19:55:44 +02:00
|
|
|
tableSpace = matviewRel->rd_rel->reltablespace;
|
2014-08-22 20:27:00 +02:00
|
|
|
relpersistence = matviewRel->rd_rel->relpersistence;
|
|
|
|
}
|
2013-03-04 01:23:31 +01:00
|
|
|
|
2013-11-05 22:36:33 +01:00
|
|
|
/*
|
2014-05-06 18:12:18 +02:00
|
|
|
* Create the transient table that will receive the regenerated data. Lock
|
|
|
|
* it against access by any other process until commit (by which time it
|
|
|
|
* will be gone).
|
2013-11-05 22:36:33 +01:00
|
|
|
*/
|
2014-08-22 20:27:00 +02:00
|
|
|
OIDNewHeap = make_new_heap(matviewOid, tableSpace, relpersistence,
|
2013-07-16 19:55:44 +02:00
|
|
|
ExclusiveLock);
|
2013-11-05 22:36:33 +01:00
|
|
|
LockRelationOid(OIDNewHeap, AccessExclusiveLock);
|
2013-03-04 01:23:31 +01:00
|
|
|
dest = CreateTransientRelDestReceiver(OIDNewHeap);
|
|
|
|
|
2014-08-26 16:56:26 +02:00
|
|
|
/*
|
|
|
|
* Now lock down security-restricted operations.
|
|
|
|
*/
|
|
|
|
SetUserIdAndSecContext(relowner,
|
|
|
|
save_sec_context | SECURITY_RESTRICTED_OPERATION);
|
|
|
|
|
2013-05-06 19:26:51 +02:00
|
|
|
/* Generate the data, if wanted. */
|
2013-03-04 01:23:31 +01:00
|
|
|
if (!stmt->skipData)
|
2017-03-18 22:49:06 +01:00
|
|
|
processed = refresh_matview_datafill(dest, dataQuery, queryString);
|
2013-07-16 19:55:44 +02:00
|
|
|
|
|
|
|
/* Make the matview match the newly generated data. */
|
|
|
|
if (concurrent)
|
|
|
|
{
|
|
|
|
int old_depth = matview_maintenance_depth;
|
|
|
|
|
|
|
|
PG_TRY();
|
|
|
|
{
|
2014-08-26 16:56:26 +02:00
|
|
|
refresh_by_match_merge(matviewOid, OIDNewHeap, relowner,
|
|
|
|
save_sec_context);
|
2013-07-16 19:55:44 +02:00
|
|
|
}
|
|
|
|
PG_CATCH();
|
|
|
|
{
|
|
|
|
matview_maintenance_depth = old_depth;
|
|
|
|
PG_RE_THROW();
|
|
|
|
}
|
|
|
|
PG_END_TRY();
|
|
|
|
Assert(matview_maintenance_depth == old_depth);
|
|
|
|
}
|
|
|
|
else
|
2017-03-18 22:49:06 +01:00
|
|
|
{
|
2014-11-15 05:19:49 +01:00
|
|
|
refresh_by_heap_swap(matviewOid, OIDNewHeap, relpersistence);
|
2014-08-25 21:32:18 +02:00
|
|
|
|
2017-03-18 22:49:06 +01:00
|
|
|
/*
|
|
|
|
* Inform stats collector about our activity: basically, we truncated
|
|
|
|
* the matview and inserted some new data. (The concurrent code path
|
|
|
|
* above doesn't need to worry about this because the inserts and
|
|
|
|
* deletes it issues get counted by lower-level code.)
|
|
|
|
*/
|
|
|
|
pgstat_count_truncate(matviewRel);
|
|
|
|
if (!stmt->skipData)
|
|
|
|
pgstat_count_heap_insert(matviewRel, processed);
|
|
|
|
}
|
|
|
|
|
2017-03-18 23:43:06 +01:00
|
|
|
heap_close(matviewRel, NoLock);
|
|
|
|
|
2014-08-26 16:56:26 +02:00
|
|
|
/* Roll back any GUC changes */
|
|
|
|
AtEOXact_GUC(false, save_nestlevel);
|
|
|
|
|
|
|
|
/* Restore userid and security context */
|
|
|
|
SetUserIdAndSecContext(save_userid, save_sec_context);
|
|
|
|
|
Change many routines to return ObjectAddress rather than OID
The changed routines are mostly those that can be directly called by
ProcessUtilitySlow; the intention is to make the affected object
information more precise, in support for future event trigger changes.
Originally it was envisioned that the OID of the affected object would
be enough, and in most cases that is correct, but upon actually
implementing the event trigger changes it turned out that ObjectAddress
is more widely useful.
Additionally, some command execution routines grew an output argument
that's an object address which provides further info about the executed
command. To wit:
* for ALTER DOMAIN / ADD CONSTRAINT, it corresponds to the address of
the new constraint
* for ALTER OBJECT / SET SCHEMA, it corresponds to the address of the
schema that originally contained the object.
* for ALTER EXTENSION {ADD, DROP} OBJECT, it corresponds to the address
of the object added to or dropped from the extension.
There's no user-visible change in this commit, and no functional change
either.
Discussion: 20150218213255.GC6717@tamriel.snowman.net
Reviewed-By: Stephen Frost, Andres Freund
2015-03-03 18:10:50 +01:00
|
|
|
ObjectAddressSet(address, RelationRelationId, matviewOid);
|
|
|
|
|
|
|
|
return address;
|
2013-03-04 01:23:31 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* refresh_matview_datafill
|
2017-03-18 22:49:06 +01:00
|
|
|
*
|
|
|
|
* Execute the given query, sending result rows to "dest" (which will
|
|
|
|
* insert them into the target matview).
|
|
|
|
*
|
|
|
|
* Returns number of rows inserted.
|
2013-03-04 01:23:31 +01:00
|
|
|
*/
|
2017-03-18 22:49:06 +01:00
|
|
|
static uint64
|
2013-03-04 01:23:31 +01:00
|
|
|
refresh_matview_datafill(DestReceiver *dest, Query *query,
|
2014-08-26 16:56:26 +02:00
|
|
|
const char *queryString)
|
2013-03-04 01:23:31 +01:00
|
|
|
{
|
2013-05-29 22:58:43 +02:00
|
|
|
List *rewritten;
|
2013-03-04 01:23:31 +01:00
|
|
|
PlannedStmt *plan;
|
|
|
|
QueryDesc *queryDesc;
|
2013-11-03 01:18:08 +01:00
|
|
|
Query *copied_query;
|
2017-03-18 22:49:06 +01:00
|
|
|
uint64 processed;
|
2013-07-16 19:55:44 +02:00
|
|
|
|
2013-11-03 01:18:08 +01:00
|
|
|
/* Lock and rewrite, using a copy to preserve the original query. */
|
|
|
|
copied_query = copyObject(query);
|
Avoid getting more than AccessShareLock when deparsing a query.
In make_ruledef and get_query_def, we have long used AcquireRewriteLocks
to ensure that the querytree we are about to deparse is up-to-date and
the schemas of the underlying relations aren't changing. Howwever, that
function thinks the query is about to be executed, so it acquires locks
that are stronger than necessary for the purpose of deparsing. Thus for
example, if pg_dump asks to deparse a rule that includes "INSERT INTO t",
we'd acquire RowExclusiveLock on t. That results in interference with
concurrent transactions that might for example ask for ShareLock on t.
Since pg_dump is documented as being purely read-only, this is unexpected.
(Worse, it used to actually be read-only; this behavior dates back only
to 8.1, cf commit ba4200246.)
Fix this by adding a parameter to AcquireRewriteLocks to tell it whether
we want the "real" execution locks or only AccessShareLock.
Report, diagnosis, and patch by Dean Rasheed. Back-patch to all supported
branches.
2014-03-07 01:31:05 +01:00
|
|
|
AcquireRewriteLocks(copied_query, true, false);
|
2013-11-03 01:18:08 +01:00
|
|
|
rewritten = QueryRewrite(copied_query);
|
2013-03-04 01:23:31 +01:00
|
|
|
|
|
|
|
/* SELECT should never rewrite to more or less than one SELECT query */
|
|
|
|
if (list_length(rewritten) != 1)
|
|
|
|
elog(ERROR, "unexpected rewrite result for REFRESH MATERIALIZED VIEW");
|
|
|
|
query = (Query *) linitial(rewritten);
|
|
|
|
|
|
|
|
/* Check for user-requested abort. */
|
|
|
|
CHECK_FOR_INTERRUPTS();
|
|
|
|
|
|
|
|
/* Plan the query which will generate data for the refresh. */
|
|
|
|
plan = pg_plan_query(query, 0, NULL);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Use a snapshot with an updated command ID to ensure this query sees
|
2014-05-06 18:12:18 +02:00
|
|
|
* results of any previously executed queries. (This could only matter if
|
2013-03-04 01:23:31 +01:00
|
|
|
* the planner executed an allegedly-stable function that changed the
|
|
|
|
* database contents, but let's do it anyway to be safe.)
|
|
|
|
*/
|
|
|
|
PushCopiedSnapshot(GetActiveSnapshot());
|
|
|
|
UpdateActiveSnapshotCommandId();
|
|
|
|
|
|
|
|
/* Create a QueryDesc, redirecting output to our tuple receiver */
|
|
|
|
queryDesc = CreateQueryDesc(plan, queryString,
|
|
|
|
GetActiveSnapshot(), InvalidSnapshot,
|
2017-04-01 06:17:18 +02:00
|
|
|
dest, NULL, NULL, 0);
|
2013-03-04 01:23:31 +01:00
|
|
|
|
|
|
|
/* call ExecutorStart to prepare the plan for execution */
|
|
|
|
ExecutorStart(queryDesc, EXEC_FLAG_WITHOUT_OIDS);
|
|
|
|
|
|
|
|
/* run the plan */
|
2017-03-23 18:05:48 +01:00
|
|
|
ExecutorRun(queryDesc, ForwardScanDirection, 0L, true);
|
2013-03-04 01:23:31 +01:00
|
|
|
|
2017-03-18 22:49:06 +01:00
|
|
|
processed = queryDesc->estate->es_processed;
|
|
|
|
|
2013-03-04 01:23:31 +01:00
|
|
|
/* and clean up */
|
|
|
|
ExecutorFinish(queryDesc);
|
|
|
|
ExecutorEnd(queryDesc);
|
|
|
|
|
|
|
|
FreeQueryDesc(queryDesc);
|
|
|
|
|
|
|
|
PopActiveSnapshot();
|
2017-03-18 22:49:06 +01:00
|
|
|
|
|
|
|
return processed;
|
2013-03-04 01:23:31 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
DestReceiver *
|
|
|
|
CreateTransientRelDestReceiver(Oid transientoid)
|
|
|
|
{
|
|
|
|
DR_transientrel *self = (DR_transientrel *) palloc0(sizeof(DR_transientrel));
|
|
|
|
|
|
|
|
self->pub.receiveSlot = transientrel_receive;
|
|
|
|
self->pub.rStartup = transientrel_startup;
|
|
|
|
self->pub.rShutdown = transientrel_shutdown;
|
|
|
|
self->pub.rDestroy = transientrel_destroy;
|
|
|
|
self->pub.mydest = DestTransientRel;
|
|
|
|
self->transientoid = transientoid;
|
|
|
|
|
|
|
|
return (DestReceiver *) self;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* transientrel_startup --- executor startup
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
transientrel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
|
|
|
|
{
|
|
|
|
DR_transientrel *myState = (DR_transientrel *) self;
|
2013-05-29 22:58:43 +02:00
|
|
|
Relation transientrel;
|
2013-03-04 01:23:31 +01:00
|
|
|
|
|
|
|
transientrel = heap_open(myState->transientoid, NoLock);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Fill private fields of myState for use by later routines
|
|
|
|
*/
|
|
|
|
myState->transientrel = transientrel;
|
|
|
|
myState->output_cid = GetCurrentCommandId(true);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We can skip WAL-logging the insertions, unless PITR or streaming
|
|
|
|
* replication is in use. We can skip the FSM in any case.
|
|
|
|
*/
|
|
|
|
myState->hi_options = HEAP_INSERT_SKIP_FSM | HEAP_INSERT_FROZEN;
|
|
|
|
if (!XLogIsNeeded())
|
|
|
|
myState->hi_options |= HEAP_INSERT_SKIP_WAL;
|
|
|
|
myState->bistate = GetBulkInsertState();
|
|
|
|
|
|
|
|
/* Not using WAL requires smgr_targblock be initially invalid */
|
|
|
|
Assert(RelationGetTargetBlock(transientrel) == InvalidBlockNumber);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* transientrel_receive --- receive one tuple
|
|
|
|
*/
|
2016-06-06 20:52:58 +02:00
|
|
|
static bool
|
2013-03-04 01:23:31 +01:00
|
|
|
transientrel_receive(TupleTableSlot *slot, DestReceiver *self)
|
|
|
|
{
|
|
|
|
DR_transientrel *myState = (DR_transientrel *) self;
|
|
|
|
HeapTuple tuple;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* get the heap tuple out of the tuple table slot, making sure we have a
|
|
|
|
* writable copy
|
|
|
|
*/
|
|
|
|
tuple = ExecMaterializeSlot(slot);
|
|
|
|
|
|
|
|
heap_insert(myState->transientrel,
|
|
|
|
tuple,
|
|
|
|
myState->output_cid,
|
|
|
|
myState->hi_options,
|
|
|
|
myState->bistate);
|
|
|
|
|
|
|
|
/* We know this is a newly created relation, so there are no indexes */
|
2016-06-06 20:52:58 +02:00
|
|
|
|
|
|
|
return true;
|
2013-03-04 01:23:31 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* transientrel_shutdown --- executor end
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
transientrel_shutdown(DestReceiver *self)
|
|
|
|
{
|
|
|
|
DR_transientrel *myState = (DR_transientrel *) self;
|
|
|
|
|
|
|
|
FreeBulkInsertState(myState->bistate);
|
|
|
|
|
|
|
|
/* If we skipped using WAL, must heap_sync before commit */
|
|
|
|
if (myState->hi_options & HEAP_INSERT_SKIP_WAL)
|
|
|
|
heap_sync(myState->transientrel);
|
|
|
|
|
|
|
|
/* close transientrel, but keep lock until commit */
|
|
|
|
heap_close(myState->transientrel, NoLock);
|
|
|
|
myState->transientrel = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * transientrel_destroy --- release DestReceiver object
 *
 * Just frees the receiver struct itself; all other resources (relation,
 * bulk-insert state) were already released by transientrel_shutdown.
 */
static void
transientrel_destroy(DestReceiver *self)
{
	pfree(self);
}
|
2013-07-16 19:55:44 +02:00
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Given a qualified temporary table name, append an underscore followed by
|
|
|
|
* the given integer, to make a new table name based on the old one.
|
|
|
|
*
|
|
|
|
* This leaks memory through palloc(), which won't be cleaned up until the
|
2014-11-06 12:04:11 +01:00
|
|
|
* current memory context is freed.
|
2013-07-16 19:55:44 +02:00
|
|
|
*/
|
|
|
|
static char *
|
|
|
|
make_temptable_name_n(char *tempname, int n)
|
|
|
|
{
|
|
|
|
StringInfoData namebuf;
|
|
|
|
|
|
|
|
initStringInfo(&namebuf);
|
|
|
|
appendStringInfoString(&namebuf, tempname);
|
2016-04-08 18:40:15 +02:00
|
|
|
appendStringInfo(&namebuf, "_%d", n);
|
2013-07-16 19:55:44 +02:00
|
|
|
return namebuf.data;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * refresh_by_match_merge
 *
 * Refresh a materialized view with transactional semantics, while allowing
 * concurrent reads.
 *
 * This is called after a new version of the data has been created in a
 * temporary table.  It performs a full outer join against the old version of
 * the data, producing "diff" results.  This join cannot work if there are any
 * duplicated rows in either the old or new versions, in the sense that every
 * column would compare as equal between the two rows.  It does work correctly
 * in the face of rows which have at least one NULL value, with all non-NULL
 * columns equal.  The behavior of NULLs on equality tests and on UNIQUE
 * indexes turns out to be quite convenient here; the tests we need to make
 * are consistent with default behavior.  If there is at least one UNIQUE
 * index on the materialized view, we have exactly the guarantee we need.
 *
 * The temporary table used to hold the diff results contains just the TID of
 * the old record (if matched) and the ROW from the new table as a single
 * column of complex record type (if matched).
 *
 * Once we have the diff table, we perform set-based DELETE and INSERT
 * operations against the materialized view, and discard both temporary
 * tables.
 *
 * Everything from the generation of the new data to applying the differences
 * takes place under cover of an ExclusiveLock, since it seems as though we
 * would want to prohibit not only concurrent REFRESH operations, but also
 * incremental maintenance.  It also doesn't seem reasonable or safe to allow
 * SELECT FOR UPDATE or SELECT FOR SHARE on rows being updated or deleted by
 * this command.
 */
static void
refresh_by_match_merge(Oid matviewOid, Oid tempOid, Oid relowner,
					   int save_sec_context)
{
	StringInfoData querybuf;
	Relation	matviewRel;
	Relation	tempRel;
	char	   *matviewname;
	char	   *tempname;
	char	   *diffname;
	TupleDesc	tupdesc;
	bool		foundUniqueIndex;
	List	   *indexoidlist;
	ListCell   *indexoidscan;
	int16		relnatts;
	Oid		   *opUsedForQual;

	initStringInfo(&querybuf);
	matviewRel = heap_open(matviewOid, NoLock);
	matviewname = quote_qualified_identifier(get_namespace_name(RelationGetNamespace(matviewRel)),
											 RelationGetRelationName(matviewRel));
	tempRel = heap_open(tempOid, NoLock);
	tempname = quote_qualified_identifier(get_namespace_name(RelationGetNamespace(tempRel)),
										  RelationGetRelationName(tempRel));
	/* Name for the diff table: temp table's name with "_2" appended. */
	diffname = make_temptable_name_n(tempname, 2);

	relnatts = matviewRel->rd_rel->relnatts;

	/* Open SPI context. */
	if (SPI_connect() != SPI_OK_CONNECT)
		elog(ERROR, "SPI_connect failed");

	/* Analyze the temp table with the new contents. */
	appendStringInfo(&querybuf, "ANALYZE %s", tempname);
	if (SPI_exec(querybuf.data, 0) != SPI_OK_UTILITY)
		elog(ERROR, "SPI_exec failed: %s", querybuf.data);

	/*
	 * We need to ensure that there are not duplicate rows without NULLs in
	 * the new data set before we can count on the "diff" results.  Check for
	 * that in a way that allows showing the first duplicated row found.  Even
	 * after we pass this test, a unique index on the materialized view may
	 * find a duplicate key problem.
	 *
	 * Note: no LIMIT is used here; in the normal (no-duplicates) case the
	 * query returns no rows and must run to completion anyway, and the count
	 * argument of SPI_execute (1) stops it after the first duplicate.
	 */
	resetStringInfo(&querybuf);
	appendStringInfo(&querybuf,
					 "SELECT newdata FROM %s newdata "
					 "WHERE newdata IS NOT NULL AND EXISTS "
					 "(SELECT 1 FROM %s newdata2 WHERE newdata2 IS NOT NULL "
					 "AND newdata2 OPERATOR(pg_catalog.*=) newdata "
					 "AND newdata2.ctid OPERATOR(pg_catalog.<>) "
					 "newdata.ctid)",
					 tempname, tempname);
	if (SPI_execute(querybuf.data, false, 1) != SPI_OK_SELECT)
		elog(ERROR, "SPI_exec failed: %s", querybuf.data);
	if (SPI_processed > 0)
	{
		/*
		 * Note that this ereport() is returning data to the user.  Generally,
		 * we would want to make sure that the user has been granted access to
		 * this data.  However, REFRESH MAT VIEW is only able to be run by the
		 * owner of the mat view (or a superuser) and therefore there is no
		 * need to check for access to data in the mat view.
		 */
		ereport(ERROR,
				(errcode(ERRCODE_CARDINALITY_VIOLATION),
				 errmsg("new data for materialized view \"%s\" contains duplicate rows without any null columns",
						RelationGetRelationName(matviewRel)),
				 errdetail("Row: %s",
						   SPI_getvalue(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1))));
	}

	/* Run the diff-table creation as the matview owner, with restrictions. */
	SetUserIdAndSecContext(relowner,
						   save_sec_context | SECURITY_LOCAL_USERID_CHANGE);

	/* Start building the query for creating the diff table. */
	resetStringInfo(&querybuf);
	appendStringInfo(&querybuf,
					 "CREATE TEMP TABLE %s AS "
					 "SELECT mv.ctid AS tid, newdata "
					 "FROM %s mv FULL JOIN %s newdata ON (",
					 diffname, matviewname, tempname);

	/*
	 * Get the list of index OIDs for the table from the relcache, and look up
	 * each one in the pg_index syscache.  We will test for equality on all
	 * columns present in all unique indexes which only reference columns and
	 * include all rows.
	 */
	tupdesc = matviewRel->rd_att;
	/* Per-column record of which equality operator we already emitted. */
	opUsedForQual = (Oid *) palloc0(sizeof(Oid) * relnatts);
	foundUniqueIndex = false;

	indexoidlist = RelationGetIndexList(matviewRel);

	foreach(indexoidscan, indexoidlist)
	{
		Oid			indexoid = lfirst_oid(indexoidscan);
		Relation	indexRel;

		indexRel = index_open(indexoid, RowExclusiveLock);
		if (is_usable_unique_index(indexRel))
		{
			Form_pg_index indexStruct = indexRel->rd_index;
			int			numatts = indexStruct->indnatts;
			oidvector  *indclass;
			Datum		indclassDatum;
			bool		isnull;
			int			i;

			/* Must get indclass the hard way. */
			indclassDatum = SysCacheGetAttr(INDEXRELID,
											indexRel->rd_indextuple,
											Anum_pg_index_indclass,
											&isnull);
			Assert(!isnull);
			indclass = (oidvector *) DatumGetPointer(indclassDatum);

			/* Add quals for all columns from this index. */
			for (i = 0; i < numatts; i++)
			{
				int			attnum = indexStruct->indkey.values[i];
				Oid			opclass = indclass->values[i];
				Form_pg_attribute attr = TupleDescAttr(tupdesc, attnum - 1);
				Oid			attrtype = attr->atttypid;
				HeapTuple	cla_ht;
				Form_pg_opclass cla_tup;
				Oid			opfamily;
				Oid			opcintype;
				Oid			op;
				const char *leftop;
				const char *rightop;

				/*
				 * Identify the equality operator associated with this index
				 * column.  First we need to look up the column's opclass.
				 */
				cla_ht = SearchSysCache1(CLAOID, ObjectIdGetDatum(opclass));
				if (!HeapTupleIsValid(cla_ht))
					elog(ERROR, "cache lookup failed for opclass %u", opclass);
				cla_tup = (Form_pg_opclass) GETSTRUCT(cla_ht);
				Assert(cla_tup->opcmethod == BTREE_AM_OID);
				opfamily = cla_tup->opcfamily;
				opcintype = cla_tup->opcintype;
				ReleaseSysCache(cla_ht);

				op = get_opfamily_member(opfamily, opcintype, opcintype,
										 BTEqualStrategyNumber);
				if (!OidIsValid(op))
					elog(ERROR, "missing operator %d(%u,%u) in opfamily %u",
						 BTEqualStrategyNumber, opcintype, opcintype, opfamily);

				/*
				 * If we find the same column with the same equality semantics
				 * in more than one index, we only need to emit the equality
				 * clause once.
				 *
				 * Since we only remember the last equality operator, this
				 * code could be fooled into emitting duplicate clauses given
				 * multiple indexes with several different opclasses ... but
				 * that's so unlikely it doesn't seem worth spending extra
				 * code to avoid.
				 */
				if (opUsedForQual[attnum - 1] == op)
					continue;
				opUsedForQual[attnum - 1] = op;

				/*
				 * Actually add the qual, ANDed with any others.
				 */
				if (foundUniqueIndex)
					appendStringInfoString(&querybuf, " AND ");

				leftop = quote_qualified_identifier("newdata",
													NameStr(attr->attname));
				rightop = quote_qualified_identifier("mv",
													 NameStr(attr->attname));

				generate_operator_clause(&querybuf,
										 leftop, attrtype,
										 op,
										 rightop, attrtype);

				foundUniqueIndex = true;
			}
		}

		/* Keep the locks, since we're about to run DML which needs them. */
		index_close(indexRel, NoLock);
	}

	list_free(indexoidlist);

	/*
	 * There must be at least one usable unique index on the matview.
	 *
	 * ExecRefreshMatView() checks that after taking the exclusive lock on the
	 * matview. So at least one unique index is guaranteed to exist here
	 * because the lock is still being held; so an Assert seems sufficient.
	 */
	Assert(foundUniqueIndex);

	appendStringInfoString(&querybuf,
						   " AND newdata OPERATOR(pg_catalog.*=) mv) "
						   "WHERE newdata IS NULL OR mv IS NULL "
						   "ORDER BY tid");

	/* Create the temporary "diff" table. */
	if (SPI_exec(querybuf.data, 0) != SPI_OK_UTILITY)
		elog(ERROR, "SPI_exec failed: %s", querybuf.data);

	/* Tighten to a fully restricted operation for the DML that follows. */
	SetUserIdAndSecContext(relowner,
						   save_sec_context | SECURITY_RESTRICTED_OPERATION);

	/*
	 * We have no further use for data from the "full-data" temp table, but we
	 * must keep it around because its type is referenced from the diff table.
	 */

	/* Analyze the diff table. */
	resetStringInfo(&querybuf);
	appendStringInfo(&querybuf, "ANALYZE %s", diffname);
	if (SPI_exec(querybuf.data, 0) != SPI_OK_UTILITY)
		elog(ERROR, "SPI_exec failed: %s", querybuf.data);

	/* Allow DML against the matview for the duration of the merge. */
	OpenMatViewIncrementalMaintenance();

	/* Deletes must come before inserts; do them first. */
	resetStringInfo(&querybuf);
	appendStringInfo(&querybuf,
					 "DELETE FROM %s mv WHERE ctid OPERATOR(pg_catalog.=) ANY "
					 "(SELECT diff.tid FROM %s diff "
					 "WHERE diff.tid IS NOT NULL "
					 "AND diff.newdata IS NULL)",
					 matviewname, diffname);
	if (SPI_exec(querybuf.data, 0) != SPI_OK_DELETE)
		elog(ERROR, "SPI_exec failed: %s", querybuf.data);

	/* Inserts go last. */
	resetStringInfo(&querybuf);
	appendStringInfo(&querybuf,
					 "INSERT INTO %s SELECT (diff.newdata).* "
					 "FROM %s diff WHERE tid IS NULL",
					 matviewname, diffname);
	if (SPI_exec(querybuf.data, 0) != SPI_OK_INSERT)
		elog(ERROR, "SPI_exec failed: %s", querybuf.data);

	/* We're done maintaining the materialized view. */
	CloseMatViewIncrementalMaintenance();
	/* Close rels but keep locks until end of transaction. */
	heap_close(tempRel, NoLock);
	heap_close(matviewRel, NoLock);

	/* Clean up temp tables. */
	resetStringInfo(&querybuf);
	appendStringInfo(&querybuf, "DROP TABLE %s, %s", diffname, tempname);
	if (SPI_exec(querybuf.data, 0) != SPI_OK_UTILITY)
		elog(ERROR, "SPI_exec failed: %s", querybuf.data);

	/* Close SPI context. */
	if (SPI_finish() != SPI_OK_FINISH)
		elog(ERROR, "SPI_finish failed");
}
|
|
|
|
|
|
|
|
/*
 * Swap the physical files of the target and transient tables, then rebuild
 * the target's indexes and throw away the transient table.  Security context
 * swapping is handled by the called function, so it is not needed here.
 */
static void
refresh_by_heap_swap(Oid matviewOid, Oid OIDNewHeap, char relpersistence)
{
	/*
	 * NOTE(review): the boolean arguments are positional; they presumably
	 * correspond to finish_heap_swap's is_system_catalog /
	 * swap_toast_by_content / check_constraints / is_internal parameters —
	 * confirm against cluster.h before changing any of them.
	 */
	finish_heap_swap(matviewOid, OIDNewHeap, false, false, true, true,
					 RecentXmin, ReadNextMultiXactId(), relpersistence);
}
|
|
|
|
|
2018-03-19 23:49:53 +01:00
|
|
|
/*
|
|
|
|
* Check whether specified index is usable for match merge.
|
|
|
|
*/
|
|
|
|
static bool
|
|
|
|
is_usable_unique_index(Relation indexRel)
|
|
|
|
{
|
|
|
|
Form_pg_index indexStruct = indexRel->rd_index;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Must be unique, valid, immediate, non-partial, and be defined over
|
|
|
|
* plain user columns (not expressions). We also require it to be a
|
|
|
|
* btree. Even if we had any other unique index kinds, we'd not know how
|
|
|
|
* to identify the corresponding equality operator, nor could we be sure
|
|
|
|
* that the planner could implement the required FULL JOIN with non-btree
|
|
|
|
* operators.
|
|
|
|
*/
|
|
|
|
if (indexStruct->indisunique &&
|
|
|
|
indexStruct->indimmediate &&
|
|
|
|
indexRel->rd_rel->relam == BTREE_AM_OID &&
|
|
|
|
IndexIsValid(indexStruct) &&
|
|
|
|
RelationGetIndexPredicate(indexRel) == NIL &&
|
|
|
|
indexStruct->indnatts > 0)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* The point of groveling through the index columns individually is to
|
|
|
|
* reject both index expressions and system columns. Currently,
|
|
|
|
* matviews couldn't have OID columns so there's no way to create an
|
|
|
|
* index on a system column; but maybe someday that wouldn't be true,
|
|
|
|
* so let's be safe.
|
|
|
|
*/
|
|
|
|
int numatts = indexStruct->indnatts;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < numatts; i++)
|
|
|
|
{
|
|
|
|
int attnum = indexStruct->indkey.values[i];
|
|
|
|
|
|
|
|
if (attnum <= 0)
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2013-07-16 19:55:44 +02:00
|
|
|
|
|
|
|
/*
 * This should be used to test whether the backend is in a context where it is
 * OK to allow DML statements to modify materialized views.  We only want to
 * allow that for internal code driven by the materialized view definition,
 * not for arbitrary user-supplied code.
 *
 * While the function names reflect the fact that their main intended use is
 * incremental maintenance of materialized views (in response to changes to
 * the data in referenced relations), they are initially used to allow REFRESH
 * without blocking concurrent reads.
 */
bool
MatViewIncrementalMaintenanceIsEnabled(void)
{
	/* Nonzero depth means at least one Open...() without a matching Close. */
	return matview_maintenance_depth > 0;
}
|
2013-08-01 21:31:09 +02:00
|
|
|
|
|
|
|
/* Enter a section where matview DML is permitted; sections may nest. */
static void
OpenMatViewIncrementalMaintenance(void)
{
	matview_maintenance_depth++;
}
|
|
|
|
|
|
|
|
/* Leave a matview-maintenance section; depth must never go negative. */
static void
CloseMatViewIncrementalMaintenance(void)
{
	matview_maintenance_depth--;
	Assert(matview_maintenance_depth >= 0);
}
|