2002-07-12 20:43:19 +02:00
|
|
|
/*-------------------------------------------------------------------------
|
|
|
|
*
|
|
|
|
* pg_depend.c
|
|
|
|
* routines to support manipulation of the pg_depend relation
|
|
|
|
*
|
2014-01-07 22:05:30 +01:00
|
|
|
* Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
|
2002-07-12 20:43:19 +02:00
|
|
|
* Portions Copyright (c) 1994, Regents of the University of California
|
|
|
|
*
|
|
|
|
*
|
|
|
|
* IDENTIFICATION
|
2010-09-20 22:08:53 +02:00
|
|
|
* src/backend/catalog/pg_depend.c
|
2002-07-12 20:43:19 +02:00
|
|
|
*
|
|
|
|
*-------------------------------------------------------------------------
|
|
|
|
*/
|
|
|
|
#include "postgres.h"
|
|
|
|
|
|
|
|
#include "access/genam.h"
|
|
|
|
#include "access/heapam.h"
|
2012-08-30 22:15:44 +02:00
|
|
|
#include "access/htup_details.h"
|
2002-07-12 20:43:19 +02:00
|
|
|
#include "catalog/dependency.h"
|
2006-07-11 19:26:59 +02:00
|
|
|
#include "catalog/indexing.h"
|
2007-12-02 00:44:44 +01:00
|
|
|
#include "catalog/pg_constraint.h"
|
2002-07-12 20:43:19 +02:00
|
|
|
#include "catalog/pg_depend.h"
|
2011-02-08 22:08:41 +01:00
|
|
|
#include "catalog/pg_extension.h"
|
|
|
|
#include "commands/extension.h"
|
2002-07-12 20:43:19 +02:00
|
|
|
#include "miscadmin.h"
|
|
|
|
#include "utils/fmgroids.h"
|
2007-12-02 00:44:44 +01:00
|
|
|
#include "utils/lsyscache.h"
|
2008-06-19 02:46:06 +02:00
|
|
|
#include "utils/rel.h"
|
2008-03-26 22:10:39 +01:00
|
|
|
#include "utils/tqual.h"
|
2002-07-12 20:43:19 +02:00
|
|
|
|
|
|
|
|
|
|
|
static bool isObjectPinned(const ObjectAddress *object, Relation rel);
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * Record a dependency between 2 objects via their respective objectAddress.
 * The first argument is the dependent object, the second the one it
 * references.
 *
 * This simply creates an entry in pg_depend, without any other processing.
 */
void
recordDependencyOn(const ObjectAddress *depender,
				   const ObjectAddress *referenced,
				   DependencyType behavior)
{
	/* Just the one-reference case of the multi-reference entry point */
	recordMultipleDependencies(depender, referenced, 1, behavior);
}
|
|
|
|
|
|
|
|
/*
 * Record multiple dependencies (of the same kind) for a single dependent
 * object.  This has a little less overhead than recording each separately.
 *
 * "referenced" points to an array of nreferenced ObjectAddresses; one
 * pg_depend row is inserted per non-pinned referenced object.
 */
void
recordMultipleDependencies(const ObjectAddress *depender,
						   const ObjectAddress *referenced,
						   int nreferenced,
						   DependencyType behavior)
{
	Relation	dependDesc;
	CatalogIndexState indstate;
	HeapTuple	tup;
	int			i;
	bool		nulls[Natts_pg_depend];
	Datum		values[Natts_pg_depend];

	if (nreferenced <= 0)
		return;					/* nothing to do */

	/*
	 * During bootstrap, do nothing since pg_depend may not exist yet. initdb
	 * will fill in appropriate pg_depend entries after bootstrap.
	 */
	if (IsBootstrapProcessingMode())
		return;

	dependDesc = heap_open(DependRelationId, RowExclusiveLock);

	/* Don't open indexes unless we need to make an update */
	indstate = NULL;

	/* no column of pg_depend is ever NULL */
	memset(nulls, false, sizeof(nulls));

	for (i = 0; i < nreferenced; i++, referenced++)
	{
		/*
		 * If the referenced object is pinned by the system, there's no real
		 * need to record dependencies on it.  This saves lots of space in
		 * pg_depend, so it's worth the time taken to check.
		 */
		if (!isObjectPinned(referenced, dependDesc))
		{
			/*
			 * Record the Dependency.  Note we don't bother to check for
			 * duplicate dependencies; there's no harm in them.
			 */
			values[Anum_pg_depend_classid - 1] = ObjectIdGetDatum(depender->classId);
			values[Anum_pg_depend_objid - 1] = ObjectIdGetDatum(depender->objectId);
			values[Anum_pg_depend_objsubid - 1] = Int32GetDatum(depender->objectSubId);

			values[Anum_pg_depend_refclassid - 1] = ObjectIdGetDatum(referenced->classId);
			values[Anum_pg_depend_refobjid - 1] = ObjectIdGetDatum(referenced->objectId);
			values[Anum_pg_depend_refobjsubid - 1] = Int32GetDatum(referenced->objectSubId);

			values[Anum_pg_depend_deptype - 1] = CharGetDatum((char) behavior);

			tup = heap_form_tuple(dependDesc->rd_att, values, nulls);

			simple_heap_insert(dependDesc, tup);

			/* keep indexes current; open them only on first real insert */
			if (indstate == NULL)
				indstate = CatalogOpenIndexes(dependDesc);

			CatalogIndexInsert(indstate, tup);

			heap_freetuple(tup);
		}
	}

	if (indstate != NULL)
		CatalogCloseIndexes(indstate);

	heap_close(dependDesc, RowExclusiveLock);
}
|
|
|
|
|
2011-02-08 22:08:41 +01:00
|
|
|
/*
 * If we are executing a CREATE EXTENSION operation, mark the given object
 * as being a member of the extension.  Otherwise, do nothing.
 *
 * This must be called during creation of any user-definable object type
 * that could be a member of an extension.
 *
 * If isReplace is true, the object already existed (or might have already
 * existed), so we must check for a pre-existing extension membership entry.
 * Passing false is a guarantee that the object is newly created, and so
 * could not already be a member of any extension.
 */
void
recordDependencyOnCurrentExtension(const ObjectAddress *object,
								   bool isReplace)
{
	/* Only whole objects can be extension members */
	Assert(object->objectSubId == 0);

	/* creating_extension / CurrentExtensionObject come from commands/extension.h */
	if (creating_extension)
	{
		ObjectAddress extension;

		/* Only need to check for existing membership if isReplace */
		if (isReplace)
		{
			Oid			oldext;

			oldext = getExtensionOfObject(object->classId, object->objectId);
			if (OidIsValid(oldext))
			{
				/* If already a member of this extension, nothing to do */
				if (oldext == CurrentExtensionObject)
					return;
				/* Already a member of some other extension, so reject */
				ereport(ERROR,
						(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
						 errmsg("%s is already a member of extension \"%s\"",
								getObjectDescription(object),
								get_extension_name(oldext))));
			}
		}

		/* OK, record it as a member of CurrentExtensionObject */
		extension.classId = ExtensionRelationId;
		extension.objectId = CurrentExtensionObject;
		extension.objectSubId = 0;

		recordDependencyOn(object, &extension, DEPENDENCY_EXTENSION);
	}
}
|
|
|
|
|
2002-07-17 00:12:20 +02:00
|
|
|
/*
 * deleteDependencyRecordsFor -- delete all records with given depender
 * classId/objectId.  Returns the number of records deleted.
 *
 * This is used when redefining an existing object.  Links leading to the
 * object do not change, and links leading from it will be recreated
 * (possibly with some differences from before).
 *
 * If skipExtensionDeps is true, we do not delete any dependencies that
 * show that the given object is a member of an extension.  This avoids
 * needing a lot of extra logic to fetch and recreate that dependency.
 */
long
deleteDependencyRecordsFor(Oid classId, Oid objectId,
						   bool skipExtensionDeps)
{
	long		count = 0;
	Relation	depRel;
	ScanKeyData key[2];
	SysScanDesc scan;
	HeapTuple	tup;

	depRel = heap_open(DependRelationId, RowExclusiveLock);

	/* Scan on (classid, objid) of the depender */
	ScanKeyInit(&key[0],
				Anum_pg_depend_classid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(classId));
	ScanKeyInit(&key[1],
				Anum_pg_depend_objid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(objectId));

	scan = systable_beginscan(depRel, DependDependerIndexId, true,
							  NULL, 2, key);

	while (HeapTupleIsValid(tup = systable_getnext(scan)))
	{
		/* Optionally preserve extension-membership records */
		if (skipExtensionDeps &&
			((Form_pg_depend) GETSTRUCT(tup))->deptype == DEPENDENCY_EXTENSION)
			continue;

		simple_heap_delete(depRel, &tup->t_self);
		count++;
	}

	systable_endscan(scan);

	heap_close(depRel, RowExclusiveLock);

	return count;
}
|
2002-07-12 20:43:19 +02:00
|
|
|
|
2011-02-10 23:36:44 +01:00
|
|
|
/*
|
|
|
|
* deleteDependencyRecordsForClass -- delete all records with given depender
|
|
|
|
* classId/objectId, dependee classId, and deptype.
|
|
|
|
* Returns the number of records deleted.
|
|
|
|
*
|
|
|
|
* This is a variant of deleteDependencyRecordsFor, useful when revoking
|
|
|
|
* an object property that is expressed by a dependency record (such as
|
|
|
|
* extension membership).
|
|
|
|
*/
|
|
|
|
long
|
|
|
|
deleteDependencyRecordsForClass(Oid classId, Oid objectId,
|
|
|
|
Oid refclassId, char deptype)
|
|
|
|
{
|
|
|
|
long count = 0;
|
|
|
|
Relation depRel;
|
|
|
|
ScanKeyData key[2];
|
|
|
|
SysScanDesc scan;
|
|
|
|
HeapTuple tup;
|
|
|
|
|
|
|
|
depRel = heap_open(DependRelationId, RowExclusiveLock);
|
|
|
|
|
|
|
|
ScanKeyInit(&key[0],
|
|
|
|
Anum_pg_depend_classid,
|
|
|
|
BTEqualStrategyNumber, F_OIDEQ,
|
|
|
|
ObjectIdGetDatum(classId));
|
|
|
|
ScanKeyInit(&key[1],
|
|
|
|
Anum_pg_depend_objid,
|
|
|
|
BTEqualStrategyNumber, F_OIDEQ,
|
|
|
|
ObjectIdGetDatum(objectId));
|
|
|
|
|
|
|
|
scan = systable_beginscan(depRel, DependDependerIndexId, true,
|
Use an MVCC snapshot, rather than SnapshotNow, for catalog scans.
SnapshotNow scans have the undesirable property that, in the face of
concurrent updates, the scan can fail to see either the old or the new
versions of the row. In many cases, we work around this by requiring
DDL operations to hold AccessExclusiveLock on the object being
modified; in some cases, the existing locking is inadequate and random
failures occur as a result. This commit doesn't change anything
related to locking, but will hopefully pave the way to allowing lock
strength reductions in the future.
The major issue has held us back from making this change in the past
is that taking an MVCC snapshot is significantly more expensive than
using a static special snapshot such as SnapshotNow. However, testing
of various worst-case scenarios reveals that this problem is not
severe except under fairly extreme workloads. To mitigate those
problems, we avoid retaking the MVCC snapshot for each new scan;
instead, we take a new snapshot only when invalidation messages have
been processed. The catcache machinery already requires that
invalidation messages be sent before releasing the related heavyweight
lock; else other backends might rely on locally-cached data rather
than scanning the catalog at all. Thus, making snapshot reuse
dependent on the same guarantees shouldn't break anything that wasn't
already subtly broken.
Patch by me. Review by Michael Paquier and Andres Freund.
2013-07-02 15:47:01 +02:00
|
|
|
NULL, 2, key);
|
2011-02-10 23:36:44 +01:00
|
|
|
|
|
|
|
while (HeapTupleIsValid(tup = systable_getnext(scan)))
|
|
|
|
{
|
|
|
|
Form_pg_depend depform = (Form_pg_depend) GETSTRUCT(tup);
|
|
|
|
|
|
|
|
if (depform->refclassid == refclassId && depform->deptype == deptype)
|
|
|
|
{
|
|
|
|
simple_heap_delete(depRel, &tup->t_self);
|
|
|
|
count++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
systable_endscan(scan);
|
|
|
|
|
|
|
|
heap_close(depRel, RowExclusiveLock);
|
|
|
|
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
2005-08-01 06:03:59 +02:00
|
|
|
/*
 * Adjust dependency record(s) to point to a different object of the same type
 *
 * classId/objectId specify the referencing object.
 * refClassId/oldRefObjectId specify the old referenced object.
 * newRefObjectId is the new referenced object (must be of class refClassId).
 *
 * Note the lack of objsubid parameters.  If there are subobject references
 * they will all be readjusted.
 *
 * Returns the number of records updated.
 */
long
changeDependencyFor(Oid classId, Oid objectId,
					Oid refClassId, Oid oldRefObjectId,
					Oid newRefObjectId)
{
	long		count = 0;
	Relation	depRel;
	ScanKeyData key[2];
	SysScanDesc scan;
	HeapTuple	tup;
	ObjectAddress objAddr;
	bool		newIsPinned;

	depRel = heap_open(DependRelationId, RowExclusiveLock);

	/*
	 * If oldRefObjectId is pinned, there won't be any dependency entries on
	 * it --- we can't cope in that case.  (This isn't really worth expending
	 * code to fix, in current usage; it just means you can't rename stuff out
	 * of pg_catalog, which would likely be a bad move anyway.)
	 */
	objAddr.classId = refClassId;
	objAddr.objectId = oldRefObjectId;
	objAddr.objectSubId = 0;

	if (isObjectPinned(&objAddr, depRel))
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("cannot remove dependency on %s because it is a system object",
						getObjectDescription(&objAddr))));

	/*
	 * We can handle adding a dependency on something pinned, though, since
	 * that just means deleting the dependency entry.
	 */
	objAddr.objectId = newRefObjectId;

	newIsPinned = isObjectPinned(&objAddr, depRel);

	/* Now search for dependency records */
	ScanKeyInit(&key[0],
				Anum_pg_depend_classid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(classId));
	ScanKeyInit(&key[1],
				Anum_pg_depend_objid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(objectId));

	scan = systable_beginscan(depRel, DependDependerIndexId, true,
							  NULL, 2, key);

	while (HeapTupleIsValid((tup = systable_getnext(scan))))
	{
		Form_pg_depend depform = (Form_pg_depend) GETSTRUCT(tup);

		if (depform->refclassid == refClassId &&
			depform->refobjid == oldRefObjectId)
		{
			if (newIsPinned)
				simple_heap_delete(depRel, &tup->t_self);
			else
			{
				/* make a modifiable copy (scan tuples are read-only) */
				tup = heap_copytuple(tup);
				depform = (Form_pg_depend) GETSTRUCT(tup);

				depform->refobjid = newRefObjectId;

				simple_heap_update(depRel, &tup->t_self, tup);
				CatalogUpdateIndexes(depRel, tup);

				heap_freetuple(tup);
			}

			count++;
		}
	}

	systable_endscan(scan);

	heap_close(depRel, RowExclusiveLock);

	return count;
}
|
|
|
|
|
2007-12-02 00:44:44 +01:00
|
|
|
/*
|
|
|
|
* isObjectPinned()
|
|
|
|
*
|
|
|
|
* Test if an object is required for basic database functionality.
|
|
|
|
* Caller must already have opened pg_depend.
|
|
|
|
*
|
|
|
|
* The passed subId, if any, is ignored; we assume that only whole objects
|
|
|
|
* are pinned (and that this implies pinning their components).
|
|
|
|
*/
|
|
|
|
static bool
|
|
|
|
isObjectPinned(const ObjectAddress *object, Relation rel)
|
|
|
|
{
|
|
|
|
bool ret = false;
|
|
|
|
SysScanDesc scan;
|
|
|
|
HeapTuple tup;
|
|
|
|
ScanKeyData key[2];
|
|
|
|
|
|
|
|
ScanKeyInit(&key[0],
|
|
|
|
Anum_pg_depend_refclassid,
|
|
|
|
BTEqualStrategyNumber, F_OIDEQ,
|
|
|
|
ObjectIdGetDatum(object->classId));
|
|
|
|
|
|
|
|
ScanKeyInit(&key[1],
|
|
|
|
Anum_pg_depend_refobjid,
|
|
|
|
BTEqualStrategyNumber, F_OIDEQ,
|
|
|
|
ObjectIdGetDatum(object->objectId));
|
|
|
|
|
|
|
|
scan = systable_beginscan(rel, DependReferenceIndexId, true,
|
Use an MVCC snapshot, rather than SnapshotNow, for catalog scans.
SnapshotNow scans have the undesirable property that, in the face of
concurrent updates, the scan can fail to see either the old or the new
versions of the row. In many cases, we work around this by requiring
DDL operations to hold AccessExclusiveLock on the object being
modified; in some cases, the existing locking is inadequate and random
failures occur as a result. This commit doesn't change anything
related to locking, but will hopefully pave the way to allowing lock
strength reductions in the future.
The major issue has held us back from making this change in the past
is that taking an MVCC snapshot is significantly more expensive than
using a static special snapshot such as SnapshotNow. However, testing
of various worst-case scenarios reveals that this problem is not
severe except under fairly extreme workloads. To mitigate those
problems, we avoid retaking the MVCC snapshot for each new scan;
instead, we take a new snapshot only when invalidation messages have
been processed. The catcache machinery already requires that
invalidation messages be sent before releasing the related heavyweight
lock; else other backends might rely on locally-cached data rather
than scanning the catalog at all. Thus, making snapshot reuse
dependent on the same guarantees shouldn't break anything that wasn't
already subtly broken.
Patch by me. Review by Michael Paquier and Andres Freund.
2013-07-02 15:47:01 +02:00
|
|
|
NULL, 2, key);
|
2007-12-02 00:44:44 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Since we won't generate additional pg_depend entries for pinned
|
|
|
|
* objects, there can be at most one entry referencing a pinned object.
|
|
|
|
* Hence, it's sufficient to look at the first returned tuple; we don't
|
|
|
|
* need to loop.
|
|
|
|
*/
|
|
|
|
tup = systable_getnext(scan);
|
|
|
|
if (HeapTupleIsValid(tup))
|
|
|
|
{
|
|
|
|
Form_pg_depend foundDep = (Form_pg_depend) GETSTRUCT(tup);
|
|
|
|
|
|
|
|
if (foundDep->deptype == DEPENDENCY_PIN)
|
|
|
|
ret = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
systable_endscan(scan);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Various special-purpose lookups and manipulations of pg_depend.
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
2011-02-08 22:08:41 +01:00
|
|
|
/*
|
|
|
|
* Find the extension containing the specified object, if any
|
|
|
|
*
|
|
|
|
* Returns the OID of the extension, or InvalidOid if the object does not
|
|
|
|
* belong to any extension.
|
|
|
|
*
|
|
|
|
* Extension membership is marked by an EXTENSION dependency from the object
|
|
|
|
* to the extension. Note that the result will be indeterminate if pg_depend
|
|
|
|
* contains links from this object to more than one extension ... but that
|
|
|
|
* should never happen.
|
|
|
|
*/
|
|
|
|
Oid
|
|
|
|
getExtensionOfObject(Oid classId, Oid objectId)
|
|
|
|
{
|
|
|
|
Oid result = InvalidOid;
|
|
|
|
Relation depRel;
|
|
|
|
ScanKeyData key[2];
|
|
|
|
SysScanDesc scan;
|
|
|
|
HeapTuple tup;
|
|
|
|
|
|
|
|
depRel = heap_open(DependRelationId, AccessShareLock);
|
|
|
|
|
|
|
|
ScanKeyInit(&key[0],
|
|
|
|
Anum_pg_depend_classid,
|
|
|
|
BTEqualStrategyNumber, F_OIDEQ,
|
|
|
|
ObjectIdGetDatum(classId));
|
|
|
|
ScanKeyInit(&key[1],
|
|
|
|
Anum_pg_depend_objid,
|
|
|
|
BTEqualStrategyNumber, F_OIDEQ,
|
|
|
|
ObjectIdGetDatum(objectId));
|
|
|
|
|
|
|
|
scan = systable_beginscan(depRel, DependDependerIndexId, true,
|
Use an MVCC snapshot, rather than SnapshotNow, for catalog scans.
SnapshotNow scans have the undesirable property that, in the face of
concurrent updates, the scan can fail to see either the old or the new
versions of the row. In many cases, we work around this by requiring
DDL operations to hold AccessExclusiveLock on the object being
modified; in some cases, the existing locking is inadequate and random
failures occur as a result. This commit doesn't change anything
related to locking, but will hopefully pave the way to allowing lock
strength reductions in the future.
The major issue has held us back from making this change in the past
is that taking an MVCC snapshot is significantly more expensive than
using a static special snapshot such as SnapshotNow. However, testing
of various worst-case scenarios reveals that this problem is not
severe except under fairly extreme workloads. To mitigate those
problems, we avoid retaking the MVCC snapshot for each new scan;
instead, we take a new snapshot only when invalidation messages have
been processed. The catcache machinery already requires that
invalidation messages be sent before releasing the related heavyweight
lock; else other backends might rely on locally-cached data rather
than scanning the catalog at all. Thus, making snapshot reuse
dependent on the same guarantees shouldn't break anything that wasn't
already subtly broken.
Patch by me. Review by Michael Paquier and Andres Freund.
2013-07-02 15:47:01 +02:00
|
|
|
NULL, 2, key);
|
2011-02-08 22:08:41 +01:00
|
|
|
|
|
|
|
while (HeapTupleIsValid((tup = systable_getnext(scan))))
|
|
|
|
{
|
|
|
|
Form_pg_depend depform = (Form_pg_depend) GETSTRUCT(tup);
|
|
|
|
|
|
|
|
if (depform->refclassid == ExtensionRelationId &&
|
|
|
|
depform->deptype == DEPENDENCY_EXTENSION)
|
|
|
|
{
|
|
|
|
result = depform->refobjid;
|
|
|
|
break; /* no need to keep scanning */
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
systable_endscan(scan);
|
|
|
|
|
|
|
|
heap_close(depRel, AccessShareLock);
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2006-08-21 02:57:26 +02:00
|
|
|
/*
|
|
|
|
* Detect whether a sequence is marked as "owned" by a column
|
|
|
|
*
|
|
|
|
* An ownership marker is an AUTO dependency from the sequence to the
|
2006-10-04 02:30:14 +02:00
|
|
|
* column. If we find one, store the identity of the owning column
|
2006-08-21 02:57:26 +02:00
|
|
|
* into *tableId and *colId and return TRUE; else return FALSE.
|
|
|
|
*
|
|
|
|
* Note: if there's more than one such pg_depend entry then you get
|
|
|
|
* a random one of them returned into the out parameters. This should
|
|
|
|
* not happen, though.
|
|
|
|
*/
|
|
|
|
bool
|
|
|
|
sequenceIsOwned(Oid seqId, Oid *tableId, int32 *colId)
|
|
|
|
{
|
|
|
|
bool ret = false;
|
|
|
|
Relation depRel;
|
|
|
|
ScanKeyData key[2];
|
|
|
|
SysScanDesc scan;
|
|
|
|
HeapTuple tup;
|
|
|
|
|
|
|
|
depRel = heap_open(DependRelationId, AccessShareLock);
|
|
|
|
|
|
|
|
ScanKeyInit(&key[0],
|
|
|
|
Anum_pg_depend_classid,
|
|
|
|
BTEqualStrategyNumber, F_OIDEQ,
|
|
|
|
ObjectIdGetDatum(RelationRelationId));
|
|
|
|
ScanKeyInit(&key[1],
|
|
|
|
Anum_pg_depend_objid,
|
|
|
|
BTEqualStrategyNumber, F_OIDEQ,
|
|
|
|
ObjectIdGetDatum(seqId));
|
|
|
|
|
|
|
|
scan = systable_beginscan(depRel, DependDependerIndexId, true,
|
Use an MVCC snapshot, rather than SnapshotNow, for catalog scans.
SnapshotNow scans have the undesirable property that, in the face of
concurrent updates, the scan can fail to see either the old or the new
versions of the row. In many cases, we work around this by requiring
DDL operations to hold AccessExclusiveLock on the object being
modified; in some cases, the existing locking is inadequate and random
failures occur as a result. This commit doesn't change anything
related to locking, but will hopefully pave the way to allowing lock
strength reductions in the future.
The major issue has held us back from making this change in the past
is that taking an MVCC snapshot is significantly more expensive than
using a static special snapshot such as SnapshotNow. However, testing
of various worst-case scenarios reveals that this problem is not
severe except under fairly extreme workloads. To mitigate those
problems, we avoid retaking the MVCC snapshot for each new scan;
instead, we take a new snapshot only when invalidation messages have
been processed. The catcache machinery already requires that
invalidation messages be sent before releasing the related heavyweight
lock; else other backends might rely on locally-cached data rather
than scanning the catalog at all. Thus, making snapshot reuse
dependent on the same guarantees shouldn't break anything that wasn't
already subtly broken.
Patch by me. Review by Michael Paquier and Andres Freund.
2013-07-02 15:47:01 +02:00
|
|
|
NULL, 2, key);
|
2006-08-21 02:57:26 +02:00
|
|
|
|
|
|
|
while (HeapTupleIsValid((tup = systable_getnext(scan))))
|
|
|
|
{
|
|
|
|
Form_pg_depend depform = (Form_pg_depend) GETSTRUCT(tup);
|
|
|
|
|
|
|
|
if (depform->refclassid == RelationRelationId &&
|
|
|
|
depform->deptype == DEPENDENCY_AUTO)
|
|
|
|
{
|
|
|
|
*tableId = depform->refobjid;
|
|
|
|
*colId = depform->refobjsubid;
|
|
|
|
ret = true;
|
|
|
|
break; /* no need to keep scanning */
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
systable_endscan(scan);
|
|
|
|
|
|
|
|
heap_close(depRel, AccessShareLock);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Remove any existing "owned" markers for the specified sequence.
 *
 * Note: we don't provide a special function to install an "owned"
 * marker; just use recordDependencyOn().
 */
void
markSequenceUnowned(Oid seqId)
{
	/* An ownership marker is an AUTO dependency on a relation (column) */
	deleteDependencyRecordsForClass(RelationRelationId, seqId,
									RelationRelationId, DEPENDENCY_AUTO);
}
|
|
|
|
|
2008-05-17 01:36:05 +02:00
|
|
|
/*
|
|
|
|
* Collect a list of OIDs of all sequences owned by the specified relation.
|
|
|
|
*/
|
|
|
|
List *
|
|
|
|
getOwnedSequences(Oid relid)
|
|
|
|
{
|
|
|
|
List *result = NIL;
|
|
|
|
Relation depRel;
|
2009-06-11 16:49:15 +02:00
|
|
|
ScanKeyData key[2];
|
|
|
|
SysScanDesc scan;
|
2008-05-17 01:36:05 +02:00
|
|
|
HeapTuple tup;
|
|
|
|
|
|
|
|
depRel = heap_open(DependRelationId, AccessShareLock);
|
|
|
|
|
|
|
|
ScanKeyInit(&key[0],
|
|
|
|
Anum_pg_depend_refclassid,
|
|
|
|
BTEqualStrategyNumber, F_OIDEQ,
|
|
|
|
ObjectIdGetDatum(RelationRelationId));
|
|
|
|
ScanKeyInit(&key[1],
|
|
|
|
Anum_pg_depend_refobjid,
|
|
|
|
BTEqualStrategyNumber, F_OIDEQ,
|
|
|
|
ObjectIdGetDatum(relid));
|
|
|
|
|
|
|
|
scan = systable_beginscan(depRel, DependReferenceIndexId, true,
|
Use an MVCC snapshot, rather than SnapshotNow, for catalog scans.
SnapshotNow scans have the undesirable property that, in the face of
concurrent updates, the scan can fail to see either the old or the new
versions of the row. In many cases, we work around this by requiring
DDL operations to hold AccessExclusiveLock on the object being
modified; in some cases, the existing locking is inadequate and random
failures occur as a result. This commit doesn't change anything
related to locking, but will hopefully pave the way to allowing lock
strength reductions in the future.
The major issue has held us back from making this change in the past
is that taking an MVCC snapshot is significantly more expensive than
using a static special snapshot such as SnapshotNow. However, testing
of various worst-case scenarios reveals that this problem is not
severe except under fairly extreme workloads. To mitigate those
problems, we avoid retaking the MVCC snapshot for each new scan;
instead, we take a new snapshot only when invalidation messages have
been processed. The catcache machinery already requires that
invalidation messages be sent before releasing the related heavyweight
lock; else other backends might rely on locally-cached data rather
than scanning the catalog at all. Thus, making snapshot reuse
dependent on the same guarantees shouldn't break anything that wasn't
already subtly broken.
Patch by me. Review by Michael Paquier and Andres Freund.
2013-07-02 15:47:01 +02:00
|
|
|
NULL, 2, key);
|
2008-05-17 01:36:05 +02:00
|
|
|
|
|
|
|
while (HeapTupleIsValid(tup = systable_getnext(scan)))
|
|
|
|
{
|
|
|
|
Form_pg_depend deprec = (Form_pg_depend) GETSTRUCT(tup);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We assume any auto dependency of a sequence on a column must be
|
|
|
|
* what we are looking for. (We need the relkind test because indexes
|
|
|
|
* can also have auto dependencies on columns.)
|
|
|
|
*/
|
|
|
|
if (deprec->classid == RelationRelationId &&
|
|
|
|
deprec->objsubid == 0 &&
|
|
|
|
deprec->refobjsubid != 0 &&
|
|
|
|
deprec->deptype == DEPENDENCY_AUTO &&
|
|
|
|
get_rel_relkind(deprec->objid) == RELKIND_SEQUENCE)
|
|
|
|
{
|
|
|
|
result = lappend_oid(result, deprec->objid);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
systable_endscan(scan);
|
|
|
|
|
|
|
|
heap_close(depRel, AccessShareLock);
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2007-12-02 00:44:44 +01:00
|
|
|
|
2002-07-12 20:43:19 +02:00
|
|
|
/*
|
2007-12-02 00:44:44 +01:00
|
|
|
* get_constraint_index
|
|
|
|
* Given the OID of a unique or primary-key constraint, return the
|
|
|
|
* OID of the underlying unique index.
|
2002-07-12 20:43:19 +02:00
|
|
|
*
|
2007-12-02 00:44:44 +01:00
|
|
|
* Return InvalidOid if the index couldn't be found; this suggests the
|
|
|
|
* given OID is bogus, but we leave it to caller to decide what to do.
|
2002-07-12 20:43:19 +02:00
|
|
|
*/
|
2007-12-02 00:44:44 +01:00
|
|
|
Oid
|
|
|
|
get_constraint_index(Oid constraintId)
|
2002-07-12 20:43:19 +02:00
|
|
|
{
|
2007-12-02 00:44:44 +01:00
|
|
|
Oid indexId = InvalidOid;
|
|
|
|
Relation depRel;
|
|
|
|
ScanKeyData key[3];
|
2002-09-04 22:31:48 +02:00
|
|
|
SysScanDesc scan;
|
2002-07-12 20:43:19 +02:00
|
|
|
HeapTuple tup;
|
2007-12-02 00:44:44 +01:00
|
|
|
|
|
|
|
/* Search the dependency table for the dependent index */
|
|
|
|
depRel = heap_open(DependRelationId, AccessShareLock);
|
2002-07-12 20:43:19 +02:00
|
|
|
|
2003-11-12 22:15:59 +01:00
|
|
|
ScanKeyInit(&key[0],
|
|
|
|
Anum_pg_depend_refclassid,
|
|
|
|
BTEqualStrategyNumber, F_OIDEQ,
|
2007-12-02 00:44:44 +01:00
|
|
|
ObjectIdGetDatum(ConstraintRelationId));
|
2003-11-12 22:15:59 +01:00
|
|
|
ScanKeyInit(&key[1],
|
|
|
|
Anum_pg_depend_refobjid,
|
|
|
|
BTEqualStrategyNumber, F_OIDEQ,
|
2007-12-02 00:44:44 +01:00
|
|
|
ObjectIdGetDatum(constraintId));
|
|
|
|
ScanKeyInit(&key[2],
|
|
|
|
Anum_pg_depend_refobjsubid,
|
|
|
|
BTEqualStrategyNumber, F_INT4EQ,
|
|
|
|
Int32GetDatum(0));
|
2002-07-12 20:43:19 +02:00
|
|
|
|
2007-12-02 00:44:44 +01:00
|
|
|
scan = systable_beginscan(depRel, DependReferenceIndexId, true,
|
Use an MVCC snapshot, rather than SnapshotNow, for catalog scans.
SnapshotNow scans have the undesirable property that, in the face of
concurrent updates, the scan can fail to see either the old or the new
versions of the row. In many cases, we work around this by requiring
DDL operations to hold AccessExclusiveLock on the object being
modified; in some cases, the existing locking is inadequate and random
failures occur as a result. This commit doesn't change anything
related to locking, but will hopefully pave the way to allowing lock
strength reductions in the future.
The major issue has held us back from making this change in the past
is that taking an MVCC snapshot is significantly more expensive than
using a static special snapshot such as SnapshotNow. However, testing
of various worst-case scenarios reveals that this problem is not
severe except under fairly extreme workloads. To mitigate those
problems, we avoid retaking the MVCC snapshot for each new scan;
instead, we take a new snapshot only when invalidation messages have
been processed. The catcache machinery already requires that
invalidation messages be sent before releasing the related heavyweight
lock; else other backends might rely on locally-cached data rather
than scanning the catalog at all. Thus, making snapshot reuse
dependent on the same guarantees shouldn't break anything that wasn't
already subtly broken.
Patch by me. Review by Michael Paquier and Andres Freund.
2013-07-02 15:47:01 +02:00
|
|
|
NULL, 3, key);
|
2002-07-12 20:43:19 +02:00
|
|
|
|
2007-12-02 00:44:44 +01:00
|
|
|
while (HeapTupleIsValid(tup = systable_getnext(scan)))
|
2002-07-12 20:43:19 +02:00
|
|
|
{
|
2007-12-02 00:44:44 +01:00
|
|
|
Form_pg_depend deprec = (Form_pg_depend) GETSTRUCT(tup);
|
2002-07-12 20:43:19 +02:00
|
|
|
|
2007-12-02 00:44:44 +01:00
|
|
|
/*
|
|
|
|
* We assume any internal dependency of an index on the constraint
|
|
|
|
* must be what we are looking for. (The relkind test is just
|
|
|
|
* paranoia; there shouldn't be any such dependencies otherwise.)
|
|
|
|
*/
|
|
|
|
if (deprec->classid == RelationRelationId &&
|
|
|
|
deprec->objsubid == 0 &&
|
|
|
|
deprec->deptype == DEPENDENCY_INTERNAL &&
|
|
|
|
get_rel_relkind(deprec->objid) == RELKIND_INDEX)
|
|
|
|
{
|
|
|
|
indexId = deprec->objid;
|
|
|
|
break;
|
|
|
|
}
|
2002-07-12 20:43:19 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
systable_endscan(scan);
|
2007-12-02 00:44:44 +01:00
|
|
|
heap_close(depRel, AccessShareLock);
|
2002-07-12 20:43:19 +02:00
|
|
|
|
2007-12-02 00:44:44 +01:00
|
|
|
return indexId;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* get_index_constraint
|
|
|
|
* Given the OID of an index, return the OID of the owning unique or
|
|
|
|
* primary-key constraint, or InvalidOid if no such constraint.
|
|
|
|
*/
|
|
|
|
Oid
|
|
|
|
get_index_constraint(Oid indexId)
|
|
|
|
{
|
|
|
|
Oid constraintId = InvalidOid;
|
|
|
|
Relation depRel;
|
|
|
|
ScanKeyData key[3];
|
|
|
|
SysScanDesc scan;
|
|
|
|
HeapTuple tup;
|
|
|
|
|
|
|
|
/* Search the dependency table for the index */
|
|
|
|
depRel = heap_open(DependRelationId, AccessShareLock);
|
|
|
|
|
|
|
|
ScanKeyInit(&key[0],
|
|
|
|
Anum_pg_depend_classid,
|
|
|
|
BTEqualStrategyNumber, F_OIDEQ,
|
|
|
|
ObjectIdGetDatum(RelationRelationId));
|
|
|
|
ScanKeyInit(&key[1],
|
|
|
|
Anum_pg_depend_objid,
|
|
|
|
BTEqualStrategyNumber, F_OIDEQ,
|
|
|
|
ObjectIdGetDatum(indexId));
|
|
|
|
ScanKeyInit(&key[2],
|
|
|
|
Anum_pg_depend_objsubid,
|
|
|
|
BTEqualStrategyNumber, F_INT4EQ,
|
|
|
|
Int32GetDatum(0));
|
|
|
|
|
|
|
|
scan = systable_beginscan(depRel, DependDependerIndexId, true,
|
Use an MVCC snapshot, rather than SnapshotNow, for catalog scans.
SnapshotNow scans have the undesirable property that, in the face of
concurrent updates, the scan can fail to see either the old or the new
versions of the row. In many cases, we work around this by requiring
DDL operations to hold AccessExclusiveLock on the object being
modified; in some cases, the existing locking is inadequate and random
failures occur as a result. This commit doesn't change anything
related to locking, but will hopefully pave the way to allowing lock
strength reductions in the future.
The major issue has held us back from making this change in the past
is that taking an MVCC snapshot is significantly more expensive than
using a static special snapshot such as SnapshotNow. However, testing
of various worst-case scenarios reveals that this problem is not
severe except under fairly extreme workloads. To mitigate those
problems, we avoid retaking the MVCC snapshot for each new scan;
instead, we take a new snapshot only when invalidation messages have
been processed. The catcache machinery already requires that
invalidation messages be sent before releasing the related heavyweight
lock; else other backends might rely on locally-cached data rather
than scanning the catalog at all. Thus, making snapshot reuse
dependent on the same guarantees shouldn't break anything that wasn't
already subtly broken.
Patch by me. Review by Michael Paquier and Andres Freund.
2013-07-02 15:47:01 +02:00
|
|
|
NULL, 3, key);
|
2007-12-02 00:44:44 +01:00
|
|
|
|
|
|
|
while (HeapTupleIsValid(tup = systable_getnext(scan)))
|
|
|
|
{
|
|
|
|
Form_pg_depend deprec = (Form_pg_depend) GETSTRUCT(tup);
|
|
|
|
|
|
|
|
/*
|
2009-06-11 16:49:15 +02:00
|
|
|
* We assume any internal dependency on a constraint must be what we
|
|
|
|
* are looking for.
|
2007-12-02 00:44:44 +01:00
|
|
|
*/
|
|
|
|
if (deprec->refclassid == ConstraintRelationId &&
|
|
|
|
deprec->refobjsubid == 0 &&
|
|
|
|
deprec->deptype == DEPENDENCY_INTERNAL)
|
|
|
|
{
|
|
|
|
constraintId = deprec->refobjid;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
systable_endscan(scan);
|
|
|
|
heap_close(depRel, AccessShareLock);
|
|
|
|
|
|
|
|
return constraintId;
|
2002-07-12 20:43:19 +02:00
|
|
|
}
|