2012-07-20 17:38:47 +02:00
|
|
|
/*-------------------------------------------------------------------------
|
|
|
|
*
|
|
|
|
* evtcache.c
|
|
|
|
* Special-purpose cache for event trigger data.
|
|
|
|
*
|
2023-01-02 21:00:37 +01:00
|
|
|
* Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
|
2012-07-20 17:38:47 +02:00
|
|
|
* Portions Copyright (c) 1994, Regents of the University of California
|
|
|
|
*
|
|
|
|
* IDENTIFICATION
|
|
|
|
* src/backend/utils/cache/evtcache.c
|
|
|
|
*
|
|
|
|
*-------------------------------------------------------------------------
|
|
|
|
*/
|
|
|
|
#include "postgres.h"
|
|
|
|
|
2019-12-27 00:09:00 +01:00
|
|
|
#include "access/genam.h"
|
2012-08-30 22:15:44 +02:00
|
|
|
#include "access/htup_details.h"
|
2019-01-21 19:18:20 +01:00
|
|
|
#include "access/relation.h"
|
2019-11-12 04:00:16 +01:00
|
|
|
#include "catalog/pg_event_trigger.h"
|
2012-07-20 17:38:47 +02:00
|
|
|
#include "catalog/pg_type.h"
|
|
|
|
#include "commands/trigger.h"
|
2020-03-02 22:19:51 +01:00
|
|
|
#include "tcop/cmdtag.h"
|
2012-07-20 17:38:47 +02:00
|
|
|
#include "utils/array.h"
|
|
|
|
#include "utils/builtins.h"
|
2012-08-29 00:02:07 +02:00
|
|
|
#include "utils/catcache.h"
|
2012-07-20 17:38:47 +02:00
|
|
|
#include "utils/evtcache.h"
|
2019-11-12 04:00:16 +01:00
|
|
|
#include "utils/hsearch.h"
|
2012-07-20 17:38:47 +02:00
|
|
|
#include "utils/inval.h"
|
|
|
|
#include "utils/memutils.h"
|
|
|
|
#include "utils/rel.h"
|
|
|
|
#include "utils/snapmgr.h"
|
|
|
|
#include "utils/syscache.h"
|
|
|
|
|
2012-08-08 22:38:37 +02:00
|
|
|
typedef enum
|
|
|
|
{
|
|
|
|
ETCS_NEEDS_REBUILD,
|
|
|
|
ETCS_REBUILD_STARTED,
|
|
|
|
ETCS_VALID
|
|
|
|
} EventTriggerCacheStateType;
|
|
|
|
|
2012-07-20 17:38:47 +02:00
|
|
|
/*
 * One hash table entry per event type.  The event field doubles as the hash
 * key (BuildEventTriggerCache sets keysize = sizeof(EventTriggerEvent)), so
 * it must remain the first member.
 */
typedef struct
{
	EventTriggerEvent event;	/* hash key: which firing point */
	List	   *triggerlist;	/* list of EventTriggerCacheItem *, in name order */
} EventTriggerCacheEntry;
|
|
|
|
|
|
|
|
static HTAB *EventTriggerCache; /* event -> EventTriggerCacheEntry map */
static MemoryContext EventTriggerCacheContext;	/* holds cache and hash table */
static EventTriggerCacheStateType EventTriggerCacheState = ETCS_NEEDS_REBUILD;
|
2012-07-20 17:38:47 +02:00
|
|
|
|
|
|
|
/* Forward declarations for local functions. */
static void BuildEventTriggerCache(void);
static void InvalidateEventCacheCallback(Datum arg,
										 int cacheid, uint32 hashvalue);
static Bitmapset *DecodeTextArrayToBitmapset(Datum array);
|
2012-07-20 17:38:47 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Search the event cache by trigger event.
|
|
|
|
*
|
|
|
|
* Note that the caller had better copy any data it wants to keep around
|
|
|
|
* across any operation that might touch a system catalog into some other
|
|
|
|
* memory context, since a cache reset could blow the return value away.
|
|
|
|
*/
|
|
|
|
List *
|
|
|
|
EventCacheLookup(EventTriggerEvent event)
|
|
|
|
{
|
|
|
|
EventTriggerCacheEntry *entry;
|
|
|
|
|
2012-08-08 22:38:37 +02:00
|
|
|
if (EventTriggerCacheState != ETCS_VALID)
|
2012-07-20 17:38:47 +02:00
|
|
|
BuildEventTriggerCache();
|
|
|
|
entry = hash_search(EventTriggerCache, &event, HASH_FIND, NULL);
|
2017-06-06 17:21:22 +02:00
|
|
|
return entry != NULL ? entry->triggerlist : NIL;
|
2012-07-20 17:38:47 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Rebuild the event trigger cache.
 *
 * Scans pg_event_trigger in name order and installs a fresh hash table,
 * keyed by event type, in EventTriggerCacheContext.  The ETCS_* state
 * machine cooperates with InvalidateEventCacheCallback: if an invalidation
 * arrives mid-rebuild, the new cache is still installed (avoiding infinite
 * rebuild loops) but left marked stale.
 */
static void
BuildEventTriggerCache(void)
{
	HASHCTL		ctl;
	HTAB	   *cache;
	MemoryContext oldcontext;
	Relation	rel;
	Relation	irel;
	SysScanDesc scan;

	if (EventTriggerCacheContext != NULL)
	{
		/*
		 * Free up any memory already allocated in EventTriggerCacheContext.
		 * This can happen either because a previous rebuild failed, or
		 * because an invalidation happened before the rebuild was complete.
		 */
		MemoryContextResetAndDeleteChildren(EventTriggerCacheContext);
	}
	else
	{
		/*
		 * This is our first time attempting to build the cache, so we need to
		 * set up the memory context and register a syscache callback to
		 * capture future invalidation events.
		 */
		if (CacheMemoryContext == NULL)
			CreateCacheMemoryContext();
		EventTriggerCacheContext =
			AllocSetContextCreate(CacheMemoryContext,
								  "EventTriggerCache",
								  ALLOCSET_DEFAULT_SIZES);
		CacheRegisterSyscacheCallback(EVENTTRIGGEROID,
									  InvalidateEventCacheCallback,
									  (Datum) 0);
	}

	/* Switch to correct memory context. */
	oldcontext = MemoryContextSwitchTo(EventTriggerCacheContext);

	/* Prevent the memory context from being nuked while we're rebuilding. */
	EventTriggerCacheState = ETCS_REBUILD_STARTED;

	/* Create new hash table. */
	ctl.keysize = sizeof(EventTriggerEvent);
	ctl.entrysize = sizeof(EventTriggerCacheEntry);
	ctl.hcxt = EventTriggerCacheContext;
	cache = hash_create("EventTriggerCacheHash", 32, &ctl,
						HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);

	/*
	 * Prepare to scan pg_event_trigger in name order.
	 */
	rel = relation_open(EventTriggerRelationId, AccessShareLock);
	irel = index_open(EventTriggerNameIndexId, AccessShareLock);
	scan = systable_beginscan_ordered(rel, irel, NULL, 0, NULL);

	/*
	 * Build a cache item for each pg_event_trigger tuple, and append each one
	 * to the appropriate cache entry.
	 */
	for (;;)
	{
		HeapTuple	tup;
		Form_pg_event_trigger form;
		char	   *evtevent;
		EventTriggerEvent event;
		EventTriggerCacheItem *item;
		Datum		evttags;
		bool		evttags_isnull;
		EventTriggerCacheEntry *entry;
		bool		found;

		/* Get next tuple. */
		tup = systable_getnext_ordered(scan, ForwardScanDirection);
		if (!HeapTupleIsValid(tup))
			break;

		/* Skip trigger if disabled. */
		form = (Form_pg_event_trigger) GETSTRUCT(tup);
		if (form->evtenabled == TRIGGER_DISABLED)
			continue;

		/* Decode event name; silently skip any event type we don't know. */
		evtevent = NameStr(form->evtevent);
		if (strcmp(evtevent, "ddl_command_start") == 0)
			event = EVT_DDLCommandStart;
		else if (strcmp(evtevent, "ddl_command_end") == 0)
			event = EVT_DDLCommandEnd;
		else if (strcmp(evtevent, "sql_drop") == 0)
			event = EVT_SQLDrop;
		else if (strcmp(evtevent, "table_rewrite") == 0)
			event = EVT_TableRewrite;
		else
			continue;

		/* Allocate new cache item. */
		item = palloc0(sizeof(EventTriggerCacheItem));
		item->fnoid = form->evtfoid;
		item->enabled = form->evtenabled;

		/* Decode and sort tags array. */
		evttags = heap_getattr(tup, Anum_pg_event_trigger_evttags,
							   RelationGetDescr(rel), &evttags_isnull);
		if (!evttags_isnull)
			item->tagset = DecodeTextArrayToBitmapset(evttags);

		/*
		 * Add to cache entry.  Since we scan in name order, appending keeps
		 * each trigger list sorted by trigger name.
		 */
		entry = hash_search(cache, &event, HASH_ENTER, &found);
		if (found)
			entry->triggerlist = lappend(entry->triggerlist, item);
		else
			entry->triggerlist = list_make1(item);
	}

	/* Done with pg_event_trigger scan. */
	systable_endscan_ordered(scan);
	index_close(irel, AccessShareLock);
	relation_close(rel, AccessShareLock);

	/* Restore previous memory context. */
	MemoryContextSwitchTo(oldcontext);

	/* Install new cache. */
	EventTriggerCache = cache;

	/*
	 * If the cache has been invalidated since we entered this routine, we
	 * still use and return the cache we just finished constructing, to avoid
	 * infinite loops, but we leave the cache marked stale so that we'll
	 * rebuild it again on next access.  Otherwise, we mark the cache valid.
	 */
	if (EventTriggerCacheState == ETCS_REBUILD_STARTED)
		EventTriggerCacheState = ETCS_VALID;
}
|
|
|
|
|
|
|
|
/*
|
2020-03-02 22:19:51 +01:00
|
|
|
* Decode text[] to a Bitmapset of CommandTags.
|
2012-07-20 17:38:47 +02:00
|
|
|
*
|
|
|
|
* We could avoid a bit of overhead here if we were willing to duplicate some
|
|
|
|
* of the logic from deconstruct_array, but it doesn't seem worth the code
|
|
|
|
* complexity.
|
|
|
|
*/
|
2020-03-02 22:19:51 +01:00
|
|
|
static Bitmapset *
|
|
|
|
DecodeTextArrayToBitmapset(Datum array)
|
2012-07-20 17:38:47 +02:00
|
|
|
{
|
|
|
|
ArrayType *arr = DatumGetArrayTypeP(array);
|
|
|
|
Datum *elems;
|
2020-03-02 22:19:51 +01:00
|
|
|
Bitmapset *bms;
|
2012-07-20 17:38:47 +02:00
|
|
|
int i;
|
|
|
|
int nelems;
|
|
|
|
|
|
|
|
if (ARR_NDIM(arr) != 1 || ARR_HASNULL(arr) || ARR_ELEMTYPE(arr) != TEXTOID)
|
|
|
|
elog(ERROR, "expected 1-D text array");
|
2022-07-01 10:51:45 +02:00
|
|
|
deconstruct_array_builtin(arr, TEXTOID, &elems, NULL, &nelems);
|
2012-07-20 17:38:47 +02:00
|
|
|
|
2020-03-02 22:19:51 +01:00
|
|
|
for (bms = NULL, i = 0; i < nelems; ++i)
|
|
|
|
{
|
|
|
|
char *str = TextDatumGetCString(elems[i]);
|
|
|
|
|
|
|
|
bms = bms_add_member(bms, GetCommandTagEnum(str));
|
|
|
|
pfree(str);
|
|
|
|
}
|
2012-07-20 17:38:47 +02:00
|
|
|
|
|
|
|
pfree(elems);
|
2020-03-02 22:19:51 +01:00
|
|
|
|
|
|
|
return bms;
|
2012-07-20 17:38:47 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Flush all cache entries when pg_event_trigger is updated.
|
|
|
|
*
|
|
|
|
* This should be rare enough that we don't need to be very granular about
|
|
|
|
* it, so we just blow away everything, which also avoids the possibility of
|
|
|
|
* memory leaks.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
InvalidateEventCacheCallback(Datum arg, int cacheid, uint32 hashvalue)
|
|
|
|
{
|
2012-08-08 22:38:37 +02:00
|
|
|
/*
|
|
|
|
* If the cache isn't valid, then there might be a rebuild in progress, so
|
|
|
|
* we can't immediately blow it away. But it's advantageous to do this
|
|
|
|
* when possible, so as to immediately free memory.
|
|
|
|
*/
|
|
|
|
if (EventTriggerCacheState == ETCS_VALID)
|
|
|
|
{
|
|
|
|
MemoryContextResetAndDeleteChildren(EventTriggerCacheContext);
|
|
|
|
EventTriggerCache = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Mark cache for rebuild. */
|
|
|
|
EventTriggerCacheState = ETCS_NEEDS_REBUILD;
|
2012-07-20 17:38:47 +02:00
|
|
|
}
|