Diffstat (limited to 'src')
-rw-r--r--  src/backend/storage/buffer/bufmgr.c  | 66
-rw-r--r--  src/backend/storage/lmgr/lwlock.c    | 15
-rw-r--r--  src/backend/utils/adt/pg_locale.c    |  3
-rw-r--r--  src/backend/utils/cache/catcache.c   | 51
-rw-r--r--  src/backend/utils/cache/inval.c      | 14
-rw-r--r--  src/backend/utils/cache/relcache.c   | 20
-rw-r--r--  src/backend/utils/mb/mbutils.c       |  3
-rw-r--r--  src/include/storage/bufmgr.h         |  3
-rw-r--r--  src/include/storage/lwlock.h         |  2
-rw-r--r--  src/include/utils/relcache.h         |  8
10 files changed, 165 insertions(+), 20 deletions(-)
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 79e6d6fc3d5..f563742ad44 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -36,6 +36,9 @@
#include "access/tableam.h"
#include "access/xlog.h"
#include "catalog/catalog.h"
+#ifdef USE_ASSERT_CHECKING
+#include "catalog/pg_tablespace_d.h"
+#endif
#include "catalog/storage.h"
#include "executor/instrument.h"
#include "lib/binaryheap.h"
@@ -487,6 +490,10 @@ static void FindAndDropRelFileNodeBuffers(RelFileNode rnode,
BlockNumber firstDelBlock);
static void AtProcExit_Buffers(int code, Datum arg);
static void CheckForBufferLeaks(void);
+#ifdef USE_ASSERT_CHECKING
+static void AssertNotCatalogBufferLock(LWLock *lock, LWLockMode mode,
+ void *unused_context);
+#endif
static int rnode_comparator(const void *p1, const void *p2);
static inline int buffertag_comparator(const BufferTag *a, const BufferTag *b);
static inline int ckpt_buforder_comparator(const CkptSortItem *a, const CkptSortItem *b);
@@ -2697,6 +2704,65 @@ CheckForBufferLeaks(void)
#endif
}
+#ifdef USE_ASSERT_CHECKING
+/*
+ * Check for exclusive-locked catalog buffers. This is the core of
+ * AssertCouldGetRelation().
+ *
+ * A backend would self-deadlock on LWLocks if the catalog scan read the
+ * exclusive-locked buffer. The main threat is exclusive-locked buffers of
+ * catalogs used in relcache, because a catcache search on any catalog may
+ * build that catalog's relcache entry. We don't have an inventory of
+ * catalogs relcache uses, so just check buffers of most catalogs.
+ *
+ * It's better to minimize waits while holding an exclusive buffer lock, so it
+ * would be nice to broaden this check not to be catalog-specific. However,
+ * bttextcmp() accesses pg_collation, and non-core opclasses might similarly
+ * read tables. That is deadlock-free as long as there's no loop in the
+ * dependency graph: modifying table A may cause an opclass to read table B,
+ * but it must not cause a read of table A.
+ */
+void
+AssertBufferLocksPermitCatalogRead(void)
+{
+ ForEachLWLockHeldByMe(AssertNotCatalogBufferLock, NULL);
+}
+
+static void
+AssertNotCatalogBufferLock(LWLock *lock, LWLockMode mode,
+ void *unused_context)
+{
+ BufferDesc *bufHdr;
+ BufferTag tag;
+ Oid relid;
+
+ if (mode != LW_EXCLUSIVE)
+ return;
+
+ if (!((BufferDescPadded *) lock > BufferDescriptors &&
+ (BufferDescPadded *) lock < BufferDescriptors + NBuffers))
+ return; /* not a buffer lock */
+
+ bufHdr = (BufferDesc *)
+ ((char *) lock - offsetof(BufferDesc, content_lock));
+ tag = bufHdr->tag;
+
+ /*
+ * This relNode==relid assumption holds until a catalog experiences VACUUM
+ * FULL or similar. After a command like that, relNode will be in the
+ * normal (non-catalog) range, and we lose the ability to detect hazardous
+ * access to that catalog. Calling RelidByRelfilenode() would close that
+ * gap, but RelidByRelfilenode() might then deadlock with a held lock.
+ */
+ relid = tag.rnode.relNode;
+
+ Assert(!IsCatalogRelationOid(relid));
+ /* Shared rels are always catalogs: detect even after VACUUM FULL. */
+ Assert(tag.rnode.spcNode != GLOBALTABLESPACE_OID);
+}
+#endif
+
+
/*
* Helper routine to issue warnings when a buffer is unexpectedly pinned
*/
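
A minimal sketch (not part of this patch) of the hazard the new assertion is
meant to catch; the function and its arguments are hypothetical, but the
buffer-manager and syscache calls are the standard ones:

#include "postgres.h"
#include "storage/bufmgr.h"
#include "utils/syscache.h"

static void
hazardous_example(Relation pg_class_rel, BlockNumber blkno, Oid some_relid)
{
	Buffer		buf = ReadBuffer(pg_class_rel, blkno);
	HeapTuple	tup;

	LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);

	/*
	 * If this probe misses, building the relcache entry scans pg_class and
	 * may try to lock the buffer held above, self-deadlocking.  With this
	 * patch, assert-enabled builds fail AssertBufferLocksPermitCatalogRead()
	 * before the scan starts, hits included.
	 */
	tup = SearchSysCache1(RELOID, ObjectIdGetDatum(some_relid));
	if (HeapTupleIsValid(tup))
		ReleaseSysCache(tup);

	UnlockReleaseBuffer(buf);
}
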
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index bb2d894393e..02f79af3a46 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -1922,6 +1922,21 @@ LWLockReleaseAll(void)
/*
+ * ForEachLWLockHeldByMe - run a callback for each held lock
+ *
+ * This is meant as debug support only.
+ */
+void
+ForEachLWLockHeldByMe(void (*callback) (LWLock *, LWLockMode, void *),
+ void *context)
+{
+ int i;
+
+ for (i = 0; i < num_held_lwlocks; i++)
+ callback(held_lwlocks[i].lock, held_lwlocks[i].mode, context);
+}
+
+/*
* LWLockHeldByMe - test whether my process holds a lock in any mode
*
* This is meant as debug support only.
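
A small usage sketch (not in the patch) for the new iterator; the callback
and reporting function names are hypothetical:

#include "postgres.h"
#include "storage/lwlock.h"

/* tally how many currently-held LWLocks are held exclusively */
static void
count_exclusive_cb(LWLock *lock, LWLockMode mode, void *context)
{
	int		   *nexclusive = (int *) context;

	if (mode == LW_EXCLUSIVE)
		(*nexclusive)++;
}

static void
report_exclusive_lwlocks(void)
{
	int			nexclusive = 0;

	ForEachLWLockHeldByMe(count_exclusive_cb, &nexclusive);
	elog(DEBUG1, "backend holds %d exclusive LWLocks", nexclusive);
}
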
diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c
index bf523c18249..8ae101cefc2 100644
--- a/src/backend/utils/adt/pg_locale.c
+++ b/src/backend/utils/adt/pg_locale.c
@@ -64,6 +64,7 @@
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/pg_locale.h"
+#include "utils/relcache.h"
#include "utils/syscache.h"
#ifdef USE_ICU
@@ -1262,6 +1263,8 @@ lookup_collation_cache(Oid collation, bool set_flags)
Assert(OidIsValid(collation));
Assert(collation != DEFAULT_COLLATION_OID);
+ AssertCouldGetRelation();
+
if (collation_cache == NULL)
{
/* First time through, initialize the hash table */
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index 68905b006e2..655920e019a 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -943,11 +943,40 @@ RehashCatCache(CatCache *cp)
}
/*
+ * ConditionalCatalogCacheInitializeCache
+ *
+ * Call CatalogCacheInitializeCache() if not yet done.
+ */
+pg_attribute_always_inline
+static void
+ConditionalCatalogCacheInitializeCache(CatCache *cache)
+{
+#ifdef USE_ASSERT_CHECKING
+ /*
+ * TypeCacheRelCallback() runs outside transactions and relies on TYPEOID
+ * for hashing. This isn't ideal. Since lookup_type_cache() both
+ * registers the callback and searches TYPEOID, reaching trouble likely
+ * requires OOM at an unlucky moment.
+ *
+ * InvalidateAttoptCacheCallback() runs outside transactions and likewise
+ * relies on ATTNUM. InitPostgres() initializes ATTNUM, so it's reliable.
+ */
+ if (!(cache->id == TYPEOID || cache->id == ATTNUM) ||
+ IsTransactionState())
+ AssertCouldGetRelation();
+ else
+ Assert(cache->cc_tupdesc != NULL);
+#endif
+
+ if (unlikely(cache->cc_tupdesc == NULL))
+ CatalogCacheInitializeCache(cache);
+}
+
+/*
* CatalogCacheInitializeCache
*
* This function does final initialization of a catcache: obtain the tuple
- * descriptor and set up the hash and equality function links. We assume
- * that the relcache entry can be opened at this point!
+ * descriptor and set up the hash and equality function links.
*/
#ifdef CACHEDEBUG
#define CatalogCacheInitializeCache_DEBUG1 \
@@ -1082,8 +1111,7 @@ CatalogCacheInitializeCache(CatCache *cache)
void
InitCatCachePhase2(CatCache *cache, bool touch_index)
{
- if (cache->cc_tupdesc == NULL)
- CatalogCacheInitializeCache(cache);
+ ConditionalCatalogCacheInitializeCache(cache);
if (touch_index &&
cache->id != AMOID &&
@@ -1262,16 +1290,12 @@ SearchCatCacheInternal(CatCache *cache,
dlist_head *bucket;
CatCTup *ct;
- /* Make sure we're in an xact, even if this ends up being a cache hit */
- Assert(IsTransactionState());
-
Assert(cache->cc_nkeys == nkeys);
/*
* one-time startup overhead for each cache
*/
- if (unlikely(cache->cc_tupdesc == NULL))
- CatalogCacheInitializeCache(cache);
+ ConditionalCatalogCacheInitializeCache(cache);
#ifdef CATCACHE_STATS
cache->cc_searches++;
@@ -1550,8 +1574,7 @@ GetCatCacheHashValue(CatCache *cache,
/*
* one-time startup overhead for each cache
*/
- if (cache->cc_tupdesc == NULL)
- CatalogCacheInitializeCache(cache);
+ ConditionalCatalogCacheInitializeCache(cache);
/*
* calculate the hash value
@@ -1600,8 +1623,7 @@ SearchCatCacheList(CatCache *cache,
/*
* one-time startup overhead for each cache
*/
- if (cache->cc_tupdesc == NULL)
- CatalogCacheInitializeCache(cache);
+ ConditionalCatalogCacheInitializeCache(cache);
Assert(nkeys > 0 && nkeys < cache->cc_nkeys);
@@ -2226,8 +2248,7 @@ PrepareToInvalidateCacheTuple(Relation relation,
continue;
/* Just in case cache hasn't finished initialization yet... */
- if (ccp->cc_tupdesc == NULL)
- CatalogCacheInitializeCache(ccp);
+ ConditionalCatalogCacheInitializeCache(ccp);
hashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, tuple);
dbid = ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId;
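
The Assert(IsTransactionState()) that used to sit at the top of
SearchCatCacheInternal() now runs on every probe, hits included, via
ConditionalCatalogCacheInitializeCache().  A hedged sketch of the caller-side
contract, using a hypothetical helper (the syscache calls are standard):

#include "postgres.h"
#include "access/htup_details.h"
#include "catalog/pg_class.h"
#include "utils/syscache.h"

/* hypothetical helper: must run inside a transaction, per the new asserts */
static char *
get_relname_copy(Oid relid)
{
	HeapTuple	tup;
	char	   *result = NULL;

	/* even an expected cache hit must be able to fall back to a catalog scan */
	tup = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
	if (HeapTupleIsValid(tup))
	{
		Form_pg_class classForm = (Form_pg_class) GETSTRUCT(tup);

		result = pstrdup(NameStr(classForm->relname));
		ReleaseSysCache(tup);
	}
	return result;
}
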
diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c
index 879340831f1..e1f0cf3347f 100644
--- a/src/backend/utils/cache/inval.c
+++ b/src/backend/utils/cache/inval.c
@@ -720,6 +720,12 @@ InvalidateSystemCachesExtended(bool debug_discard)
void
AcceptInvalidationMessages(void)
{
+#ifdef USE_ASSERT_CHECKING
+ /* message handlers shall access catalogs only during transactions */
+ if (IsTransactionState())
+ AssertCouldGetRelation();
+#endif
+
ReceiveSharedInvalidMessages(LocalExecuteInvalidationMessage,
InvalidateSystemCaches);
@@ -771,7 +777,8 @@ PrepareInvalidationState(void)
{
TransInvalidationInfo *myInfo;
- Assert(IsTransactionState());
+ /* PrepareToInvalidateCacheTuple() needs relcache */
+ AssertCouldGetRelation();
/* Can't queue transactional message while collecting inplace messages. */
Assert(inplaceInvalInfo == NULL);
@@ -807,7 +814,7 @@ PrepareInplaceInvalidationState(void)
{
InvalidationInfo *myInfo;
- Assert(IsTransactionState());
+ AssertCouldGetRelation();
/* limit of one inplace update under assembly */
Assert(inplaceInvalInfo == NULL);
@@ -1248,6 +1255,9 @@ CacheInvalidateHeapTupleCommon(Relation relation,
Oid databaseId;
Oid relationId;
+ /* PrepareToInvalidateCacheTuple() needs relcache */
+ AssertCouldGetRelation();
+
/* Do nothing during bootstrap */
if (IsBootstrapProcessingMode())
return;
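
AcceptInvalidationMessages() can run outside any transaction (for instance
while a backend sits idle), which is why its new assertion is conditional on
IsTransactionState().  A hypothetical invalidation callback honoring the same
rule might look like this; the names are assumptions, the registration API is
the existing one:

#include "postgres.h"
#include "utils/inval.h"

static bool my_cache_valid = false;	/* hypothetical backend-local cache flag */

/*
 * Relcache invalidation callback.  It may be called outside a transaction,
 * so it must not read catalogs; it only marks the cache stale and lets the
 * next in-transaction lookup rebuild it.
 */
static void
my_inval_callback(Datum arg, Oid relid)
{
	my_cache_valid = false;
}

/* registered once, e.g. during cache initialization: */
/*	CacheRegisterRelcacheCallback(my_inval_callback, (Datum) 0); */
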
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index 01ff1528da9..70d197292d3 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -2020,6 +2020,23 @@ formrdesc(const char *relationName, Oid relationReltype,
relation->rd_isvalid = true;
}
+#ifdef USE_ASSERT_CHECKING
+/*
+ * AssertCouldGetRelation
+ *
+ * Check safety of calling RelationIdGetRelation().
+ *
+ * In code that reads catalogs in the event of a cache miss, call this
+ * before checking the cache.
+ */
+void
+AssertCouldGetRelation(void)
+{
+ Assert(IsTransactionState());
+ AssertBufferLocksPermitCatalogRead();
+}
+#endif
+
/* ----------------------------------------------------------------
* Relation Descriptor Lookup Interface
@@ -2047,8 +2064,7 @@ RelationIdGetRelation(Oid relationId)
{
Relation rd;
- /* Make sure we're in an xact, even if this ends up being a cache hit */
- Assert(IsTransactionState());
+ AssertCouldGetRelation();
/*
* first try to find reldesc in the cache
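
The same assert-before-cache-check pattern generalizes to other backend-local
caches built over catalog data.  A minimal sketch under that assumption;
MyEntry and my_cache_hash are hypothetical:

#include "postgres.h"
#include "utils/hsearch.h"
#include "utils/relcache.h"

typedef struct MyEntry
{
	Oid			objid;			/* hash key */
	/* ... cached data derived from catalogs ... */
} MyEntry;

static HTAB *my_cache_hash;		/* created elsewhere with hash_create() */

static MyEntry *
my_cache_lookup(Oid objid)
{
	MyEntry    *entry;
	bool		found;

	/* assert before consulting the cache: any hit could have been a miss */
	AssertCouldGetRelation();

	entry = (MyEntry *) hash_search(my_cache_hash, &objid,
									HASH_ENTER, &found);
	if (!found)
	{
		/* cache miss: a real implementation would scan catalogs here, */
		/* which is exactly why the assertion above must come first */
	}
	return entry;
}
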
diff --git a/src/backend/utils/mb/mbutils.c b/src/backend/utils/mb/mbutils.c
index 1abe90d4aa5..b308ff826fe 100644
--- a/src/backend/utils/mb/mbutils.c
+++ b/src/backend/utils/mb/mbutils.c
@@ -39,6 +39,7 @@
#include "mb/pg_wchar.h"
#include "utils/builtins.h"
#include "utils/memutils.h"
+#include "utils/relcache.h"
#include "utils/syscache.h"
/*
@@ -310,7 +311,7 @@ InitializeClientEncoding(void)
{
Oid utf8_to_server_proc;
- Assert(IsTransactionState());
+ AssertCouldGetRelation();
utf8_to_server_proc =
FindDefaultConversionProc(PG_UTF8,
current_server_encoding);
diff --git a/src/include/storage/bufmgr.h b/src/include/storage/bufmgr.h
index aa64fb42ec4..85caac3253d 100644
--- a/src/include/storage/bufmgr.h
+++ b/src/include/storage/bufmgr.h
@@ -196,6 +196,9 @@ extern void InitBufferPool(void);
extern void InitBufferPoolAccess(void);
extern void InitBufferPoolBackend(void);
extern void AtEOXact_Buffers(bool isCommit);
+#ifdef USE_ASSERT_CHECKING
+extern void AssertBufferLocksPermitCatalogRead(void);
+#endif
extern void PrintBufferLeakWarning(Buffer buffer);
extern void CheckPointBuffers(int flags);
extern BlockNumber BufferGetBlockNumber(Buffer buffer);
diff --git a/src/include/storage/lwlock.h b/src/include/storage/lwlock.h
index b2c9a9dbbcc..6ef1bee18a2 100644
--- a/src/include/storage/lwlock.h
+++ b/src/include/storage/lwlock.h
@@ -127,6 +127,8 @@ extern bool LWLockAcquireOrWait(LWLock *lock, LWLockMode mode);
extern void LWLockRelease(LWLock *lock);
extern void LWLockReleaseClearVar(LWLock *lock, uint64 *valptr, uint64 val);
extern void LWLockReleaseAll(void);
+extern void ForEachLWLockHeldByMe(void (*callback) (LWLock *, LWLockMode, void *),
+ void *context);
extern bool LWLockHeldByMe(LWLock *lock);
extern bool LWLockAnyHeldByMe(LWLock *lock, int nlocks, size_t stride);
extern bool LWLockHeldByMeInMode(LWLock *lock, LWLockMode mode);
diff --git a/src/include/utils/relcache.h b/src/include/utils/relcache.h
index aa060ef115b..08fb6120579 100644
--- a/src/include/utils/relcache.h
+++ b/src/include/utils/relcache.h
@@ -36,6 +36,14 @@ typedef Relation *RelationPtr;
/*
* Routines to open (lookup) and close a relcache entry
*/
+#ifdef USE_ASSERT_CHECKING
+extern void AssertCouldGetRelation(void);
+#else
+static inline void
+AssertCouldGetRelation(void)
+{
+}
+#endif
extern Relation RelationIdGetRelation(Oid relationId);
extern void RelationClose(Relation relation);