-rw-r--r--   src/backend/storage/buffer/bufmgr.c    67
-rw-r--r--   src/backend/storage/lmgr/lwlock.c      15
-rw-r--r--   src/backend/utils/adt/pg_locale.c       3
-rw-r--r--   src/backend/utils/cache/catcache.c     51
-rw-r--r--   src/backend/utils/cache/inval.c        14
-rw-r--r--   src/backend/utils/cache/relcache.c     20
-rw-r--r--   src/backend/utils/mb/mbutils.c          3
-rw-r--r--   src/include/storage/bufmgr.h            3
-rw-r--r--   src/include/storage/lwlock.h            2
-rw-r--r--   src/include/utils/relcache.h            8
10 files changed, 166 insertions, 20 deletions
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index f8d30bf71e1..7daf1bed041 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -40,6 +40,9 @@
#include "access/tableam.h"
#include "access/xloginsert.h"
#include "access/xlogutils.h"
+#ifdef USE_ASSERT_CHECKING
+#include "catalog/pg_tablespace_d.h"
+#endif
#include "catalog/storage.h"
#include "catalog/storage_xlog.h"
#include "executor/instrument.h"
@@ -535,6 +538,10 @@ static void RelationCopyStorageUsingBuffer(RelFileLocator srclocator,
ForkNumber forkNum, bool permanent);
static void AtProcExit_Buffers(int code, Datum arg);
static void CheckForBufferLeaks(void);
+#ifdef USE_ASSERT_CHECKING
+static void AssertNotCatalogBufferLock(LWLock *lock, LWLockMode mode,
+ void *unused_context);
+#endif
static int rlocator_comparator(const void *p1, const void *p2);
static inline int buffertag_comparator(const BufferTag *ba, const BufferTag *bb);
static inline int ckpt_buforder_comparator(const CkptSortItem *a, const CkptSortItem *b);
@@ -3647,6 +3654,66 @@ CheckForBufferLeaks(void)
#endif
}
+#ifdef USE_ASSERT_CHECKING
+/*
+ * Check for exclusive-locked catalog buffers. This is the core of
+ * AssertCouldGetRelation().
+ *
+ * A backend would self-deadlock on LWLocks if the catalog scan read the
+ * exclusive-locked buffer. The main threat is exclusive-locked buffers of
+ * catalogs used in relcache, because a catcache search on any catalog may
+ * build that catalog's relcache entry. We don't have an inventory of
+ * catalogs relcache uses, so just check buffers of most catalogs.
+ *
+ * It's better to minimize waits while holding an exclusive buffer lock, so it
+ * would be nice to broaden this check not to be catalog-specific. However,
+ * bttextcmp() accesses pg_collation, and non-core opclasses might similarly
+ * read tables. That is deadlock-free as long as there's no loop in the
+ * dependency graph: modifying table A may cause an opclass to read table B,
+ * but it must not cause a read of table A.
+ */
+void
+AssertBufferLocksPermitCatalogRead(void)
+{
+ ForEachLWLockHeldByMe(AssertNotCatalogBufferLock, NULL);
+}
+
+static void
+AssertNotCatalogBufferLock(LWLock *lock, LWLockMode mode,
+ void *unused_context)
+{
+ BufferDesc *bufHdr;
+ BufferTag tag;
+ Oid relid;
+
+ if (mode != LW_EXCLUSIVE)
+ return;
+
+ if (!((BufferDescPadded *) lock > BufferDescriptors &&
+ (BufferDescPadded *) lock < BufferDescriptors + NBuffers))
+ return; /* not a buffer lock */
+
+ bufHdr = (BufferDesc *)
+ ((char *) lock - offsetof(BufferDesc, content_lock));
+ tag = bufHdr->tag;
+
+ /*
+ * This relNumber==relid assumption holds until a catalog experiences
+ * VACUUM FULL or similar. After a command like that, relNumber will be
+ * in the normal (non-catalog) range, and we lose the ability to detect
+ * hazardous access to that catalog. Calling RelidByRelfilenumber() would
+ * close that gap, but RelidByRelfilenumber() might then deadlock with a
+ * held lock.
+ */
+ relid = tag.relNumber;
+
+ Assert(!IsCatalogRelationOid(relid));
+ /* Shared rels are always catalogs: detect even after VACUUM FULL. */
+ Assert(tag.spcOid != GLOBALTABLESPACE_OID);
+}
+#endif
+
+
/*
* Helper routine to issue warnings when a buffer is unexpectedly pinned
*/
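[Editor's note: the sketch below is not part of the patch; it is a hypothetical illustration of the self-deadlock hazard that AssertNotCatalogBufferLock() is meant to catch. A backend exclusive-locks a catalog page and then performs a syscache probe; the probe now reaches AssertCouldGetRelation() -> AssertBufferLocksPermitCatalogRead() and fails the assertion cleanly, instead of the backend waiting on its own buffer lock when the probe misses and scans the same page. The function name and arguments are invented for illustration.]

#include "postgres.h"

#include "access/htup_details.h"
#include "storage/bufmgr.h"
#include "utils/syscache.h"

/* Hypothetical (broken) coding pattern the new assertion detects. */
static void
hazardous_catalog_update(Buffer pg_class_buf, Oid some_relid)
{
	HeapTuple	tup;

	/* Exclusive-lock a pg_class page we are about to modify. */
	LockBuffer(pg_class_buf, BUFFER_LOCK_EXCLUSIVE);

	/*
	 * BAD: a syscache miss here starts a catalog scan that may try to take
	 * a shared content lock on the very page locked above, so the backend
	 * would wait on itself.  With assertions enabled, the lookup now trips
	 * AssertBufferLocksPermitCatalogRead() before any such wait.
	 */
	tup = SearchSysCache1(RELOID, ObjectIdGetDatum(some_relid));
	if (HeapTupleIsValid(tup))
		ReleaseSysCache(tup);

	LockBuffer(pg_class_buf, BUFFER_LOCK_UNLOCK);
}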
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index e4865bfd44d..05a09ad4b4a 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -1887,6 +1887,21 @@ LWLockReleaseAll(void)
/*
+ * ForEachLWLockHeldByMe - run a callback for each held lock
+ *
+ * This is meant as debug support only.
+ */
+void
+ForEachLWLockHeldByMe(void (*callback) (LWLock *, LWLockMode, void *),
+ void *context)
+{
+ int i;
+
+ for (i = 0; i < num_held_lwlocks; i++)
+ callback(held_lwlocks[i].lock, held_lwlocks[i].mode, context);
+}
+
+/*
* LWLockHeldByMe - test whether my process holds a lock in any mode
*
* This is meant as debug support only.
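[Editor's note: ForEachLWLockHeldByMe() walks the backend-local held_lwlocks[] array and invokes the callback once per held lock. Besides the catalog-buffer check above, it can back other assert-only diagnostics; a minimal hypothetical sketch follows, with invented helper names.]

#include "postgres.h"

#include "storage/lwlock.h"

/* Hypothetical assert-only helper: count LWLocks held in exclusive mode. */
static void
count_exclusive_lwlock(LWLock *lock, LWLockMode mode, void *context)
{
	int		   *n = (int *) context;

	if (mode == LW_EXCLUSIVE)
		(*n)++;
}

static int
CountExclusiveLWLocksHeldByMe(void)
{
	int			n = 0;

	ForEachLWLockHeldByMe(count_exclusive_lwlock, &n);
	return n;
}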
diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c
index 4c85a01b284..d2b16d81fe3 100644
--- a/src/backend/utils/adt/pg_locale.c
+++ b/src/backend/utils/adt/pg_locale.c
@@ -66,6 +66,7 @@
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/pg_locale.h"
+#include "utils/relcache.h"
#include "utils/syscache.h"
#ifdef USE_ICU
@@ -1258,6 +1259,8 @@ lookup_collation_cache(Oid collation, bool set_flags)
Assert(OidIsValid(collation));
Assert(collation != DEFAULT_COLLATION_OID);
+ AssertCouldGetRelation();
+
if (collation_cache == NULL)
{
/* First time through, initialize the hash table */
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index 5169ca72bc0..7a67b46a6b8 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -1055,11 +1055,40 @@ RehashCatCacheLists(CatCache *cp)
}
/*
+ * ConditionalCatalogCacheInitializeCache
+ *
+ * Call CatalogCacheInitializeCache() if not yet done.
+ */
+pg_attribute_always_inline
+static void
+ConditionalCatalogCacheInitializeCache(CatCache *cache)
+{
+#ifdef USE_ASSERT_CHECKING
+ /*
+ * TypeCacheRelCallback() runs outside transactions and relies on TYPEOID
+ * for hashing. This isn't ideal. Since lookup_type_cache() both
+ * registers the callback and searches TYPEOID, reaching trouble likely
+ * requires OOM at an unlucky moment.
+ *
+ * InvalidateAttoptCacheCallback() runs outside transactions and likewise
+ * relies on ATTNUM. InitPostgres() initializes ATTNUM, so it's reliable.
+ */
+ if (!(cache->id == TYPEOID || cache->id == ATTNUM) ||
+ IsTransactionState())
+ AssertCouldGetRelation();
+ else
+ Assert(cache->cc_tupdesc != NULL);
+#endif
+
+ if (unlikely(cache->cc_tupdesc == NULL))
+ CatalogCacheInitializeCache(cache);
+}
+
+/*
* CatalogCacheInitializeCache
*
* This function does final initialization of a catcache: obtain the tuple
- * descriptor and set up the hash and equality function links. We assume
- * that the relcache entry can be opened at this point!
+ * descriptor and set up the hash and equality function links.
*/
#ifdef CACHEDEBUG
#define CatalogCacheInitializeCache_DEBUG1 \
@@ -1194,8 +1223,7 @@ CatalogCacheInitializeCache(CatCache *cache)
void
InitCatCachePhase2(CatCache *cache, bool touch_index)
{
- if (cache->cc_tupdesc == NULL)
- CatalogCacheInitializeCache(cache);
+ ConditionalCatalogCacheInitializeCache(cache);
if (touch_index &&
cache->id != AMOID &&
@@ -1374,16 +1402,12 @@ SearchCatCacheInternal(CatCache *cache,
dlist_head *bucket;
CatCTup *ct;
- /* Make sure we're in an xact, even if this ends up being a cache hit */
- Assert(IsTransactionState());
-
Assert(cache->cc_nkeys == nkeys);
/*
* one-time startup overhead for each cache
*/
- if (unlikely(cache->cc_tupdesc == NULL))
- CatalogCacheInitializeCache(cache);
+ ConditionalCatalogCacheInitializeCache(cache);
#ifdef CATCACHE_STATS
cache->cc_searches++;
@@ -1669,8 +1693,7 @@ GetCatCacheHashValue(CatCache *cache,
/*
* one-time startup overhead for each cache
*/
- if (cache->cc_tupdesc == NULL)
- CatalogCacheInitializeCache(cache);
+ ConditionalCatalogCacheInitializeCache(cache);
/*
* calculate the hash value
@@ -1721,8 +1744,7 @@ SearchCatCacheList(CatCache *cache,
/*
* one-time startup overhead for each cache
*/
- if (unlikely(cache->cc_tupdesc == NULL))
- CatalogCacheInitializeCache(cache);
+ ConditionalCatalogCacheInitializeCache(cache);
Assert(nkeys > 0 && nkeys < cache->cc_nkeys);
@@ -2392,8 +2414,7 @@ PrepareToInvalidateCacheTuple(Relation relation,
continue;
/* Just in case cache hasn't finished initialization yet... */
- if (ccp->cc_tupdesc == NULL)
- CatalogCacheInitializeCache(ccp);
+ ConditionalCatalogCacheInitializeCache(ccp);
hashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, tuple);
dbid = ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId;
diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c
index 4c8715bf672..92f9bf6f466 100644
--- a/src/backend/utils/cache/inval.c
+++ b/src/backend/utils/cache/inval.c
@@ -631,7 +631,8 @@ PrepareInvalidationState(void)
{
TransInvalidationInfo *myInfo;
- Assert(IsTransactionState());
+ /* PrepareToInvalidateCacheTuple() needs relcache */
+ AssertCouldGetRelation();
/* Can't queue transactional message while collecting inplace messages. */
Assert(inplaceInvalInfo == NULL);
@@ -700,7 +701,7 @@ PrepareInplaceInvalidationState(void)
{
InvalidationInfo *myInfo;
- Assert(IsTransactionState());
+ AssertCouldGetRelation();
/* limit of one inplace update under assembly */
Assert(inplaceInvalInfo == NULL);
@@ -863,6 +864,12 @@ InvalidateSystemCaches(void)
void
AcceptInvalidationMessages(void)
{
+#ifdef USE_ASSERT_CHECKING
+ /* message handlers shall access catalogs only during transactions */
+ if (IsTransactionState())
+ AssertCouldGetRelation();
+#endif
+
ReceiveSharedInvalidMessages(LocalExecuteInvalidationMessage,
InvalidateSystemCaches);
@@ -1327,6 +1334,9 @@ CacheInvalidateHeapTupleCommon(Relation relation,
Oid databaseId;
Oid relationId;
+ /* PrepareToInvalidateCacheTuple() needs relcache */
+ AssertCouldGetRelation();
+
/* Do nothing during bootstrap */
if (IsBootstrapProcessingMode())
return;
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index 3f1e8ce1f5f..9e0519cd1d0 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -2037,6 +2037,23 @@ formrdesc(const char *relationName, Oid relationReltype,
relation->rd_isvalid = true;
}
+#ifdef USE_ASSERT_CHECKING
+/*
+ * AssertCouldGetRelation
+ *
+ * Check safety of calling RelationIdGetRelation().
+ *
+ * In code that reads catalogs in the event of a cache miss, call this
+ * before checking the cache.
+ */
+void
+AssertCouldGetRelation(void)
+{
+ Assert(IsTransactionState());
+ AssertBufferLocksPermitCatalogRead();
+}
+#endif
+
/* ----------------------------------------------------------------
* Relation Descriptor Lookup Interface
@@ -2064,8 +2081,7 @@ RelationIdGetRelation(Oid relationId)
{
Relation rd;
- /* Make sure we're in an xact, even if this ends up being a cache hit */
- Assert(IsTransactionState());
+ AssertCouldGetRelation();
/*
* first try to find reldesc in the cache
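[Editor's note: the header comment's guidance, namely calling AssertCouldGetRelation() before checking the cache rather than only on the miss path, mirrors what the patch does in lookup_collation_cache() and SearchCatCacheInternal(). A hypothetical backend-local cache following the same pattern might look like the sketch below; the cache and helper names are invented for illustration.]

#include "postgres.h"

#include "utils/hsearch.h"
#include "utils/relcache.h"

typedef struct MyCacheEntry
{
	Oid			relid;			/* hash key */
	bool		has_feature;	/* value derived from catalog contents */
} MyCacheEntry;

static HTAB *my_cache = NULL;

static MyCacheEntry *
lookup_my_cache(Oid relid)
{
	MyCacheEntry *entry;
	bool		found;

	/*
	 * Assert before probing the hash table: even a hit must happen only in
	 * contexts where a miss (which reads catalogs) would have been safe,
	 * otherwise the hazard stays hidden until the cache is cold.
	 */
	AssertCouldGetRelation();

	if (my_cache == NULL)
	{
		HASHCTL		ctl;

		ctl.keysize = sizeof(Oid);
		ctl.entrysize = sizeof(MyCacheEntry);
		my_cache = hash_create("my feature cache", 64, &ctl,
							   HASH_ELEM | HASH_BLOBS);
	}

	entry = (MyCacheEntry *) hash_search(my_cache, &relid, HASH_ENTER, &found);
	if (!found)
		entry->has_feature = false; /* ... fill from a catalog lookup ... */

	return entry;
}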
diff --git a/src/backend/utils/mb/mbutils.c b/src/backend/utils/mb/mbutils.c
index 97a4d695159..6af3b8effac 100644
--- a/src/backend/utils/mb/mbutils.c
+++ b/src/backend/utils/mb/mbutils.c
@@ -39,6 +39,7 @@
#include "mb/pg_wchar.h"
#include "utils/fmgrprotos.h"
#include "utils/memutils.h"
+#include "utils/relcache.h"
#include "varatt.h"
/*
@@ -310,7 +311,7 @@ InitializeClientEncoding(void)
{
Oid utf8_to_server_proc;
- Assert(IsTransactionState());
+ AssertCouldGetRelation();
utf8_to_server_proc =
FindDefaultConversionProc(PG_UTF8,
current_server_encoding);
diff --git a/src/include/storage/bufmgr.h b/src/include/storage/bufmgr.h
index a1e71013d32..09c43dd9c72 100644
--- a/src/include/storage/bufmgr.h
+++ b/src/include/storage/bufmgr.h
@@ -255,6 +255,9 @@ extern Buffer ExtendBufferedRelTo(BufferManagerRelation bmr,
extern void InitBufferPoolAccess(void);
extern void AtEOXact_Buffers(bool isCommit);
+#ifdef USE_ASSERT_CHECKING
+extern void AssertBufferLocksPermitCatalogRead(void);
+#endif
extern char *DebugPrintBufferRefcount(Buffer buffer);
extern void CheckPointBuffers(int flags);
extern BlockNumber BufferGetBlockNumber(Buffer buffer);
diff --git a/src/include/storage/lwlock.h b/src/include/storage/lwlock.h
index d70e6d37e09..5435948a1cd 100644
--- a/src/include/storage/lwlock.h
+++ b/src/include/storage/lwlock.h
@@ -129,6 +129,8 @@ extern bool LWLockAcquireOrWait(LWLock *lock, LWLockMode mode);
extern void LWLockRelease(LWLock *lock);
extern void LWLockReleaseClearVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 val);
extern void LWLockReleaseAll(void);
+extern void ForEachLWLockHeldByMe(void (*callback) (LWLock *, LWLockMode, void *),
+ void *context);
extern bool LWLockHeldByMe(LWLock *lock);
extern bool LWLockAnyHeldByMe(LWLock *lock, int nlocks, size_t stride);
extern bool LWLockHeldByMeInMode(LWLock *lock, LWLockMode mode);
diff --git a/src/include/utils/relcache.h b/src/include/utils/relcache.h
index 18c32ea7008..6cb829e59ce 100644
--- a/src/include/utils/relcache.h
+++ b/src/include/utils/relcache.h
@@ -37,6 +37,14 @@ typedef Relation *RelationPtr;
/*
* Routines to open (lookup) and close a relcache entry
*/
+#ifdef USE_ASSERT_CHECKING
+extern void AssertCouldGetRelation(void);
+#else
+static inline void
+AssertCouldGetRelation(void)
+{
+}
+#endif
extern Relation RelationIdGetRelation(Oid relationId);
extern void RelationClose(Relation relation);