Diffstat (limited to 'src/backend/access/heap/heapam.c')
| -rw-r--r-- | src/backend/access/heap/heapam.c | 51 |
1 file changed, 26 insertions, 25 deletions
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 225f9829f22..6daf4a87dec 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -6116,7 +6116,7 @@ heap_finish_speculative(Relation relation, const ItemPointerData *tid)
 	Buffer		buffer;
 	Page		page;
 	OffsetNumber offnum;
-	ItemId		lp = NULL;
+	ItemId		lp;
 	HeapTupleHeader htup;
 
 	buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
@@ -6124,10 +6124,10 @@ heap_finish_speculative(Relation relation, const ItemPointerData *tid)
 	page = BufferGetPage(buffer);
 
 	offnum = ItemPointerGetOffsetNumber(tid);
-	if (PageGetMaxOffsetNumber(page) >= offnum)
-		lp = PageGetItemId(page, offnum);
-
-	if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
+	if (offnum < 1 || offnum > PageGetMaxOffsetNumber(page))
+		elog(ERROR, "offnum out of range");
+	lp = PageGetItemId(page, offnum);
+	if (!ItemIdIsNormal(lp))
 		elog(ERROR, "invalid lp");
 
 	htup = (HeapTupleHeader) PageGetItem(page, lp);
@@ -6349,10 +6349,13 @@ heap_abort_speculative(Relation relation, const ItemPointerData *tid)
  * Since this is intended for system catalogs and SERIALIZABLE doesn't cover
  * DDL, this doesn't guarantee any particular predicate locking.
  *
- * One could modify this to return true for tuples with delete in progress,
- * All inplace updaters take a lock that conflicts with DROP.  If explicit
- * "DELETE FROM pg_class" is in progress, we'll wait for it like we would an
- * update.
+ * heap_delete() is a rarer source of blocking transactions (xwait).  We'll
+ * wait for such a transaction just like for the normal heap_update() case.
+ * Normal concurrent DROP commands won't cause that, because all inplace
+ * updaters take some lock that conflicts with DROP.  An explicit SQL "DELETE
+ * FROM pg_class" can cause it.  By waiting, if the concurrent transaction
+ * executed both "DELETE FROM pg_class" and "INSERT INTO pg_class", our caller
+ * can find the successor tuple.
  *
  * Readers of inplace-updated fields expect changes to those fields are
  * durable.  For example, vac_truncate_clog() reads datfrozenxid from
@@ -6393,15 +6396,17 @@ heap_inplace_lock(Relation relation,
 	Assert(BufferIsValid(buffer));
 
 	/*
-	 * Construct shared cache inval if necessary.  Because we pass a tuple
-	 * version without our own inplace changes or inplace changes other
-	 * sessions complete while we wait for locks, inplace update mustn't
-	 * change catcache lookup keys.  But we aren't bothering with index
-	 * updates either, so that's true a fortiori.  After LockBuffer(), it
-	 * would be too late, because this might reach a
-	 * CatalogCacheInitializeCache() that locks "buffer".
+	 * Register shared cache invals if necessary.  Other sessions may finish
+	 * inplace updates of this tuple between this step and LockTuple().  Since
+	 * inplace updates don't change cache keys, that's harmless.
+	 *
+	 * While it's tempting to register invals only after confirming we can
+	 * return true, the following obstacle precludes reordering steps that
+	 * way.  Registering invals might reach a CatalogCacheInitializeCache()
+	 * that locks "buffer".  That would hang indefinitely if running after our
+	 * own LockBuffer().  Hence, we must register invals before LockBuffer().
 	 */
-	CacheInvalidateHeapTupleInplace(relation, oldtup_ptr, NULL);
+	CacheInvalidateHeapTupleInplace(relation, oldtup_ptr);
 
 	LockTuple(relation, &oldtup.t_self, InplaceUpdateTupleLock);
 	LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
@@ -6639,10 +6644,6 @@ heap_inplace_update_and_unlock(Relation relation,
 	/*
 	 * Send invalidations to shared queue.  SearchSysCacheLocked1() assumes we
 	 * do this before UnlockTuple().
-	 *
-	 * If we're mutating a tuple visible only to this transaction, there's an
-	 * equivalent transactional inval from the action that created the tuple,
-	 * and this inval is superfluous.
 	 */
 	AtInplace_Inval();
 
@@ -6653,10 +6654,10 @@ heap_inplace_update_and_unlock(Relation relation,
 	AcceptInvalidationMessages();	/* local processing of just-sent inval */
 
 	/*
-	 * Queue a transactional inval.  The immediate invalidation we just sent
-	 * is the only one known to be necessary.  To reduce risk from the
-	 * transition to immediate invalidation, continue sending a transactional
-	 * invalidation like we've long done.  Third-party code might rely on it.
+	 * Queue a transactional inval, for logical decoding and for third-party
+	 * code that might have been relying on it since long before inplace
+	 * update adopted immediate invalidation.  See README.tuplock section
+	 * "Reading inplace-updated columns" for logical decoding details.
 	 */
 	if (!IsBootstrapProcessingMode())
 		CacheInvalidateHeapTuple(relation, tuple, NULL);
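
Editor's note: the first two hunks reorder the line-pointer sanity checks in heap_finish_speculative() so the offset number is range-checked before PageGetItemId() is ever called, and only then is the line pointer tested with ItemIdIsNormal(). The sketch below restates that validation order as a standalone helper; get_normal_item_id() is a hypothetical name, not part of the patch, and it uses only the bufpage.h/itemid.h calls that already appear in the hunks.

#include "postgres.h"
#include "storage/bufpage.h"
#include "storage/itemid.h"

/*
 * Hypothetical helper mirroring the validation order the patch adopts:
 * reject an out-of-range offset number before calling PageGetItemId(),
 * then reject non-normal line pointers via ItemIdIsNormal().
 */
static ItemId
get_normal_item_id(Page page, OffsetNumber offnum)
{
	ItemId		lp;

	/* range check first, so PageGetItemId() never sees a bad offset */
	if (offnum < 1 || offnum > PageGetMaxOffsetNumber(page))
		elog(ERROR, "offnum out of range");

	lp = PageGetItemId(page, offnum);

	/* only LP_NORMAL line pointers carry a tuple we can use */
	if (!ItemIdIsNormal(lp))
		elog(ERROR, "invalid lp");

	return lp;
}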

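Editor's note: the remaining hunks document the ordering that inplace updates follow around cache invalidation. On the acquisition side (heap_inplace_lock), invals are registered before LockTuple() and LockBuffer(); on the release side (heap_inplace_update_and_unlock), the immediate invals go to the shared queue before UnlockTuple(), and a transactional inval is still queued afterward for logical decoding and third-party code. The condensed sketch below strings those calls together in that order. It is an illustration only: the two function names are invented, the real functions' WAL logging, buffer-lock release, and error handling are elided, and the two-argument CacheInvalidateHeapTupleInplace() is the post-patch form shown in the hunks.

#include "postgres.h"
#include "access/heapam.h"
#include "miscadmin.h"
#include "storage/bufmgr.h"
#include "storage/lmgr.h"
#include "utils/inval.h"

/* Acquisition-side ordering sketched from heap_inplace_lock(). */
static void
inplace_lock_ordering_sketch(Relation relation, HeapTuple oldtup_ptr,
							 Buffer buffer)
{
	/*
	 * 1. Register shared cache invals while no buffer lock is held.
	 *    Registering later could reach CatalogCacheInitializeCache(),
	 *    which may itself try to lock "buffer" and hang.
	 */
	CacheInvalidateHeapTupleInplace(relation, oldtup_ptr);

	/* 2. Tuple-level lock, part of the inplace invalidation protocol. */
	LockTuple(relation, &oldtup_ptr->t_self, InplaceUpdateTupleLock);

	/* 3. Only now take the buffer content lock. */
	LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
}

/* Release-side ordering sketched from heap_inplace_update_and_unlock(). */
static void
inplace_unlock_ordering_sketch(Relation relation, HeapTuple tuple)
{
	/* 1. Immediate (nontransactional) invals; must precede UnlockTuple(). */
	AtInplace_Inval();

	/* (the real function also releases its buffer lock around here) */
	UnlockTuple(relation, &tuple->t_self, InplaceUpdateTupleLock);

	/* 2. Local processing of the invals we just sent. */
	AcceptInvalidationMessages();

	/*
	 * 3. Also queue a transactional inval, for logical decoding and for
	 *    third-party code that has long relied on it.
	 */
	if (!IsBootstrapProcessingMode())
		CacheInvalidateHeapTuple(relation, tuple, NULL);
}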