 mm/swap.h       | 12 +++++++++++-
 mm/swap_state.c |  3 ++-
 2 files changed, 13 insertions(+), 2 deletions(-)
diff --git a/mm/swap.h b/mm/swap.h
index 742db4d46d2356..adcd85fa85381f 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -99,6 +99,16 @@ static __always_inline struct swap_cluster_info *__swap_cluster_lock(
 {
 	struct swap_cluster_info *ci = __swap_offset_to_cluster(si, offset);
 
+	/*
+	 * Nothing modifies swap cache in an IRQ context. All access to
+	 * swap cache is wrapped by swap_cache_* helpers, and swap cache
+	 * writeback is handled outside of IRQs. Swapin or swapout never
+	 * occurs in IRQ, and neither does in-place split or replace.
+	 *
+	 * Besides, modifying swap cache requires synchronization with
+	 * swap_map, which was never IRQ safe.
+	 */
+	VM_WARN_ON_ONCE(!in_task());
 	VM_WARN_ON_ONCE(percpu_ref_is_zero(&si->users)); /* race with swapoff */
 	if (irq)
 		spin_lock_irq(&ci->lock);
@@ -192,7 +202,7 @@ void __swap_writepage(struct folio *folio, struct swap_iocb **swap_plug);
 #define SWAP_ADDRESS_SPACE_SHIFT	14
 #define SWAP_ADDRESS_SPACE_PAGES	(1 << SWAP_ADDRESS_SPACE_SHIFT)
 #define SWAP_ADDRESS_SPACE_MASK		(SWAP_ADDRESS_SPACE_PAGES - 1)
-extern struct address_space swap_space;
+extern struct address_space swap_space __ro_after_init;
 static inline struct address_space *swap_address_space(swp_entry_t entry)
 {
 	return &swap_space;
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 2558a648d671ef..a1478cbff38479 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -37,7 +37,8 @@ static const struct address_space_operations swap_aops = {
 #endif
 };
 
-struct address_space swap_space __read_mostly = {
+/* Set swap_space as read only as swap cache is handled by swap table */
+struct address_space swap_space __ro_after_init = {
 	.a_ops = &swap_aops,
 };
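For readers unfamiliar with the assertion added in the first hunk: in_task() is false in hardirq, softirq, and NMI context, so VM_WARN_ON_ONCE(!in_task()) turns the "process context only" rule described in the new comment into a one-shot warning under CONFIG_DEBUG_VM. Below is a minimal illustrative sketch of the same pattern; the lock and function names are hypothetical and not part of this patch.

	/* Illustrative only: a lock documented as process-context only. */
	#include <linux/preempt.h>	/* in_task() */
	#include <linux/mmdebug.h>	/* VM_WARN_ON_ONCE() */
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);		/* hypothetical lock */

	static void example_lock_in_task_only(void)
	{
		/*
		 * in_task() is false in hardirq, softirq and NMI context;
		 * warn once (with CONFIG_DEBUG_VM) if the rule is violated.
		 */
		VM_WARN_ON_ONCE(!in_task());

		/* No _irq/_irqsave variant needed if IRQs never take it. */
		spin_lock(&example_lock);
		/* ... critical section ... */
		spin_unlock(&example_lock);
	}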

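The second change relies on __ro_after_init, which keeps a variable writable during boot and has the kernel remap it read-only once initialization finishes (on architectures with strict kernel RWX). A minimal sketch of that pattern, again using hypothetical names rather than anything from this patch:

	/* Illustrative only: data set up once at boot, then sealed read-only. */
	#include <linux/cache.h>	/* __ro_after_init */
	#include <linux/init.h>

	static unsigned long example_limit __ro_after_init;	/* hypothetical */

	static int __init example_limit_setup(void)
	{
		example_limit = 1024;	/* still writable during early init */
		return 0;
	}
	early_initcall(example_limit_setup);

	/*
	 * Once mark_rodata_ro() has run, the .data..ro_after_init section
	 * is mapped read-only, so any later store to example_limit faults,
	 * just as a post-init write to swap_space would after this patch.
	 */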