Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug    3
-rw-r--r--  lib/alloc_tag.c     34
-rw-r--r--  lib/iov_iter.c      30
-rw-r--r--  lib/maple_tree.c   191
-rw-r--r--  lib/test_vmalloc.c  22
-rw-r--r--  lib/test_xarray.c   17
-rw-r--r--  lib/xarray.c         9
7 files changed, 177 insertions(+), 129 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 87517564e698af..90edcc06e770d1 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -2568,8 +2568,7 @@ config TEST_BITOPS config TEST_VMALLOC tristate "Test module for stress/performance analysis of vmalloc allocator" default n - depends on MMU - depends on m + depends on MMU help This builds the "test_vmalloc" module that should be used for stress and performance analysis. So, any new change for vmalloc diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c index c7f602fa7b23fc..45dae7da70e17d 100644 --- a/lib/alloc_tag.c +++ b/lib/alloc_tag.c @@ -244,17 +244,6 @@ static void shutdown_mem_profiling(bool remove_file) mem_profiling_support = false; } -static void __init procfs_init(void) -{ - if (!mem_profiling_support) - return; - - if (!proc_create_seq(ALLOCINFO_FILE_NAME, 0400, NULL, &allocinfo_seq_op)) { - pr_err("Failed to create %s file\n", ALLOCINFO_FILE_NAME); - shutdown_mem_profiling(false); - } -} - void __init alloc_tag_sec_init(void) { struct alloc_tag *last_codetag; @@ -813,19 +802,34 @@ static int __init alloc_tag_init(void) }; int res; + sysctl_init(); + + if (!mem_profiling_support) { + pr_info("Memory allocation profiling is not supported!\n"); + return 0; + } + + if (!proc_create_seq(ALLOCINFO_FILE_NAME, 0400, NULL, &allocinfo_seq_op)) { + pr_err("Failed to create %s file\n", ALLOCINFO_FILE_NAME); + shutdown_mem_profiling(false); + return -ENOMEM; + } + res = alloc_mod_tags_mem(); - if (res) + if (res) { + pr_err("Failed to reserve address space for module tags, errno = %d\n", res); + shutdown_mem_profiling(true); return res; + } alloc_tag_cttype = codetag_register_type(&desc); if (IS_ERR(alloc_tag_cttype)) { + pr_err("Allocation tags registration failed, errno = %ld\n", PTR_ERR(alloc_tag_cttype)); free_mod_tags_mem(); + shutdown_mem_profiling(true); return PTR_ERR(alloc_tag_cttype); } - sysctl_init(); - procfs_init(); - return 0; } module_init(alloc_tag_init); diff --git a/lib/iov_iter.c b/lib/iov_iter.c index bc9391e55d57ea..d9e19fb2dcf35f 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -1059,22 +1059,22 @@ static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa pgoff_t index, unsigned int nr_pages) { XA_STATE(xas, xa, index); - struct page *page; + struct folio *folio; unsigned int ret = 0; rcu_read_lock(); - for (page = xas_load(&xas); page; page = xas_next(&xas)) { - if (xas_retry(&xas, page)) + for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) { + if (xas_retry(&xas, folio)) continue; - /* Has the page moved or been split? */ - if (unlikely(page != xas_reload(&xas))) { + /* Has the folio moved or been split? 
*/ + if (unlikely(folio != xas_reload(&xas))) { xas_reset(&xas); continue; } - pages[ret] = find_subpage(page, xas.xa_index); - get_page(pages[ret]); + pages[ret] = folio_file_page(folio, xas.xa_index); + folio_get(folio); if (++ret == nr_pages) break; } @@ -1650,11 +1650,11 @@ static ssize_t iov_iter_extract_xarray_pages(struct iov_iter *i, iov_iter_extraction_t extraction_flags, size_t *offset0) { - struct page *page, **p; + struct page **p; + struct folio *folio; unsigned int nr = 0, offset; loff_t pos = i->xarray_start + i->iov_offset; - pgoff_t index = pos >> PAGE_SHIFT; - XA_STATE(xas, i->xarray, index); + XA_STATE(xas, i->xarray, pos >> PAGE_SHIFT); offset = pos & ~PAGE_MASK; *offset0 = offset; @@ -1665,17 +1665,17 @@ static ssize_t iov_iter_extract_xarray_pages(struct iov_iter *i, p = *pages; rcu_read_lock(); - for (page = xas_load(&xas); page; page = xas_next(&xas)) { - if (xas_retry(&xas, page)) + for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) { + if (xas_retry(&xas, folio)) continue; - /* Has the page moved or been split? */ - if (unlikely(page != xas_reload(&xas))) { + /* Has the folio moved or been split? */ + if (unlikely(folio != xas_reload(&xas))) { xas_reset(&xas); continue; } - p[nr++] = find_subpage(page, xas.xa_index); + p[nr++] = folio_file_page(folio, xas.xa_index); if (nr == maxpages) break; } diff --git a/lib/maple_tree.c b/lib/maple_tree.c index d0bea23fa4bc9f..affe979bd14d30 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -211,14 +211,14 @@ static void ma_free_rcu(struct maple_node *node) call_rcu(&node->rcu, mt_free_rcu); } -static void mas_set_height(struct ma_state *mas) +static void mt_set_height(struct maple_tree *mt, unsigned char height) { - unsigned int new_flags = mas->tree->ma_flags; + unsigned int new_flags = mt->ma_flags; new_flags &= ~MT_FLAGS_HEIGHT_MASK; - MAS_BUG_ON(mas, mas->depth > MAPLE_HEIGHT_MAX); - new_flags |= mas->depth << MT_FLAGS_HEIGHT_OFFSET; - mas->tree->ma_flags = new_flags; + MT_BUG_ON(mt, height > MAPLE_HEIGHT_MAX); + new_flags |= height << MT_FLAGS_HEIGHT_OFFSET; + mt->ma_flags = new_flags; } static unsigned int mas_mt_height(struct ma_state *mas) @@ -1371,7 +1371,7 @@ retry: root = mas_root(mas); /* Tree with nodes */ if (likely(xa_is_node(root))) { - mas->depth = 1; + mas->depth = 0; mas->status = ma_active; mas->node = mte_safe_root(root); mas->offset = 0; @@ -1712,9 +1712,10 @@ static inline void mas_adopt_children(struct ma_state *mas, * node as dead. * @mas: the maple state with the new node * @old_enode: The old maple encoded node to replace. + * @new_height: if we are inserting a root node, update the height of the tree */ static inline void mas_put_in_tree(struct ma_state *mas, - struct maple_enode *old_enode) + struct maple_enode *old_enode, char new_height) __must_hold(mas->tree->ma_lock) { unsigned char offset; @@ -1723,7 +1724,7 @@ static inline void mas_put_in_tree(struct ma_state *mas, if (mte_is_root(mas->node)) { mas_mn(mas)->parent = ma_parent_ptr(mas_tree_parent(mas)); rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node)); - mas_set_height(mas); + mt_set_height(mas->tree, new_height); } else { offset = mte_parent_slot(mas->node); @@ -1741,12 +1742,13 @@ static inline void mas_put_in_tree(struct ma_state *mas, * the parent encoding to locate the maple node in the tree. * @mas: the ma_state with @mas->node pointing to the new node. * @old_enode: The old maple encoded node. 
+ * @new_height: The new height of the tree as a result of the operation */ static inline void mas_replace_node(struct ma_state *mas, - struct maple_enode *old_enode) + struct maple_enode *old_enode, unsigned char new_height) __must_hold(mas->tree->ma_lock) { - mas_put_in_tree(mas, old_enode); + mas_put_in_tree(mas, old_enode, new_height); mas_free(mas, old_enode); } @@ -2536,10 +2538,11 @@ static inline void mas_topiary_node(struct ma_state *mas, * * @mas: The maple state pointing at the new data * @old_enode: The maple encoded node being replaced + * @new_height: The new height of the tree as a result of the operation * */ static inline void mas_topiary_replace(struct ma_state *mas, - struct maple_enode *old_enode) + struct maple_enode *old_enode, unsigned char new_height) { struct ma_state tmp[3], tmp_next[3]; MA_TOPIARY(subtrees, mas->tree); @@ -2547,7 +2550,7 @@ static inline void mas_topiary_replace(struct ma_state *mas, int i, n; /* Place data in tree & then mark node as old */ - mas_put_in_tree(mas, old_enode); + mas_put_in_tree(mas, old_enode, new_height); /* Update the parent pointers in the tree */ tmp[0] = *mas; @@ -2631,14 +2634,15 @@ static inline void mas_topiary_replace(struct ma_state *mas, * mas_wmb_replace() - Write memory barrier and replace * @mas: The maple state * @old_enode: The old maple encoded node that is being replaced. + * @new_height: The new height of the tree as a result of the operation * * Updates gap as necessary. */ static inline void mas_wmb_replace(struct ma_state *mas, - struct maple_enode *old_enode) + struct maple_enode *old_enode, unsigned char new_height) { /* Insert the new data in the tree */ - mas_topiary_replace(mas, old_enode); + mas_topiary_replace(mas, old_enode, new_height); if (mte_is_leaf(mas->node)) return; @@ -2737,7 +2741,7 @@ static inline bool mast_sufficient(struct maple_subtree_state *mast) */ static inline bool mast_overflow(struct maple_subtree_state *mast) { - if (mast->bn->b_end >= mt_slot_count(mast->orig_l->node)) + if (mast->bn->b_end > mt_slot_count(mast->orig_l->node)) return true; return false; @@ -2824,6 +2828,7 @@ static void mas_spanning_rebalance(struct ma_state *mas, { unsigned char split, mid_split; unsigned char slot = 0; + unsigned char new_height = 0; /* used if node is a new root */ struct maple_enode *left = NULL, *middle = NULL, *right = NULL; struct maple_enode *old_enode; @@ -2845,8 +2850,6 @@ static void mas_spanning_rebalance(struct ma_state *mas, unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type])) mast_spanning_rebalance(mast); - l_mas.depth = 0; - /* * Each level of the tree is examined and balanced, pushing data to the left or * right, or rebalancing against left or right nodes is employed to avoid @@ -2866,6 +2869,7 @@ static void mas_spanning_rebalance(struct ma_state *mas, mast_set_split_parents(mast, left, middle, right, split, mid_split); mast_cp_to_nodes(mast, left, middle, right, split, mid_split); + new_height++; /* * Copy data from next level in the tree to mast->bn from next @@ -2873,7 +2877,6 @@ static void mas_spanning_rebalance(struct ma_state *mas, */ memset(mast->bn, 0, sizeof(struct maple_big_node)); mast->bn->type = mte_node_type(left); - l_mas.depth++; /* Root already stored in l->node. 
*/ if (mas_is_root_limits(mast->l)) @@ -2890,11 +2893,21 @@ static void mas_spanning_rebalance(struct ma_state *mas, mast_combine_cp_right(mast); mast->orig_l->last = mast->orig_l->max; - if (mast_sufficient(mast)) - continue; + if (mast_sufficient(mast)) { + if (mast_overflow(mast)) + continue; + + if (mast->orig_l->node == mast->orig_r->node) { + /* + * The data in b_node should be stored in one + * node and in the tree + */ + slot = mast->l->offset; + break; + } - if (mast_overflow(mast)) continue; + } /* May be a new root stored in mast->bn */ if (mas_is_root_limits(mast->orig_l)) @@ -2909,8 +2922,9 @@ static void mas_spanning_rebalance(struct ma_state *mas, l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), mte_node_type(mast->orig_l->node)); - l_mas.depth++; + mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true); + new_height++; mas_set_parent(mas, left, l_mas.node, slot); if (middle) mas_set_parent(mas, middle, l_mas.node, ++slot); @@ -2933,7 +2947,7 @@ new_root: mas->min = l_mas.min; mas->max = l_mas.max; mas->offset = l_mas.offset; - mas_wmb_replace(mas, old_enode); + mas_wmb_replace(mas, old_enode, new_height); mtree_range_walk(mas); return; } @@ -3009,6 +3023,7 @@ static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end void __rcu **l_slots, **slots; unsigned long *l_pivs, *pivs, gap; bool in_rcu = mt_in_rcu(mas->tree); + unsigned char new_height = mas_mt_height(mas); MA_STATE(l_mas, mas->tree, mas->index, mas->last); @@ -3103,7 +3118,7 @@ done: mas_ascend(mas); if (in_rcu) { - mas_replace_node(mas, old_eparent); + mas_replace_node(mas, old_eparent, new_height); mas_adopt_children(mas, mas->node); } @@ -3114,10 +3129,9 @@ done: * mas_split_final_node() - Split the final node in a subtree operation. * @mast: the maple subtree state * @mas: The maple state - * @height: The height of the tree in case it's a new root. */ static inline void mas_split_final_node(struct maple_subtree_state *mast, - struct ma_state *mas, int height) + struct ma_state *mas) { struct maple_enode *ancestor; @@ -3126,7 +3140,6 @@ static inline void mas_split_final_node(struct maple_subtree_state *mast, mast->bn->type = maple_arange_64; else mast->bn->type = maple_range_64; - mas->depth = height; } /* * Only a single node is used here, could be root. @@ -3214,7 +3227,6 @@ static inline void mast_split_data(struct maple_subtree_state *mast, * mas_push_data() - Instead of splitting a node, it is beneficial to push the * data to the right or left node if there is room. * @mas: The maple state - * @height: The current height of the maple state * @mast: The maple subtree state * @left: Push left or not. * @@ -3222,8 +3234,8 @@ static inline void mast_split_data(struct maple_subtree_state *mast, * * Return: True if pushed, false otherwise. 
*/ -static inline bool mas_push_data(struct ma_state *mas, int height, - struct maple_subtree_state *mast, bool left) +static inline bool mas_push_data(struct ma_state *mas, + struct maple_subtree_state *mast, bool left) { unsigned char slot_total = mast->bn->b_end; unsigned char end, space, split; @@ -3280,7 +3292,7 @@ static inline bool mas_push_data(struct ma_state *mas, int height, mast_split_data(mast, mas, split); mast_fill_bnode(mast, mas, 2); - mas_split_final_node(mast, mas, height + 1); + mas_split_final_node(mast, mas); return true; } @@ -3293,6 +3305,7 @@ static void mas_split(struct ma_state *mas, struct maple_big_node *b_node) { struct maple_subtree_state mast; int height = 0; + unsigned int orig_height = mas_mt_height(mas); unsigned char mid_split, split = 0; struct maple_enode *old; @@ -3319,7 +3332,6 @@ static void mas_split(struct ma_state *mas, struct maple_big_node *b_node) MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last); trace_ma_op(__func__, mas); - mas->depth = mas_mt_height(mas); mast.l = &l_mas; mast.r = &r_mas; @@ -3327,9 +3339,9 @@ static void mas_split(struct ma_state *mas, struct maple_big_node *b_node) mast.orig_r = &prev_r_mas; mast.bn = b_node; - while (height++ <= mas->depth) { + while (height++ <= orig_height) { if (mt_slots[b_node->type] > b_node->b_end) { - mas_split_final_node(&mast, mas, height); + mas_split_final_node(&mast, mas); break; } @@ -3344,11 +3356,15 @@ static void mas_split(struct ma_state *mas, struct maple_big_node *b_node) * is a significant savings. */ /* Try to push left. */ - if (mas_push_data(mas, height, &mast, true)) + if (mas_push_data(mas, &mast, true)) { + height++; break; + } /* Try to push right. */ - if (mas_push_data(mas, height, &mast, false)) + if (mas_push_data(mas, &mast, false)) { + height++; break; + } split = mab_calc_split(mas, b_node, &mid_split); mast_split_data(&mast, mas, split); @@ -3365,7 +3381,7 @@ static void mas_split(struct ma_state *mas, struct maple_big_node *b_node) /* Set the original node as dead */ old = mas->node; mas->node = l_mas.node; - mas_wmb_replace(mas, old); + mas_wmb_replace(mas, old, height); mtree_range_walk(mas); return; } @@ -3424,8 +3440,7 @@ static inline void mas_root_expand(struct ma_state *mas, void *entry) if (mas->last != ULONG_MAX) pivots[++slot] = ULONG_MAX; - mas->depth = 1; - mas_set_height(mas); + mt_set_height(mas->tree, 1); ma_set_meta(node, maple_leaf_64, 0, slot); /* swap the new root into the tree */ rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node)); @@ -3532,6 +3547,16 @@ static bool mas_wr_walk(struct ma_wr_state *wr_mas) if (ma_is_leaf(wr_mas->type)) return true; + if (mas->end < mt_slots[wr_mas->type] - 1) + wr_mas->vacant_height = mas->depth + 1; + + if (ma_is_root(mas_mn(mas))) { + /* root needs more than 2 entries to be sufficient + 1 */ + if (mas->end > 2) + wr_mas->sufficient_height = 1; + } else if (mas->end > mt_min_slots[wr_mas->type] + 1) + wr_mas->sufficient_height = mas->depth + 1; + mas_wr_walk_traverse(wr_mas); } @@ -3669,8 +3694,7 @@ static inline void mas_new_root(struct ma_state *mas, void *entry) WARN_ON_ONCE(mas->index || mas->last != ULONG_MAX); if (!entry) { - mas->depth = 0; - mas_set_height(mas); + mt_set_height(mas->tree, 0); rcu_assign_pointer(mas->tree->ma_root, entry); mas->status = ma_start; goto done; @@ -3684,8 +3708,7 @@ static inline void mas_new_root(struct ma_state *mas, void *entry) mas->status = ma_active; rcu_assign_pointer(slots[0], entry); pivots[0] = mas->last; - mas->depth = 1; - mas_set_height(mas); + 
mt_set_height(mas->tree, 1); rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node)); done: @@ -3804,6 +3827,7 @@ static inline void mas_wr_node_store(struct ma_wr_state *wr_mas, struct maple_node reuse, *newnode; unsigned char copy_size, node_pivots = mt_pivots[wr_mas->type]; bool in_rcu = mt_in_rcu(mas->tree); + unsigned char height = mas_mt_height(mas); if (mas->last == wr_mas->end_piv) offset_end++; /* don't copy this offset */ @@ -3860,7 +3884,7 @@ done: struct maple_enode *old_enode = mas->node; mas->node = mt_mk_node(newnode, wr_mas->type); - mas_replace_node(mas, old_enode); + mas_replace_node(mas, old_enode, height); } else { memcpy(wr_mas->node, newnode, sizeof(struct maple_node)); } @@ -4059,15 +4083,6 @@ static inline void mas_wr_store_entry(struct ma_wr_state *wr_mas) unsigned char new_end = mas_wr_new_end(wr_mas); switch (mas->store_type) { - case wr_invalid: - MT_BUG_ON(mas->tree, 1); - return; - case wr_new_root: - mas_new_root(mas, wr_mas->entry); - break; - case wr_store_root: - mas_store_root(mas, wr_mas->entry); - break; case wr_exact_fit: rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry); if (!!wr_mas->entry ^ !!wr_mas->content) @@ -4089,6 +4104,14 @@ static inline void mas_wr_store_entry(struct ma_wr_state *wr_mas) case wr_rebalance: mas_wr_bnode(wr_mas); break; + case wr_new_root: + mas_new_root(mas, wr_mas->entry); + break; + case wr_store_root: + mas_store_root(mas, wr_mas->entry); + break; + case wr_invalid: + MT_BUG_ON(mas->tree, 1); } return; @@ -4140,18 +4163,41 @@ set_content: /** * mas_prealloc_calc() - Calculate number of nodes needed for a * given store oepration - * @mas: The maple state + * @wr_mas: The maple write state * @entry: The entry to store into the tree * * Return: Number of nodes required for preallocation. */ -static inline int mas_prealloc_calc(struct ma_state *mas, void *entry) +static inline int mas_prealloc_calc(struct ma_wr_state *wr_mas, void *entry) { - int ret = mas_mt_height(mas) * 3 + 1; + struct ma_state *mas = wr_mas->mas; + unsigned char height = mas_mt_height(mas); + int ret = height * 3 + 1; + unsigned char delta = height - wr_mas->vacant_height; switch (mas->store_type) { - case wr_invalid: - WARN_ON_ONCE(1); + case wr_exact_fit: + case wr_append: + case wr_slot_store: + ret = 0; + break; + case wr_spanning_store: + if (wr_mas->sufficient_height < wr_mas->vacant_height) + ret = (height - wr_mas->sufficient_height) * 3 + 1; + else + ret = delta * 3 + 1; + break; + case wr_split_store: + ret = delta * 2 + 1; + break; + case wr_rebalance: + if (wr_mas->sufficient_height < wr_mas->vacant_height) + ret = (height - wr_mas->sufficient_height) * 2 + 1; + else + ret = delta * 2 + 1; + break; + case wr_node_store: + ret = mt_in_rcu(mas->tree) ? 1 : 0; break; case wr_new_root: ret = 1; @@ -4164,22 +4210,8 @@ static inline int mas_prealloc_calc(struct ma_state *mas, void *entry) else ret = 0; break; - case wr_spanning_store: - ret = mas_mt_height(mas) * 3 + 1; - break; - case wr_split_store: - ret = mas_mt_height(mas) * 2 + 1; - break; - case wr_rebalance: - ret = mas_mt_height(mas) * 2 - 1; - break; - case wr_node_store: - ret = mt_in_rcu(mas->tree) ? 
1 : 0; - break; - case wr_append: - case wr_exact_fit: - case wr_slot_store: - ret = 0; + case wr_invalid: + WARN_ON_ONCE(1); } return ret; @@ -4243,16 +4275,15 @@ static inline enum store_type mas_wr_store_type(struct ma_wr_state *wr_mas) */ static inline void mas_wr_preallocate(struct ma_wr_state *wr_mas, void *entry) { - struct ma_state *mas = wr_mas->mas; int request; mas_wr_prealloc_setup(wr_mas); - mas->store_type = mas_wr_store_type(wr_mas); - request = mas_prealloc_calc(mas, entry); + wr_mas->mas->store_type = mas_wr_store_type(wr_mas); + request = mas_prealloc_calc(wr_mas, entry); if (!request) return; - mas_node_count(mas, request); + mas_node_count(wr_mas->mas, request); } /** @@ -5397,7 +5428,7 @@ void *mas_store(struct ma_state *mas, void *entry) return wr_mas.content; } - request = mas_prealloc_calc(mas, entry); + request = mas_prealloc_calc(&wr_mas, entry); if (!request) goto store; @@ -5494,7 +5525,7 @@ int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp) mas_wr_prealloc_setup(&wr_mas); mas->store_type = mas_wr_store_type(&wr_mas); - request = mas_prealloc_calc(mas, entry); + request = mas_prealloc_calc(&wr_mas, entry); if (!request) return ret; diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c index f585949ff696ee..1b0b59549aaf11 100644 --- a/lib/test_vmalloc.c +++ b/lib/test_vmalloc.c @@ -13,9 +13,9 @@ #include <linux/moduleparam.h> #include <linux/completion.h> #include <linux/delay.h> -#include <linux/rwsem.h> #include <linux/mm.h> #include <linux/rcupdate.h> +#include <linux/srcu.h> #include <linux/slab.h> #define __param(type, name, init, msg) \ @@ -58,10 +58,9 @@ __param(int, run_test_mask, INT_MAX, ); /* - * Read write semaphore for synchronization of setup - * phase that is done in main thread and workers. + * This is for synchronization of setup phase. */ -static DECLARE_RWSEM(prepare_for_test_rwsem); +DEFINE_STATIC_SRCU(prepare_for_test_srcu); /* * Completion tracking for worker threads. @@ -458,7 +457,7 @@ static int test_func(void *private) /* * Block until initialization is done. */ - down_read(&prepare_for_test_rwsem); + synchronize_srcu(&prepare_for_test_srcu); t->start = get_cycles(); for (i = 0; i < ARRAY_SIZE(test_case_array); i++) { @@ -487,8 +486,6 @@ static int test_func(void *private) t->data[index].time = delta; } t->stop = get_cycles(); - - up_read(&prepare_for_test_rwsem); test_report_one_done(); /* @@ -526,7 +523,7 @@ init_test_configuration(void) static void do_concurrent_test(void) { - int i, ret; + int i, ret, idx; /* * Set some basic configurations plus sanity check. @@ -538,7 +535,7 @@ static void do_concurrent_test(void) /* * Put on hold all workers. */ - down_write(&prepare_for_test_rwsem); + idx = srcu_read_lock(&prepare_for_test_srcu); for (i = 0; i < nr_threads; i++) { struct test_driver *t = &tdriver[i]; @@ -555,7 +552,7 @@ static void do_concurrent_test(void) /* * Now let the workers do their job. */ - up_write(&prepare_for_test_rwsem); + srcu_read_unlock(&prepare_for_test_srcu, idx); /* * Sleep quiet until all workers are done with 1 second @@ -594,10 +591,11 @@ static void do_concurrent_test(void) kvfree(tdriver); } -static int vmalloc_test_init(void) +static int __init vmalloc_test_init(void) { do_concurrent_test(); - return -EAGAIN; /* Fail will directly unload the module */ + /* Fail will directly unload the module */ + return IS_BUILTIN(CONFIG_TEST_VMALLOC) ? 
0:-EAGAIN; } module_init(vmalloc_test_init) diff --git a/lib/test_xarray.c b/lib/test_xarray.c index 080a39d22e7362..5ca0aefee9aa5f 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -1040,6 +1040,7 @@ static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base) unsigned int i, id; unsigned long index; void *entry; + int ret; XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(1), limit, &next, GFP_KERNEL) != 0); @@ -1059,7 +1060,7 @@ static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base) else entry = xa_mk_index(i - 0x3fff); XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, entry, limit, - &next, GFP_KERNEL) != (id == 1)); + &next, GFP_KERNEL) != 0); XA_BUG_ON(xa, xa_mk_index(id) != entry); } @@ -1072,7 +1073,7 @@ static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base) xa_limit_32b, &next, GFP_KERNEL) != 0); XA_BUG_ON(xa, id != UINT_MAX); XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base), - xa_limit_32b, &next, GFP_KERNEL) != 1); + xa_limit_32b, &next, GFP_KERNEL) != 0); XA_BUG_ON(xa, id != base); XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base + 1), xa_limit_32b, &next, GFP_KERNEL) != 0); @@ -1080,7 +1081,19 @@ static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base) xa_for_each(xa, index, entry) xa_erase_index(xa, index); + XA_BUG_ON(xa, !xa_empty(xa)); + /* check wrap-around return of __xa_alloc_cyclic() */ + next = UINT_MAX; + XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(UINT_MAX), + xa_limit_32b, &next, GFP_KERNEL) != 0); + xa_lock(xa); + ret = __xa_alloc_cyclic(xa, &id, xa_mk_index(base), xa_limit_32b, + &next, GFP_KERNEL); + xa_unlock(xa); + XA_BUG_ON(xa, ret != 1); + xa_for_each(xa, index, entry) + xa_erase_index(xa, index); XA_BUG_ON(xa, !xa_empty(xa)); } diff --git a/lib/xarray.c b/lib/xarray.c index 9644b18af18d17..76dde3a1cacf5f 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -1742,20 +1742,23 @@ static inline void *__xa_cmpxchg_raw(struct xarray *xa, unsigned long index, void *old, void *entry, gfp_t gfp); /** - * __xa_cmpxchg() - Store this entry in the XArray. + * __xa_cmpxchg() - Conditionally replace an entry in the XArray. * @xa: XArray. * @index: Index into array. * @old: Old value to test against. - * @entry: New entry. + * @entry: New value to place in array. * @gfp: Memory allocation flags. * * You must already be holding the xa_lock when calling this function. * It will drop the lock if needed to allocate memory, and then reacquire * it afterwards. * + * If the entry at @index is the same as @old, replace it with @entry. + * If the return value is equal to @old, then the exchange was successful. + * * Context: Any context. Expects xa_lock to be held on entry. May * release and reacquire xa_lock if @gfp flags permit. - * Return: The old entry at this index or xa_err() if an error happened. + * Return: The old value at this index or xa_err() if an error happened. */ void *__xa_cmpxchg(struct xarray *xa, unsigned long index, void *old, void *entry, gfp_t gfp) |
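
A note on the test_xarray.c changes above: xa_alloc_cyclic() and __xa_alloc_cyclic() return 0 on plain success and 1 when the allocation succeeded but the cyclic counter wrapped around, which is why the updated assertions compare against 0 for ordinary allocations and only expect 1 after pushing next past UINT_MAX. Below is a minimal caller-side sketch of that convention; the ids array and the assign_id() wrapper are hypothetical, for illustration only.

	#include <linux/printk.h>
	#include <linux/xarray.h>

	static DEFINE_XARRAY_ALLOC(ids);	/* allocating XArray */
	static u32 next_id;			/* cyclic hint, advanced by the allocator */

	/* Returns 0 on success; a wrap of the ID space is not an error. */
	static int assign_id(void *object, u32 *out_id)
	{
		int ret = xa_alloc_cyclic(&ids, out_id, object, xa_limit_32b,
					  &next_id, GFP_KERNEL);

		if (ret < 0)		/* e.g. -ENOMEM or -EBUSY */
			return ret;
		if (ret == 1)		/* success, but the counter wrapped */
			pr_info("ID space wrapped; old IDs may be reused\n");
		return 0;
	}

A caller that treats any non-zero return as failure would mis-handle the wrap case, which is exactly the behaviour the new __xa_alloc_cyclic() test pins down.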

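Similarly, the test_vmalloc.c conversion replaces a reader-writer semaphore with SRCU used as a one-shot setup gate: the coordinator holds an SRCU read lock while it prepares shared state, and each worker opens with synchronize_srcu(), which cannot return until that read-side critical section closes. A minimal sketch of the same gate, assuming a kthread-based worker similar to the one in the test:

	#include <linux/kthread.h>
	#include <linux/srcu.h>

	DEFINE_STATIC_SRCU(setup_srcu);

	static int worker_fn(void *arg)
	{
		/* Parks here until the coordinator drops its read lock. */
		synchronize_srcu(&setup_srcu);
		/* ... run the measured workload ... */
		return 0;
	}

	static void launch_workers(void)
	{
		int idx;

		idx = srcu_read_lock(&setup_srcu);	/* hold all workers */
		kthread_run(worker_fn, NULL, "test_worker");
		/* ... finish filling in shared test state ... */
		srcu_read_unlock(&setup_srcu, idx);	/* release the workers */
	}

Unlike the rwsem it replaces, nothing re-blocks a worker once the gate has opened; that is sufficient here because the workers only need to wait out the initial setup, and it avoids holding a lock across the entire test run.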