author		Kefeng Wang <wangkefeng.wang@huawei.com>	2025-10-23 19:37:35 +0800
committer	Andrew Morton <akpm@linux-foundation.org>	2025-11-16 17:28:03 -0800
commit		6e97624dacc1a3599bae3724c79f1942e11c2912 (patch)
tree		1f6ba42371cbfbfa762854a2ae08695b127edb28 /mm/mprotect.c
parent		03aa8e4f273284a6abf28c0d86529cf3947328b2 (diff)
mm: mprotect: avoid unnecessary struct page accessing if pte_protnone()
If pte_protnone() is true, we can avoid unnecessary struct page accesses and reduce the cache footprint when scanning page tables for prot NUMA. A similar change was made before; see commit a818f5363a0e ("autonuma: reduce cache footprint when scanning page tables").

Link: https://lkml.kernel.org/r/20251023113737.3572790-3-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Acked-by: Zi Yan <ziy@nvidia.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Dev Jain <dev.jain@arm.com>
Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
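For context, pte_protnone() inspects only the PTE value that change_pte_range() has already loaded, while vm_normal_page() and page_folio() map the PFN to a struct page / struct folio, which means touching memmap cache lines. A minimal sketch of the reordered test, paraphrased from the hunks below (the helper name is purely illustrative; the patch open-codes this check in change_pte_range()):

/*
 * Illustrative sketch only: the patch open-codes this test at the top of
 * the per-PTE loop in change_pte_range(); see the second hunk below.
 */
static inline bool prot_numa_pte_already_done(pte_t oldpte, bool prot_numa)
{
	/* Reads only PTE bits; no struct page / memmap access needed. */
	return prot_numa && pte_protnone(oldpte);
}

Because the test now runs before vm_normal_page(), PTEs that are already PROT_NONE never trigger a folio lookup at all, which is also why prot_numa_skip() can drop its addr, oldpte and pte parameters.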
Diffstat (limited to 'mm/mprotect.c')
-rw-r--r--	mm/mprotect.c | 29
1 file changed, 13 insertions(+), 16 deletions(-)
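For readers who prefer the post-image over interleaved hunks, below is a hedged reconstruction of the prot-NUMA part of the per-PTE loop in change_pte_range() after this commit, assembled from the '+' and context lines of the diff (loop control, locking and the actual protection update are elided):

		/* Already in the desired state. */
		if (prot_numa && pte_protnone(oldpte))
			continue;

		page = vm_normal_page(vma, addr, oldpte);
		if (page)
			folio = page_folio(page);

		/*
		 * Avoid trapping faults against the zero or KSM
		 * pages. See similar comment in change_huge_pmd.
		 */
		if (prot_numa && prot_numa_skip(vma, target_node, folio)) {
			/* determine batch to skip */
			nr_ptes = mprotect_folio_pte_batch(folio,
					pte, oldpte, max_nr_ptes, /* flags = */ 0);
			continue;
		}

		nr_ptes = mprotect_folio_pte_batch(folio, pte, oldpte, max_nr_ptes, flags);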
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 056986d9076a84..6236d120c8e6d5 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -118,18 +118,13 @@ static int mprotect_folio_pte_batch(struct folio *folio, pte_t *ptep,
return folio_pte_batch_flags(folio, NULL, ptep, &pte, max_nr_ptes, flags);
}
-static bool prot_numa_skip(struct vm_area_struct *vma, unsigned long addr,
- pte_t oldpte, pte_t *pte, int target_node,
- struct folio *folio)
+static bool prot_numa_skip(struct vm_area_struct *vma, int target_node,
+ struct folio *folio)
{
bool ret = true;
bool toptier;
int nid;
- /* Avoid TLB flush if possible */
- if (pte_protnone(oldpte))
- goto skip;
-
if (!folio)
goto skip;
@@ -307,23 +302,25 @@ static long change_pte_range(struct mmu_gather *tlb,
struct page *page;
pte_t ptent;
+ /* Already in the desired state. */
+ if (prot_numa && pte_protnone(oldpte))
+ continue;
+
page = vm_normal_page(vma, addr, oldpte);
if (page)
folio = page_folio(page);
+
/*
* Avoid trapping faults against the zero or KSM
* pages. See similar comment in change_huge_pmd.
*/
-			if (prot_numa) {
-				int ret = prot_numa_skip(vma, addr, oldpte, pte,
-							 target_node, folio);
-				if (ret) {
-					/* determine batch to skip */
-					nr_ptes = mprotect_folio_pte_batch(folio,
-						  pte, oldpte, max_nr_ptes, /* flags = */ 0);
-					continue;
-				}
+			if (prot_numa &&
+			    prot_numa_skip(vma, target_node, folio)) {
+				/* determine batch to skip */
+				nr_ptes = mprotect_folio_pte_batch(folio,
+					  pte, oldpte, max_nr_ptes, /* flags = */ 0);
+				continue;
}
nr_ptes = mprotect_folio_pte_batch(folio, pte, oldpte, max_nr_ptes, flags);