| author | Anshuman Khandual <anshuman.khandual@arm.com> | 2025-10-07 07:31:00 +0100 |
|---|---|---|
| committer | Andrew Morton <akpm@linux-foundation.org> | 2025-11-16 17:27:56 -0800 |
| commit | c0efdb373c3aaacb32db59cadb0710cac13e44ae | |
| tree | bbcef6d33668043fa8d5cd74542bf3a17d5efbcd | |
| parent | bda7bf06840d4eb133abefa5a2fd75544277bd86 | |
mm: replace READ_ONCE() with standard page table accessors
Replace all READ_ONCE() page table dereferences with the standard accessors,
i.e. pxdp_get(), which default to READ_ONCE() on platforms that do not
override them.
Link: https://lkml.kernel.org/r/20251007063100.2396936-1-anshuman.khandual@arm.com
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Lance Yang <lance.yang@linux.dev>
Reviewed-by: Wei Yang <richard.weiyang@gmail.com>
Reviewed-by: Dev Jain <dev.jain@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
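
For reference, the pxdp_get() accessors are generic helpers in include/linux/pgtable.h that simply wrap READ_ONCE() unless an architecture supplies its own definition. A minimal sketch of that fallback pattern (abridged, not the verbatim kernel source):

```c
/*
 * Sketch of the generic fallbacks in include/linux/pgtable.h (abridged).
 * An architecture that needs a special read sequence provides its own
 * pgdp_get()/p4dp_get()/pudp_get()/pmdp_get(), and these defaults drop out.
 */
#ifndef pgdp_get
static inline pgd_t pgdp_get(pgd_t *pgdp)
{
	return READ_ONCE(*pgdp);
}
#endif

#ifndef p4dp_get
static inline p4d_t p4dp_get(p4d_t *p4dp)
{
	return READ_ONCE(*p4dp);
}
#endif

#ifndef pudp_get
static inline pud_t pudp_get(pud_t *pudp)
{
	return READ_ONCE(*pudp);
}
#endif

#ifndef pmdp_get
static inline pmd_t pmdp_get(pmd_t *pmdp)
{
	return READ_ONCE(*pmdp);
}
#endif
```

On platforms without an override the conversion therefore changes nothing in the generated code; the point is that architectures which do override an accessor now get their variant in these walk paths as well.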
| -rw-r--r-- | mm/gup.c | 10 |
|---|---|---|
| -rw-r--r-- | mm/hmm.c | 2 |
| -rw-r--r-- | mm/memory.c | 4 |
| -rw-r--r-- | mm/mprotect.c | 2 |
| -rw-r--r-- | mm/sparse-vmemmap.c | 2 |
| -rw-r--r-- | mm/vmscan.c | 2 |
6 files changed, 11 insertions, 11 deletions
```diff
diff --git a/mm/gup.c b/mm/gup.c
index d2524fe09338fd..95d948c8e86c92 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -950,7 +950,7 @@ static struct page *follow_pud_mask(struct vm_area_struct *vma,
 	struct mm_struct *mm = vma->vm_mm;
 
 	pudp = pud_offset(p4dp, address);
-	pud = READ_ONCE(*pudp);
+	pud = pudp_get(pudp);
 	if (!pud_present(pud))
 		return no_page_table(vma, flags, address);
 	if (pud_leaf(pud)) {
@@ -975,7 +975,7 @@ static struct page *follow_p4d_mask(struct vm_area_struct *vma,
 	p4d_t *p4dp, p4d;
 
 	p4dp = p4d_offset(pgdp, address);
-	p4d = READ_ONCE(*p4dp);
+	p4d = p4dp_get(p4dp);
 	BUILD_BUG_ON(p4d_leaf(p4d));
 
 	if (!p4d_present(p4d) || p4d_bad(p4d))
@@ -3060,7 +3060,7 @@ static int gup_fast_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr,
 
 	pudp = pud_offset_lockless(p4dp, p4d, addr);
 	do {
-		pud_t pud = READ_ONCE(*pudp);
+		pud_t pud = pudp_get(pudp);
 
 		next = pud_addr_end(addr, end);
 		if (unlikely(!pud_present(pud)))
@@ -3086,7 +3086,7 @@ static int gup_fast_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
 
 	p4dp = p4d_offset_lockless(pgdp, pgd, addr);
 	do {
-		p4d_t p4d = READ_ONCE(*p4dp);
+		p4d_t p4d = p4dp_get(p4dp);
 
 		next = p4d_addr_end(addr, end);
 		if (!p4d_present(p4d))
@@ -3108,7 +3108,7 @@ static void gup_fast_pgd_range(unsigned long addr, unsigned long end,
 
 	pgdp = pgd_offset(current->mm, addr);
 	do {
-		pgd_t pgd = READ_ONCE(*pgdp);
+		pgd_t pgd = pgdp_get(pgdp);
 
 		next = pgd_addr_end(addr, end);
 		if (pgd_none(pgd))
diff --git a/mm/hmm.c b/mm/hmm.c
index 87562914670a1f..a56081d67ad691 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -491,7 +491,7 @@ static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
 
 	/* Normally we don't want to split the huge page */
 	walk->action = ACTION_CONTINUE;
-	pud = READ_ONCE(*pudp);
+	pud = pudp_get(pudp);
 	if (!pud_present(pud)) {
 		spin_unlock(ptl);
 		return hmm_vma_walk_hole(start, end, -1, walk);
diff --git a/mm/memory.c b/mm/memory.c
index 61748b762876f4..f13b20b702f6c1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -6690,12 +6690,12 @@ retry:
 		goto out;
 
 	p4dp = p4d_offset(pgdp, address);
-	p4d = READ_ONCE(*p4dp);
+	p4d = p4dp_get(p4dp);
 	if (p4d_none(p4d) || unlikely(p4d_bad(p4d)))
 		goto out;
 
 	pudp = pud_offset(p4dp, address);
-	pud = READ_ONCE(*pudp);
+	pud = pudp_get(pudp);
 	if (pud_none(pud))
 		goto out;
 	if (pud_leaf(pud)) {
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 113b489858341f..988c366137d506 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -599,7 +599,7 @@ again:
 			break;
 		}
 
-		pud = READ_ONCE(*pudp);
+		pud = pudp_get(pudp);
 		if (pud_none(pud))
 			continue;
 
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index dbd8daccade28c..37522d6cb39885 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -439,7 +439,7 @@ int __meminit vmemmap_populate_hugepages(unsigned long start, unsigned long end,
 			return -ENOMEM;
 
 		pmd = pmd_offset(pud, addr);
-		if (pmd_none(READ_ONCE(*pmd))) {
+		if (pmd_none(pmdp_get(pmd))) {
 			void *p;
 
 			p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b2fc8b626d3dff..2239de111fa636 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3773,7 +3773,7 @@ static int walk_pud_range(p4d_t *p4d, unsigned long start, unsigned long end,
 	pud = pud_offset(p4d, start & P4D_MASK);
 restart:
 	for (i = pud_index(start), addr = start; addr != end; i++, addr = next) {
-		pud_t val = READ_ONCE(pud[i]);
+		pud_t val = pudp_get(pud + i);
 
 		next = pud_addr_end(addr, end);
```
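The sketch below shows how an architecture might hook one of these accessors; the __arch_read_pud() helper is purely hypothetical and is only meant to illustrate why the converted call sites now pick up an arch-specific read while an open-coded READ_ONCE(*pudp) would not.

```c
/*
 * Hypothetical architecture override (illustrative only; no real port is
 * being quoted here). Defining the macro suppresses the generic READ_ONCE()
 * fallback, so every caller converted to pudp_get() in the patch above goes
 * through this helper instead.
 */
static inline pud_t pudp_get(pud_t *pudp)
{
	return __arch_read_pud(pudp);	/* assumed arch-specific read helper */
}
#define pudp_get pudp_get
```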
