aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2025-10-23 09:26:47 -1000
committerLinus Torvalds <torvalds@linux-foundation.org>2025-10-23 09:26:47 -1000
commit266ee584e55eed108583ab4f45b5de734522502d (patch)
treeefa2221863e7edb02b2fad69818c0f3b86577f53
parentab431bc39741e9d9bd3102688439e1864c857a74 (diff)
parentb98c94eed4a975e0c80b7e90a649a46967376f58 (diff)
downloadtip-266ee584e55eed108583ab4f45b5de734522502d.tar.gz
Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 fixes from Catalin Marinas:

 - Do not make a clean PTE dirty in pte_mkwrite()

   The Arm architecture, for backwards compatibility reasons (ARMv8.0
   before in-hardware dirty bit management - DBM), uses the PTE_RDONLY
   bit to mean !dirty while the PTE_WRITE bit means DBM enabled. The
   arm64 pte_mkwrite() simply clears the PTE_RDONLY bit and this
   inadvertently makes the PTE pte_hw_dirty(). Most places making a PTE
   writable also invoke pte_mkdirty() but do_swap_page() does not and we
   end up with dirty, freshly swapped in, writeable pages.

 - Do not warn if the destination page is already MTE-tagged in
   copy_highpage()

   In the majority of the cases, a destination page copied into is
   freshly allocated without the PG_mte_tagged flag set. However, the
   folio migration may be restarted if __folio_migrate_mapping() failed,
   triggering the benign WARN_ON_ONCE().

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: mte: Do not warn if the page is already tagged in copy_highpage()
  arm64, mm: avoid always making PTE dirty in pte_mkwrite()
-rw-r--r--arch/arm64/include/asm/pgtable.h3
-rw-r--r--arch/arm64/mm/copypage.c11
2 files changed, 10 insertions, 4 deletions
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index aa89c2e67ebc84..0944e296dd4a4c 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -293,7 +293,8 @@ static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
static inline pte_t pte_mkwrite_novma(pte_t pte)
{
pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
- pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
+ if (pte_sw_dirty(pte))
+ pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
return pte;
}
diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c
index a86c897017df08..cd5912ba617b70 100644
--- a/arch/arm64/mm/copypage.c
+++ b/arch/arm64/mm/copypage.c
@@ -35,7 +35,7 @@ void copy_highpage(struct page *to, struct page *from)
from != folio_page(src, 0))
return;
- WARN_ON_ONCE(!folio_try_hugetlb_mte_tagging(dst));
+ folio_try_hugetlb_mte_tagging(dst);
/*
* Populate tags for all subpages.
@@ -51,8 +51,13 @@ void copy_highpage(struct page *to, struct page *from)
}
folio_set_hugetlb_mte_tagged(dst);
} else if (page_mte_tagged(from)) {
- /* It's a new page, shouldn't have been tagged yet */
- WARN_ON_ONCE(!try_page_mte_tagging(to));
+ /*
+ * Most of the time it's a new page that shouldn't have been
+ * tagged yet. However, folio migration can end up reusing the
+ * same page without untagging it. Ignore the warning if the
+ * page is already tagged.
+ */
+ try_page_mte_tagging(to);
mte_copy_page_tags(kto, kfrom);
set_page_mte_tagged(to);