diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -1432,20 +1432,8 @@ extern pte_t ptep_get(pte_t *ptep);
extern pte_t ptep_get_lockless(pte_t *ptep);
#define ptep_get_lockless ptep_get_lockless
-static inline void set_pte(pte_t *ptep, pte_t pte)
-{
- /*
- * We don't have the mm or vaddr so cannot unfold contig entries (since
- * it requires tlb maintenance). set_pte() is not used in core code, so
- * this should never even be called. Regardless do our best to service
- * any call and emit a warning if there is any attempt to set a pte on
- * top of an existing contig range.
- */
- pte_t orig_pte = __ptep_get(ptep);
-
- WARN_ON_ONCE(pte_valid_cont(orig_pte));
- __set_pte(ptep, pte_mknoncont(pte));
-}
+extern void set_pte(pte_t *ptep, pte_t pte);
+#define set_pte set_pte
extern void set_ptes(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte, unsigned int nr);
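
Note on the "#define set_pte set_pte" line above: this is the usual trick of defining a macro to its own name so that other headers can later test with #ifdef whether the architecture already supplies its own set_pte() and skip any generic fallback. A rough standalone illustration of the idiom follows; the pte_t stand-in and the "generic header" fallback are made up for this example and are not the real kernel headers:

  /* Toy stand-in; the real pte_t comes from the arch headers. */
  typedef struct { unsigned long val; } pte_t;

  /* "Arch header": declare the out-of-line helper and advertise it. */
  void set_pte(pte_t *ptep, pte_t pte);
  #define set_pte set_pte

  /* "Generic header": only provide a fallback when no arch override exists. */
  #ifndef set_pte
  static inline void set_pte(pte_t *ptep, pte_t pte)
  {
          ptep->val = pte.val;
  }
  #endif

  /* Out-of-line definition, normally in a C file such as mm/contpte.c. */
  void set_pte(pte_t *ptep, pte_t pte)
  {
          ptep->val = pte.val;
  }

  int main(void)
  {
          pte_t slot = { 0 };

          set_pte(&slot, (pte_t){ 42 });  /* resolves to the arch version */
          return slot.val == 42 ? 0 : 1;
  }
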
diff --git a/arch/riscv/include/asm/kfence.h b/arch/riscv/include/asm/kfence.h
--- a/arch/riscv/include/asm/kfence.h
+++ b/arch/riscv/include/asm/kfence.h
@@ -18,9 +18,9 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
pte_t *pte = virt_to_kpte(addr);
if (protect)
- set_pte(pte, __pte(pte_val(__ptep_get(pte)) & ~_PAGE_PRESENT));
+ __set_pte(pte, __pte(pte_val(__ptep_get(pte)) & ~_PAGE_PRESENT));
else
- set_pte(pte, __pte(pte_val(__ptep_get(pte)) | _PAGE_PRESENT));
+ __set_pte(pte, __pte(pte_val(__ptep_get(pte)) | _PAGE_PRESENT));
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -539,7 +539,7 @@ static inline int pte_same(pte_t pte_a, pte_t pte_b)
* a page table are directly modified. Thus, the following hook is
* made available.
*/
-static inline void set_pte(pte_t *ptep, pte_t pteval)
+static inline void __set_pte(pte_t *ptep, pte_t pteval)
{
WRITE_ONCE(*ptep, pteval);
}
@@ -551,7 +551,7 @@ static inline void __set_pte_at(struct mm_struct *mm, pte_t *ptep, pte_t pteval)
if (pte_present(pteval) && pte_exec(pteval))
flush_icache_pte(mm, pteval);
- set_pte(ptep, pteval);
+ __set_pte(ptep, pteval);
}
#define PFN_PTE_SHIFT _PAGE_PFN_SHIFT
@@ -790,11 +790,14 @@ extern pte_t ptep_get_lockless(pte_t *ptep);
extern void set_ptes(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pteval, unsigned int nr);
#define set_ptes set_ptes
+extern void set_pte(pte_t *ptep, pte_t pte);
+#define set_pte set_pte
#else /* CONFIG_THP_CONTPTE */
#define ptep_get __ptep_get
#define set_ptes __set_ptes
+#define set_pte __set_pte
#endif /* CONFIG_THP_CONTPTE */
diff --git a/arch/riscv/kernel/efi.c b/arch/riscv/kernel/efi.c
--- a/arch/riscv/kernel/efi.c
+++ b/arch/riscv/kernel/efi.c
@@ -72,7 +72,7 @@ static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data)
val = pte_val(pte) & ~_PAGE_EXEC;
pte = __pte(val);
}
- set_pte(ptep, pte);
+ __set_pte(ptep, pte);
return 0;
}
diff --git a/arch/riscv/kernel/hibernate.c b/arch/riscv/kernel/hibernate.c
--- a/arch/riscv/kernel/hibernate.c
+++ b/arch/riscv/kernel/hibernate.c
@@ -186,7 +186,7 @@ static int temp_pgtable_map_pte(pmd_t *dst_pmdp, pmd_t *src_pmdp, unsigned long
pte_t pte = READ_ONCE(*src_ptep);
if (pte_present(pte))
- set_pte(dst_ptep, __pte(pte_val(pte) | pgprot_val(prot)));
+ __set_pte(dst_ptep, __pte(pte_val(pte) | pgprot_val(prot)));
} while (dst_ptep++, src_ptep++, start += PAGE_SIZE, start < end);
return 0;
diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -155,7 +155,7 @@ static int gstage_set_pte(struct kvm *kvm, u32 level,
next_ptep = kvm_mmu_memory_cache_alloc(pcache);
if (!next_ptep)
return -ENOMEM;
- set_pte(ptep, pfn_pte(PFN_DOWN(__pa(next_ptep)),
+ __set_pte(ptep, pfn_pte(PFN_DOWN(__pa(next_ptep)),
__pgprot(_PAGE_TABLE)));
} else {
if (gstage_pte_leaf(ptep))
@@ -167,7 +167,7 @@ static int gstage_set_pte(struct kvm *kvm, u32 level,
ptep = &next_ptep[gstage_pte_index(addr, current_level)];
}
- set_pte(ptep, *new_pte);
+ __set_pte(ptep, *new_pte);
if (gstage_pte_leaf(ptep))
gstage_remote_tlb_flush(kvm, current_level, addr);
@@ -251,7 +251,7 @@ static void gstage_op_pte(struct kvm *kvm, gpa_t addr,
return;
if (op == GSTAGE_OP_CLEAR)
- set_pte(ptep, __pte(0));
+ __set_pte(ptep, __pte(0));
for (i = 0; i < PTRS_PER_PTE; i++)
gstage_op_pte(kvm, addr + i * next_page_size,
&next_ptep[i], next_ptep_level, op);
@@ -259,9 +259,9 @@ static void gstage_op_pte(struct kvm *kvm, gpa_t addr,
put_page(virt_to_page(next_ptep));
} else {
if (op == GSTAGE_OP_CLEAR)
- set_pte(ptep, __pte(0));
+ __set_pte(ptep, __pte(0));
else if (op == GSTAGE_OP_WP)
- set_pte(ptep, __pte(pte_val(__ptep_get(ptep)) & ~_PAGE_WRITE));
+ __set_pte(ptep, __pte(pte_val(__ptep_get(ptep)) & ~_PAGE_WRITE));
gstage_remote_tlb_flush(kvm, ptep_level, addr);
}
}
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -325,7 +325,7 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
ptep = &fixmap_pte[pte_index(addr)];
if (pgprot_val(prot))
- set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
+ __set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
else
pte_clear(&init_mm, addr, ptep);
local_flush_tlb_page(addr);
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
--- a/arch/riscv/mm/kasan_init.c
+++ b/arch/riscv/mm/kasan_init.c
@@ -41,7 +41,7 @@ static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned
do {
if (pte_none(__ptep_get(ptep))) {
phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
- set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
+ __set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
memset(__va(phys_addr), KASAN_SHADOW_INIT, PAGE_SIZE);
}
} while (ptep++, vaddr += PAGE_SIZE, vaddr != end);
@@ -327,8 +327,8 @@ asmlinkage void __init kasan_early_init(void)
KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
for (i = 0; i < PTRS_PER_PTE; ++i)
- set_pte(kasan_early_shadow_pte + i,
- pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL));
+ __set_pte(kasan_early_shadow_pte + i,
+ pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL));
for (i = 0; i < PTRS_PER_PMD; ++i)
set_pmd(kasan_early_shadow_pmd + i,
@@ -523,10 +523,10 @@ void __init kasan_init(void)
kasan_mem_to_shadow((const void *)MODULES_VADDR + SZ_2G));
for (i = 0; i < PTRS_PER_PTE; i++)
- set_pte(&kasan_early_shadow_pte[i],
- mk_pte(virt_to_page(kasan_early_shadow_page),
- __pgprot(_PAGE_PRESENT | _PAGE_READ |
- _PAGE_ACCESSED)));
+ __set_pte(&kasan_early_shadow_pte[i],
+ mk_pte(virt_to_page(kasan_early_shadow_page),
+ __pgprot(_PAGE_PRESENT | _PAGE_READ |
+ _PAGE_ACCESSED)));
memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
init_task.kasan_depth = 0;
diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
--- a/arch/riscv/mm/pageattr.c
+++ b/arch/riscv/mm/pageattr.c
@@ -71,7 +71,7 @@ static int pageattr_pte_entry(pte_t *pte, unsigned long addr,
pte_t val = __ptep_get(pte);
val = __pte(set_pageattr_masks(pte_val(val), walk));
- set_pte(pte, val);
+ __set_pte(pte, val);
return 0;
}
@@ -121,7 +121,7 @@ static int __split_linear_mapping_pmd(pud_t *pudp,
ptep_new = (pte_t *)page_address(pte_page);
for (i = 0; i < PTRS_PER_PTE; ++i, ++ptep_new)
- set_pte(ptep_new, pfn_pte(pfn + i, prot));
+ __set_pte(ptep_new, pfn_pte(pfn + i, prot));
smp_wmb();
diff --git a/mm/contpte.c b/mm/contpte.c
--- a/mm/contpte.c
+++ b/mm/contpte.c
@@ -17,6 +17,7 @@
* - __pte_clear()
* - __ptep_set_access_flags()
* - __ptep_set_wrprotect()
+ * - __set_pte()
* - pte_cont()
* - arch_contpte_get_num_contig()
* - pte_valid_cont()
@@ -43,6 +44,7 @@
* - ptep_get()
* - set_ptes()
* - ptep_get_lockless()
+ * - set_pte()
*/
pte_t huge_ptep_get(pte_t *ptep)
@@ -658,4 +660,20 @@ __always_inline pte_t ptep_get_lockless(pte_t *ptep)
return contpte_ptep_get_lockless(ptep);
}
+
+void set_pte(pte_t *ptep, pte_t pte)
+{
+ /*
+ * We don't have the mm or vaddr so cannot unfold contig entries (since
+ * it requires tlb maintenance). set_pte() is not used in core code, so
+ * this should never even be called. Regardless do our best to service
+ * any call and emit a warning if there is any attempt to set a pte on
+ * top of an existing contig range.
+ */
+ pte_t orig_pte = __ptep_get(ptep);
+
+ WARN_ON_ONCE(pte_valid_cont(orig_pte));
+ __set_pte(ptep, pte_mknoncont(pte));
+}
+
#endif /* CONFIG_THP_CONTPTE */
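
The set_pte() added above has neither the mm nor the virtual address, so it cannot unfold (and TLB-flush) an existing contiguous range; all it can do is strip the contiguous hint from the new value and warn if the entry being overwritten is part of a contig range. Below is a rough userspace model of that behaviour; the bit layout, pte_valid_cont() and pte_mknoncont() are toy stand-ins and do not match the real riscv definitions:

  #include <stdio.h>

  typedef struct { unsigned long val; } pte_t;

  #define _TOY_VALID (1UL << 0)
  #define _TOY_CONT  (1UL << 1)   /* pretend "contiguous range" hint */

  static int pte_valid_cont(pte_t pte)
  {
          return (pte.val & (_TOY_VALID | _TOY_CONT)) == (_TOY_VALID | _TOY_CONT);
  }

  static pte_t pte_mknoncont(pte_t pte)
  {
          return (pte_t){ pte.val & ~_TOY_CONT };
  }

  static void __set_pte(pte_t *ptep, pte_t pte)
  {
          ptep->val = pte.val;    /* the arch's raw, non-contpte-aware store */
  }

  static void set_pte(pte_t *ptep, pte_t pte)
  {
          /* No mm/vaddr here, so warn instead of unfolding the range. */
          if (pte_valid_cont(*ptep))
                  fprintf(stderr, "warning: overwriting part of a contig range\n");
          __set_pte(ptep, pte_mknoncont(pte));
  }

  int main(void)
  {
          pte_t slot = { _TOY_VALID | _TOY_CONT };        /* part of a contig range */

          set_pte(&slot, (pte_t){ _TOY_VALID | _TOY_CONT });
          printf("stored value: %#lx\n", slot.val);       /* cont hint cleared */
          return 0;
  }
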
Make riscv use the contpte-aware set_pte() function from arm64.

Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
---
 arch/arm64/include/asm/pgtable.h | 16 ++--------------
 arch/riscv/include/asm/kfence.h  |  4 ++--
 arch/riscv/include/asm/pgtable.h |  7 +++++--
 arch/riscv/kernel/efi.c          |  2 +-
 arch/riscv/kernel/hibernate.c    |  2 +-
 arch/riscv/kvm/mmu.c             | 10 +++++-----
 arch/riscv/mm/init.c             |  2 +-
 arch/riscv/mm/kasan_init.c       | 14 +++++++-------
 arch/riscv/mm/pageattr.c         |  4 ++--
 mm/contpte.c                     | 18 ++++++++++++++++++
 10 files changed, 44 insertions(+), 35 deletions(-)
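
For riscv the end result is a compile-time choice: with CONFIG_THP_CONTPTE=y, set_pte() is the contpte-aware helper from mm/contpte.c, otherwise it simply aliases __set_pte(). A condensed, buildable sketch of that dispatch follows; pte_t and the function bodies are placeholders, not the real headers:

  /* Define this to mimic CONFIG_THP_CONTPTE=y; drop it for the =n case. */
  #define CONFIG_THP_CONTPTE

  typedef struct { unsigned long val; } pte_t;

  /* The arch's raw store, always available. */
  static void __set_pte(pte_t *ptep, pte_t pte)
  {
          ptep->val = pte.val;
  }

  #ifdef CONFIG_THP_CONTPTE
  /* contpte-aware wrapper; in the kernel this lives in mm/contpte.c */
  void set_pte(pte_t *ptep, pte_t pte)
  {
          /* contig handling would go here, see the mm/contpte.c hunk */
          __set_pte(ptep, pte);
  }
  #define set_pte set_pte
  #else
  /* no contpte support: set_pte() is just the raw store */
  #define set_pte __set_pte
  #endif

  int main(void)
  {
          pte_t slot = { 0 };

          set_pte(&slot, (pte_t){ 1 });
          return slot.val == 1 ? 0 : 1;
  }

Call sites that explicitly want the raw store regardless of the config, such as the KVM g-stage tables, kfence, kasan and the boot-time page table code, are switched to __set_pte() by the hunks above.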