[v4.9.y,09/27] arm64: mm: Map entry trampoline into trampoline and kernel page tables

Message ID 20180403110923.43575-10-mark.rutland@arm.com
State New
Series arm64 meltdown patches

Commit Message

Mark Rutland April 3, 2018, 11:09 a.m. UTC
From: Will Deacon <will.deacon@arm.com>

commit 51a0048beb44 upstream.

The exception entry trampoline needs to be mapped at the same virtual
address in both the trampoline page table (which maps nothing else)
and also the kernel page table, so that we can swizzle TTBR1_EL1 on
exceptions from and returns to EL0.

This patch maps the trampoline at a fixed virtual address in the fixmap
area of the kernel virtual address space, which allows the kernel proper
to be randomized with respect to the trampoline when KASLR is enabled.
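
Because each fixmap slot sits at a fixed offset below FIXADDR_TOP, the
trampoline alias is a build-time constant rather than an image-relative
address. A minimal sketch of the arithmetic, following the generic
fixmap helper in include/asm-generic/fixmap.h:

	/* Fixmap slots are one page each, growing downwards from FIXADDR_TOP */
	#define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT))

	/*
	 * FIX_ENTRY_TRAMP_TEXT is a fixed enum slot, so TRAMP_VALIAS
	 * resolves to the same virtual address on every boot, no matter
	 * where KASLR has placed the kernel image itself.
	 */
	#define TRAMP_VALIAS		(__fix_to_virt(FIX_ENTRY_TRAMP_TEXT))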

Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Tested-by: Laura Abbott <labbott@redhat.com>
Tested-by: Shanker Donthineni <shankerd@codeaurora.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Alex Shi <alex.shi@linaro.org>
Reviewed-by: Mark Rutland <mark.rutland@arm.com> [v4.9 backport]

---
 arch/arm64/include/asm/fixmap.h  |  5 +++++
 arch/arm64/include/asm/pgtable.h |  1 +
 arch/arm64/kernel/asm-offsets.c  |  6 +++++-
 arch/arm64/mm/mmu.c              | 23 +++++++++++++++++++++++
 4 files changed, 34 insertions(+), 1 deletion(-)

-- 
2.11.0

Comments

Mark Rutland April 3, 2018, 11:15 a.m. UTC | #1
On Tue, Apr 03, 2018 at 12:09:05PM +0100, Mark Rutland wrote:
> From: Will Deacon <will.deacon@arm.com>
> 
> commit 51a0048beb44 upstream.
> 
> The exception entry trampoline needs to be mapped at the same virtual
> address in both the trampoline page table (which maps nothing else)
> and also the kernel page table, so that we can swizzle TTBR1_EL1 on
> exceptions from and returns to EL0.
> 
> This patch maps the trampoline at a fixed virtual address in the fixmap
> area of the kernel virtual address space, which allows the kernel proper
> to be randomized with respect to the trampoline when KASLR is enabled.
> 
> Reviewed-by: Mark Rutland <mark.rutland@arm.com>
> Tested-by: Laura Abbott <labbott@redhat.com>
> Tested-by: Shanker Donthineni <shankerd@codeaurora.org>
> Signed-off-by: Will Deacon <will.deacon@arm.com>
> Signed-off-by: Alex Shi <alex.shi@linaro.org>
> Reviewed-by: Mark Rutland <mark.rutland@arm.com> [v4.9 backport]

It has just been pointed out to me that I messed up the SoB chain here,
and this should be:

Signed-off-by: Mark Rutland <mark.rutland@arm.com> [v4.9 backport]

Otherwise, the patch itself is fine. Sorry for the noise there -- I hope
this can be fixed up when applying?

Thanks,
Mark.
Greg KH April 5, 2018, 7:33 p.m. UTC | #2
On Tue, Apr 03, 2018 at 12:15:06PM +0100, Mark Rutland wrote:
> On Tue, Apr 03, 2018 at 12:09:05PM +0100, Mark Rutland wrote:
> > From: Will Deacon <will.deacon@arm.com>
> > 
> > commit 51a0048beb44 upstream.
> > 
> > The exception entry trampoline needs to be mapped at the same virtual
> > address in both the trampoline page table (which maps nothing else)
> > and also the kernel page table, so that we can swizzle TTBR1_EL1 on
> > exceptions from and returns to EL0.
> > 
> > This patch maps the trampoline at a fixed virtual address in the fixmap
> > area of the kernel virtual address space, which allows the kernel proper
> > to be randomized with respect to the trampoline when KASLR is enabled.
> > 
> > Reviewed-by: Mark Rutland <mark.rutland@arm.com>
> > Tested-by: Laura Abbott <labbott@redhat.com>
> > Tested-by: Shanker Donthineni <shankerd@codeaurora.org>
> > Signed-off-by: Will Deacon <will.deacon@arm.com>
> > Signed-off-by: Alex Shi <alex.shi@linaro.org>
> > Reviewed-by: Mark Rutland <mark.rutland@arm.com> [v4.9 backport]
> 
> It has just been pointed out to me that I messed up the SoB chain here,
> and this should be:
> 
> Signed-off-by: Mark Rutland <mark.rutland@arm.com> [v4.9 backport]
> 
> Otherwise, the patch itself is fine. Sorry for the noise there -- I hope
> this can be fixed up when applying?

I'll go fix it up...

Patch

diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index caf86be815ba..7b1d88c18143 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -51,6 +51,11 @@ enum fixed_addresses {
 
 	FIX_EARLYCON_MEM_BASE,
 	FIX_TEXT_POKE0,
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+	FIX_ENTRY_TRAMP_TEXT,
+#define TRAMP_VALIAS		(__fix_to_virt(FIX_ENTRY_TRAMP_TEXT))
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
 	__end_of_permanent_fixed_addresses,
 
 	/*
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 7acd3c5c7643..3a30a3994e4a 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -692,6 +692,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
 
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
+extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
 
 /*
  * Encode and decode a swap entry:
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index c58ddf8c4062..5f4bf3c6f016 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -24,6 +24,7 @@ 
 #include <linux/kvm_host.h>
 #include <linux/suspend.h>
 #include <asm/cpufeature.h>
+#include <asm/fixmap.h>
 #include <asm/thread_info.h>
 #include <asm/memory.h>
 #include <asm/smp_plat.h>
@@ -144,11 +145,14 @@ int main(void)
   DEFINE(ARM_SMCCC_RES_X2_OFFS,		offsetof(struct arm_smccc_res, a2));
   DEFINE(ARM_SMCCC_QUIRK_ID_OFFS,	offsetof(struct arm_smccc_quirk, id));
   DEFINE(ARM_SMCCC_QUIRK_STATE_OFFS,	offsetof(struct arm_smccc_quirk, state));
-
   BLANK();
   DEFINE(HIBERN_PBE_ORIG,	offsetof(struct pbe, orig_address));
   DEFINE(HIBERN_PBE_ADDR,	offsetof(struct pbe, address));
   DEFINE(HIBERN_PBE_NEXT,	offsetof(struct pbe, next));
   DEFINE(ARM64_FTR_SYSVAL,	offsetof(struct arm64_ftr_reg, sys_val));
+  BLANK();
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+  DEFINE(TRAMP_VALIAS,		TRAMP_VALIAS);
+#endif
   return 0;
 }
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 638f7f2bd79c..3a57fec16b32 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -419,6 +419,29 @@ static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
 	vm_area_add_early(vma);
 }
 
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+static int __init map_entry_trampoline(void)
+{
+	extern char __entry_tramp_text_start[];
+
+	pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
+	phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);
+
+	/* The trampoline is always mapped and can therefore be global */
+	pgprot_val(prot) &= ~PTE_NG;
+
+	/* Map only the text into the trampoline page table */
+	memset(tramp_pg_dir, 0, PGD_SIZE);
+	__create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
+			     prot, pgd_pgtable_alloc, 0);
+
+	/* ...as well as the kernel page table */
+	__set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
+	return 0;
+}
+core_initcall(map_entry_trampoline);
+#endif
+
 /*
  * Create fine-grained mappings for the kernel.
  */
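
The trampoline text is the only thing mapped by tramp_pg_dir, and it is
mapped at the same TRAMP_VALIAS address in the kernel tables via the
fixmap, so the instruction stream stays valid while TTBR1_EL1 is
rewritten underneath it. A conceptual sketch of that switch, in C with
inline assembly (the real sequence is written in assembly in entry.S
and also deals with ASIDs and errata workarounds; switch_ttbr1 is a
made-up name for illustration):

	#include <linux/types.h>

	/*
	 * Sketch only: on an exception from EL0, point TTBR1_EL1 at the
	 * full kernel tables; before returning to EL0, point it back at
	 * the near-empty trampoline tables.
	 */
	static inline void switch_ttbr1(phys_addr_t pgd_phys)
	{
		asm volatile(
			"msr	ttbr1_el1, %0\n"
			"isb"	/* make the new translation regime visible */
			: : "r" (pgd_phys) : "memory");
	}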