
[v3,20/20] arm64: kaslr: Put kernel vectors address in separate data page

Message ID 1512563739-25239-21-git-send-email-will.deacon@arm.com
State New
Series arm64: Unmap the kernel whilst running in userspace (KPTI)

Commit Message

Will Deacon Dec. 6, 2017, 12:35 p.m. UTC
The literal pool entry for identifying the vectors base is the only piece
of information in the trampoline page that identifies the true location
of the kernel.

This patch moves it into its own page, which is only mapped by the full
kernel page table, which protects against any accidental leakage of the
trampoline contents.
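
The trampoline can only find that page because of how the fixmap lays it out: FIX_ENTRY_TRAMP_DATA occupies the slot immediately before FIX_ENTRY_TRAMP_TEXT, so its alias lands exactly one page above TRAMP_VALIAS and the trampoline reaches the literal with a PC-relative "adr x30, tramp_vectors + PAGE_SIZE". A minimal standalone sketch of that arithmetic (illustrative only; FIXADDR_TOP and the index values below are made-up stand-ins, not the kernel's):

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define FIXADDR_TOP	0xffff7dfffe5f0000UL		/* arbitrary stand-in value */

/* Higher fixmap indices sit at lower virtual addresses. */
enum { FIX_ENTRY_TRAMP_DATA = 7, FIX_ENTRY_TRAMP_TEXT = 8 };

#define __fix_to_virt(x)	(FIXADDR_TOP - ((uint64_t)(x) << PAGE_SHIFT))
#define TRAMP_VALIAS		__fix_to_virt(FIX_ENTRY_TRAMP_TEXT)

int main(void)
{
	/*
	 * Declaring FIX_ENTRY_TRAMP_DATA one slot before FIX_ENTRY_TRAMP_TEXT
	 * puts the data page immediately above the trampoline text alias, so
	 * "adr x30, tramp_vectors + PAGE_SIZE" followed by "ldr x30, [x30]"
	 * recovers the real vectors address without the trampoline page ever
	 * containing it.
	 */
	assert(__fix_to_virt(FIX_ENTRY_TRAMP_DATA) == TRAMP_VALIAS + PAGE_SIZE);
	return 0;
}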

Suggested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>

---
 arch/arm64/include/asm/fixmap.h |  1 +
 arch/arm64/kernel/entry.S       | 11 +++++++++++
 arch/arm64/kernel/vmlinux.lds.S | 35 ++++++++++++++++++++++++++++-------
 arch/arm64/mm/mmu.c             | 10 +++++++++-
 4 files changed, 49 insertions(+), 8 deletions(-)

-- 
2.1.4

Comments

Ard Biesheuvel Dec. 6, 2017, 12:59 p.m. UTC | #1
On 6 December 2017 at 12:35, Will Deacon <will.deacon@arm.com> wrote:
> The literal pool entry for identifying the vectors base is the only piece
> of information in the trampoline page that identifies the true location
> of the kernel.
>
> This patch moves it into its own page, which is only mapped by the full
> kernel page table, which protects against any accidental leakage of the
> trampoline contents.

[...]

> @@ -1073,6 +1079,11 @@ END(tramp_exit_compat)
>
>         .ltorg
>         .popsection                             // .entry.tramp.text
> +#ifdef CONFIG_RANDOMIZE_BASE
> +       .pushsection ".entry.tramp.data", "a"   // .entry.tramp.data
> +       .quad   vectors
> +       .popsection                             // .entry.tramp.data

This does not need to be in a section of its own, and doesn't need to
be padded to a full page.

If you just stick this in .rodata but align it to page size, you can
just map whichever page it ends up in into the TRAMP_DATA fixmap slot
(which is a r/o mapping anyway). You could then drop most of the
changes below. And actually, I'm not entirely sure whether it still
makes sense then to do this only for CONFIG_RANDOMIZE_BASE.


Will Deacon Dec. 6, 2017, 1:27 p.m. UTC | #2
Hi Ard,

On Wed, Dec 06, 2017 at 12:59:40PM +0000, Ard Biesheuvel wrote:
> On 6 December 2017 at 12:35, Will Deacon <will.deacon@arm.com> wrote:
> > The literal pool entry for identifying the vectors base is the only piece
> > of information in the trampoline page that identifies the true location
> > of the kernel.
> >
> > This patch moves it into its own page, which is only mapped by the full
> > kernel page table, which protects against any accidental leakage of the
> > trampoline contents.

[...]

> > @@ -1073,6 +1079,11 @@ END(tramp_exit_compat)
> >
> >         .ltorg
> >         .popsection                             // .entry.tramp.text
> > +#ifdef CONFIG_RANDOMIZE_BASE
> > +       .pushsection ".entry.tramp.data", "a"   // .entry.tramp.data
> > +       .quad   vectors
> > +       .popsection                             // .entry.tramp.data
>
> This does not need to be in a section of its own, and doesn't need to
> be padded to a full page.
>
> If you just stick this in .rodata but align it to page size, you can
> just map whichever page it ends up in into the TRAMP_DATA fixmap slot
> (which is a r/o mapping anyway). You could then drop most of the
> changes below. And actually, I'm not entirely sure whether it still
> makes sense then to do this only for CONFIG_RANDOMIZE_BASE.

Good point; I momentarily forgot this was in the kernel page table anyway.
How about something like the diff below merged on top (so this basically
undoes a bunch of the patch)?

I'd prefer to keep the CONFIG_RANDOMIZE_BASE dependency, at least for now.

Will

--->8

diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index a70c6dd2cc19..031392ee5f47 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -1080,9 +1080,12 @@ END(tramp_exit_compat)
 	.ltorg
 	.popsection				// .entry.tramp.text
 #ifdef CONFIG_RANDOMIZE_BASE
-	.pushsection ".entry.tramp.data", "a"	// .entry.tramp.data
+	.pushsection ".rodata", "a"
+	.align PAGE_SHIFT
+	.globl	__entry_tramp_data_start
+__entry_tramp_data_start:
 	.quad	vectors
-	.popsection				// .entry.tramp.data
+	.popsection				// .rodata
 #endif /* CONFIG_RANDOMIZE_BASE */
 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
 
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 976109b3ae51..27cf9be20a00 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -64,21 +64,8 @@ jiffies = jiffies_64;
 	*(.entry.tramp.text)					\
 	. = ALIGN(PAGE_SIZE);					\
 	VMLINUX_SYMBOL(__entry_tramp_text_end) = .;
-#ifdef CONFIG_RANDOMIZE_BASE
-#define TRAMP_DATA						\
-	.entry.tramp.data : {					\
-		. = ALIGN(PAGE_SIZE);				\
-		VMLINUX_SYMBOL(__entry_tramp_data_start) = .;	\
-		*(.entry.tramp.data)				\
-		. = ALIGN(PAGE_SIZE);				\
-		VMLINUX_SYMBOL(__entry_tramp_data_end) = .;	\
-	}
-#else
-#define TRAMP_DATA
-#endif /* CONFIG_RANDOMIZE_BASE */
 #else
 #define TRAMP_TEXT
-#define TRAMP_DATA
 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
 
 /*
@@ -150,7 +137,6 @@ SECTIONS
 	RO_DATA(PAGE_SIZE)		/* everything from this point to     */
 	EXCEPTION_TABLE(8)		/* __init_begin will be marked RO NX */
 	NOTES
-	TRAMP_DATA
 
 	. = ALIGN(SEGMENT_ALIGN);
 	__init_begin = .;
@@ -268,10 +254,6 @@ ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1))
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE,
 	"Entry trampoline text too big")
-#ifdef CONFIG_RANDOMIZE_BASE
-ASSERT((__entry_tramp_data_end - __entry_tramp_data_start) == PAGE_SIZE,
-	"Entry trampoline data too big")
-#endif
 #endif
 /*
  * If padding is applied before .head.text, virt<->phys conversions will fail.
Ard Biesheuvel Dec. 6, 2017, 2:03 p.m. UTC | #3
On 6 December 2017 at 13:27, Will Deacon <will.deacon@arm.com> wrote:
> Hi Ard,
>
> On Wed, Dec 06, 2017 at 12:59:40PM +0000, Ard Biesheuvel wrote:
>> On 6 December 2017 at 12:35, Will Deacon <will.deacon@arm.com> wrote:
>> > The literal pool entry for identifying the vectors base is the only piece
>> > of information in the trampoline page that identifies the true location
>> > of the kernel.
>> >
>> > This patch moves it into its own page, which is only mapped by the full
>> > kernel page table, which protects against any accidental leakage of the
>> > trampoline contents.
>
> [...]
>
>> > @@ -1073,6 +1079,11 @@ END(tramp_exit_compat)
>> >
>> >         .ltorg
>> >         .popsection                             // .entry.tramp.text
>> > +#ifdef CONFIG_RANDOMIZE_BASE
>> > +       .pushsection ".entry.tramp.data", "a"   // .entry.tramp.data
>> > +       .quad   vectors
>> > +       .popsection                             // .entry.tramp.data
>>
>> This does not need to be in a section of its own, and doesn't need to
>> be padded to a full page.
>>
>> If you just stick this in .rodata but align it to page size, you can
>> just map whichever page it ends up in into the TRAMP_DATA fixmap slot
>> (which is a r/o mapping anyway). You could then drop most of the
>> changes below. And actually, I'm not entirely sure whether it still
>> makes sense then to do this only for CONFIG_RANDOMIZE_BASE.
>
> Good point; I momentarily forgot this was in the kernel page table anyway.
> How about something like the diff below merged on top (so this basically
> undoes a bunch of the patch)?
>

Yes, that looks much better.

> I'd prefer to keep the CONFIG_RANDOMIZE_BASE dependency, at least for now.

Fair enough.


Patch

diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index 8119b49be98d..ec1e6d6fa14c 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -59,6 +59,7 @@ enum fixed_addresses {
 #endif /* CONFIG_ACPI_APEI_GHES */
 
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+	FIX_ENTRY_TRAMP_DATA,
 	FIX_ENTRY_TRAMP_TEXT,
 #define TRAMP_VALIAS		(__fix_to_virt(FIX_ENTRY_TRAMP_TEXT))
 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 3eabcb194c87..a70c6dd2cc19 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -1030,7 +1030,13 @@ alternative_else_nop_endif
 	msr	tpidrro_el0, x30	// Restored in kernel_ventry
 	.endif
 	tramp_map_kernel	x30
+#ifdef CONFIG_RANDOMIZE_BASE
+	adr	x30, tramp_vectors + PAGE_SIZE
+alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
+	ldr	x30, [x30]
+#else
 	ldr	x30, =vectors
+#endif
 	prfm	plil1strm, [x30, #(1b - tramp_vectors)]
 	msr	vbar_el1, x30
 	add	x30, x30, #(1b - tramp_vectors)
@@ -1073,6 +1079,11 @@ END(tramp_exit_compat)
 
 	.ltorg
 	.popsection				// .entry.tramp.text
+#ifdef CONFIG_RANDOMIZE_BASE
+	.pushsection ".entry.tramp.data", "a"	// .entry.tramp.data
+	.quad	vectors
+	.popsection				// .entry.tramp.data
+#endif /* CONFIG_RANDOMIZE_BASE */
 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
 
 /*
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 6b4260f22aab..976109b3ae51 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -58,15 +58,28 @@ jiffies = jiffies_64;
 #endif
 
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-#define TRAMP_TEXT					\
-	. = ALIGN(PAGE_SIZE);				\
-	VMLINUX_SYMBOL(__entry_tramp_text_start) = .;	\
-	*(.entry.tramp.text)				\
-	. = ALIGN(PAGE_SIZE);				\
+#define TRAMP_TEXT						\
+	. = ALIGN(PAGE_SIZE);					\
+	VMLINUX_SYMBOL(__entry_tramp_text_start) = .;		\
+	*(.entry.tramp.text)					\
+	. = ALIGN(PAGE_SIZE);					\
 	VMLINUX_SYMBOL(__entry_tramp_text_end) = .;
+#ifdef CONFIG_RANDOMIZE_BASE
+#define TRAMP_DATA						\
+	.entry.tramp.data : {					\
+		. = ALIGN(PAGE_SIZE);				\
+		VMLINUX_SYMBOL(__entry_tramp_data_start) = .;	\
+		*(.entry.tramp.data)				\
+		. = ALIGN(PAGE_SIZE);				\
+		VMLINUX_SYMBOL(__entry_tramp_data_end) = .;	\
+	}
+#else
+#define TRAMP_DATA
+#endif /* CONFIG_RANDOMIZE_BASE */
 #else
 #define TRAMP_TEXT
-#endif
+#define TRAMP_DATA
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
 
 /*
  * The size of the PE/COFF section that covers the kernel image, which
@@ -137,6 +150,7 @@ SECTIONS
 	RO_DATA(PAGE_SIZE)		/* everything from this point to     */
 	EXCEPTION_TABLE(8)		/* __init_begin will be marked RO NX */
 	NOTES
+	TRAMP_DATA
 
 	. = ALIGN(SEGMENT_ALIGN);
 	__init_begin = .;
@@ -251,7 +265,14 @@ ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
 ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1))
 	<= SZ_4K, "Hibernate exit text too big or misaligned")
 #endif
-
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE,
+	"Entry trampoline text too big")
+#ifdef CONFIG_RANDOMIZE_BASE
+ASSERT((__entry_tramp_data_end - __entry_tramp_data_start) == PAGE_SIZE,
+	"Entry trampoline data too big")
+#endif
+#endif
 /*
  * If padding is applied before .head.text, virt<->phys conversions will fail.
  */
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index fe68a48c64cb..916d9ced1c3f 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -541,8 +541,16 @@ static int __init map_entry_trampoline(void)
 	__create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
 			     prot, pgd_pgtable_alloc, 0);
 
-	/* ...as well as the kernel page table */
+	/* Map both the text and data into the kernel page table */
 	__set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
+	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+		extern char __entry_tramp_data_start[];
+
+		__set_fixmap(FIX_ENTRY_TRAMP_DATA,
+			     __pa_symbol(__entry_tramp_data_start),
+			     PAGE_KERNEL_RO);
+	}
+
 	return 0;
 }
 core_initcall(map_entry_trampoline);