--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -87,7 +87,7 @@
            + EARLY_PUDS((vstart), (vend))  /* each PUD needs a next level page table */ \
            + EARLY_PMDS((vstart), (vend))) /* each PMD needs a next level page table */
#define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR, _end))
-#define IDMAP_DIR_SIZE (IDMAP_PGTABLE_LEVELS * PAGE_SIZE)
+#define IDMAP_DIR_SIZE INIT_DIR_SIZE

/* Initial memory map size */
#if ARM64_KERNEL_USES_PMD_MAPS
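
A note on the sizing arithmetic for readers new to these macros: EARLY_PAGES() counts one PGDIR page plus one next-level table for every PGD/PUD/PMD entry spanned by [vstart, vend). The standalone C model below is only a sketch, assuming 4k pages and 48-bit VAs with four translation levels; the real macros in kernel-pgtable.h drop the levels that are folded away for a given CONFIG_PGTABLE_LEVELS. The point of the hunk above is that sizing IDMAP_DIR_SIZE like INIT_DIR_SIZE leaves enough table pages to ID-map the entire image rather than just the old __idmap_text region.

    #include <stdint.h>

    /*
     * Rough standalone model of EARLY_PAGES(). Shift values assume 4k
     * pages, 48-bit VAs, four levels. Requires vend > vstart.
     */
    #define PGDIR_SHIFT     39
    #define PUD_SHIFT       30
    #define PMD_SHIFT       21

    /* number of entries [vstart, vend) spans at a given table level */
    static uint64_t entries(uint64_t vstart, uint64_t vend, unsigned int shift)
    {
            return (((vend - 1) >> shift) - (vstart >> shift)) + 1;
    }

    static uint64_t early_pages(uint64_t vstart, uint64_t vend)
    {
            return 1                                    /* PGDIR page */
                   + entries(vstart, vend, PGDIR_SHIFT) /* PUD tables */
                   + entries(vstart, vend, PUD_SHIFT)   /* PMD tables */
                   + entries(vstart, vend, PMD_SHIFT);  /* PTE tables */
    }

For instance, a 2 MiB aligned, 30 MiB image that does not cross a PUD or PGD boundary yields 1 + 1 + 1 + 15 = 18 pages with these shifts: one PGD, one PUD table, one PMD table, and fifteen PTE tables for the fifteen 2 MiB blocks it spans.
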
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -287,7 +287,7 @@ SYM_FUNC_END(clear_page_tables)
SYM_FUNC_START_LOCAL(create_idmap)
adrp x0, idmap_pg_dir
- adrp x3, __idmap_text_start // __pa(__idmap_text_start)
+ adrp x3, _text // __pa(_text)
#ifdef CONFIG_ARM64_VA_BITS_52
mrs_s x6, SYS_ID_AA64MMFR2_EL1
@@ -312,10 +312,10 @@ SYM_FUNC_START_LOCAL(create_idmap)
* Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
* entire ID map region can be mapped. As T0SZ == (64 - #bits used),
* this number conveniently equals the number of leading zeroes in
- * the physical address of __idmap_text_end.
+ * the physical address of _end.
*/
mov x4, PTRS_PER_PGD
- adrp x5, __idmap_text_end
+ adrp x5, _end
clz x5, x5
cmp x5, TCR_T0SZ(VA_BITS_MIN) // default T0SZ small enough?
b.ge 1f // .. then skip VA range extension
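
The reasoning in the comment above is easy to sanity-check. A minimal C sketch, assuming a nonzero physical end address (max_idmap_t0sz() is an illustrative name, not a kernel symbol):

    #include <stdint.h>

    /*
     * T0SZ == 64 - #VA bits used, so the widest T0SZ that still lets
     * the ID map reach pa_end equals the number of leading zeroes in
     * pa_end -- the same thing the `clz x5, x5` above computes.
     */
    static unsigned int max_idmap_t0sz(uint64_t pa_end)
    {
            return __builtin_clzll(pa_end); /* undefined for pa_end == 0 */
    }

E.g. if __pa(_end) is 0x80a00000, the highest set bit is bit 31, so the result is 32 and a 32-bit ID map suffices. The bound is now taken from _end rather than __idmap_text_end because the page tables and the rest of the image must be reachable through the ID map as well.
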
@@ -351,7 +351,7 @@ SYM_FUNC_START_LOCAL(create_idmap)
mov x4, #1 << (PHYS_MASK_SHIFT - PGDIR_SHIFT)
#endif
1:
- adr_l x6, __idmap_text_end // __pa(__idmap_text_end)
+ adr_l x6, _end // __pa(_end)
mov x7, SWAPPER_MM_MMUFLAGS

map_memory x0, x1, x3, x6, x7, x3, x4, x10, x11, x12, x13, x14
@@ -884,7 +884,7 @@ SYM_FUNC_START_LOCAL(__primary_switch)
msr sctlr_el1, x20 // disable the MMU
isb
bl clear_page_tables
- bl create_kernel_mapping // recreate kernel mapping
+ bl create_kernel_mapping // Recreate kernel mapping

tlbi vmalle1 // Remove any stale TLB entries
dsb nsh
As a first step towards avoiding the need to create, tear down and
recreate the kernel virtual mapping with the MMU and caches disabled,
expand the ID map so that it covers the page tables as well as all
executable code. This will allow us to populate the page tables with
the MMU and caches enabled, and to call the KASLR init code before
setting up the virtual mapping.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/arm64/include/asm/kernel-pgtable.h |  2 +-
 arch/arm64/kernel/head.S                | 10 +++++-----
 2 files changed, 6 insertions(+), 6 deletions(-)
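
To make the direction of the series concrete, here is a rough C-style sketch of the boot flow this change enables. create_idmap and create_kernel_mapping are the head.S routines touched above; the remaining helper names are purely illustrative stand-ins for steps that later patches rework.

    /*
     * Declarations for the sketch. Only create_idmap() and
     * create_kernel_mapping() correspond to real head.S routines;
     * the rest are hypothetical stand-ins.
     */
    void create_idmap(void);                /* as patched: maps [_text, _end) */
    void enable_mmu_and_caches(void);       /* TTBR0 := idmap_pg_dir */
    void kaslr_early_init(void);            /* pick the random VA offset */
    void create_kernel_mapping(void);       /* populate init_pg_dir */
    void switch_to_kernel_mapping(void);    /* TTBR1 := init_pg_dir */

    void early_boot(void)
    {
            create_idmap();                 /* ID map covers code and page tables */
            enable_mmu_and_caches();        /* everything we touch is ID-mapped */
            kaslr_early_init();             /* can now run with caches on */
            create_kernel_mapping();        /* no longer needs the MMU off */
            switch_to_kernel_mapping();
    }

Since idmap_pg_dir and init_pg_dir are placed inside the image by the linker script, mapping [_text, _end) is what gives the ID map coverage of the page tables themselves.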