@@ -394,12 +394,12 @@ __create_page_tables:
* Map the kernel image (starting with PHYS_OFFSET).
*/
mov x0, x26 // swapper_pg_dir
- ldr x5, =KIMAGE_VADDR
+ ldr x5, =KIMAGE_VADDR + TEXT_OFFSET // compile time virt addr of _text
add x5, x5, x23 // add KASLR displacement
create_pgd_entry x0, x5, x3, x6
ldr w6, =kernel_img_size
add x6, x6, x5
- mov x3, x24 // phys offset
+ adrp x3, KERNEL_START // runtime phys addr of _text
create_block_map x0, x7, x3, x5, x6
/*
@@ -71,7 +71,7 @@
DEFINE_IMAGE_LE64(_kernel_offset_le, TEXT_OFFSET); \
DEFINE_IMAGE_LE64(_kernel_flags_le, __HEAD_FLAGS);
-kernel_img_size = _end - (_text - TEXT_OFFSET);
+kernel_img_size = _end - _text;
#ifdef CONFIG_EFI
For historical reasons, there is a 512 KB hole called TEXT_OFFSET below the
kernel image in memory. Since this hole is part of the kernel footprint in the
early mapping when running with 4 KB pages, we cannot avoid mapping it, but in
other cases, e.g., when running with larger page sizes, or in the future, with
more granular KASLR, there is no reason to map it explicitly like we do
currently. So update the logic so that the hole is mapped only if it occurs as
a result of rounding the start address of the kernel to swapper block size,
and leave it unmapped otherwise.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
---
 arch/arm64/kernel/head.S  | 4 ++--
 arch/arm64/kernel/image.h | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

-- 
2.5.0

_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel