@@ -141,6 +141,8 @@ u64 __init kaslr_early_init(void)
 		return offset % SZ_2G;
 
 	if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
+		u64 end = (u64)_end - (u64)_text + KIMAGE_VADDR;
+
 		/*
 		 * Randomize the module region over a 2 GB window covering the
 		 * kernel. This reduces the risk of modules leaking information
@@ -150,9 +152,11 @@ u64 __init kaslr_early_init(void)
 		 * resolved normally.)
 		 */
 		module_range = SZ_2G - (u64)(_end - _stext);
-		module_alloc_base = max((u64)_end + offset - SZ_2G,
+		module_alloc_base = max(end + offset - SZ_2G,
 					(u64)MODULES_VADDR);
 	} else {
+		u64 end = (u64)_etext - (u64)_text + KIMAGE_VADDR;
+
 		/*
 		 * Randomize the module region by setting module_alloc_base to
 		 * a PAGE_SIZE multiple in the range [_etext - MODULES_VSIZE,
@@ -163,7 +167,7 @@ u64 __init kaslr_early_init(void)
 		 * when ARM64_MODULE_PLTS is enabled.
 		 */
 		module_range = MODULES_VSIZE - (u64)(_etext - _stext);
-		module_alloc_base = (u64)_etext + offset - MODULES_VSIZE;
+		module_alloc_base = end + offset - MODULES_VSIZE;
 	}
 
 	/* use the lower 21 bits to randomize the base of the module region */
We will be entering kaslr_init() fully randomized, and so any addresses
taken by this code already take the randomization into account. This
means that taking the address of _end or _etext and adding offset to it
produces the wrong value, given that _end and _etext references will
have been fixed up already, and therefore already incorporate offset.

So instead of referring to these symbols directly, use their offsets
relative to _text, which should produce values that depend on the size
and layout of the Image only. Then, add KIMAGE_VADDR to obtain the
unrandomized values.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/arm64/kernel/kaslr.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)
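
As an illustration (not part of the patch), the double counting is easy
to model in user space. Below is a minimal sketch in plain C; the value
of KIMAGE_VADDR, the offset, and the image size are all invented for the
example, and only the relationships between the symbols follow the
reasoning above:

/* kaslr_offset_demo.c: model of the module base calculation,
 * using hypothetical values throughout */
#include <inttypes.h>
#include <stdio.h>

#define KIMAGE_VADDR	0xffff800010000000ULL	/* assumed link-time base */
#define SZ_2G		0x80000000ULL

int main(void)
{
	uint64_t offset     = 0x0000000002a00000ULL; /* hypothetical KASLR offset */
	uint64_t image_size = 0x0000000001400000ULL; /* hypothetical _end - _text */

	/*
	 * Once the relocated kernel is executing, references to linker
	 * symbols such as _text and _end have been fixed up, i.e. they
	 * already incorporate the randomization offset.
	 */
	uint64_t text_sym = KIMAGE_VADDR + offset;	/* what (u64)_text yields */
	uint64_t end_sym  = text_sym + image_size;	/* what (u64)_end yields  */

	/* Old expression: offset is applied a second time. */
	uint64_t old_base = end_sym + offset - SZ_2G;

	/*
	 * New expression: subtracting _text cancels the fixup, so end
	 * depends only on the size and layout of the Image, and offset
	 * is applied exactly once.
	 */
	uint64_t end      = end_sym - text_sym + KIMAGE_VADDR;
	uint64_t new_base = end + offset - SZ_2G;

	printf("old base:   0x%016" PRIx64 "\n", old_base);
	printf("new base:   0x%016" PRIx64 "\n", new_base);
	printf("difference: 0x%016" PRIx64 "\n", old_base - new_base);
	return 0;
}

The printed difference is exactly the offset: with the symbol references
already fixed up, the old expression applied the randomization twice and
so placed the module region window at the wrong address.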