
arm64: mm: use correct mapping granularity under DEBUG_RODATA

Message ID 1447669094-31076-1-git-send-email-ard.biesheuvel@linaro.org
State Accepted
Commit 4fee9f364b9b99f76732f2a6fd6df679a237fa74

Commit Message

Ard Biesheuvel Nov. 16, 2015, 10:18 a.m. UTC
When booting a 64k pages kernel that is built with CONFIG_DEBUG_RODATA
and resides at an offset that is not a multiple of 512 MB, the rounding
that occurs in __map_memblock() and fixup_executable() results in
incorrect regions being mapped.

The following snippet from /sys/kernel/debug/kernel_page_tables shows
how, when the kernel is loaded 2 MB above the base of DRAM at 0x40000000,
the first 2 MB of memory (which may be inaccessible from non-secure EL1
or just reserved by the firmware) is inadvertently mapped into the end of
the module region.

  ---[ Modules start ]---
  0xfffffdffffe00000-0xfffffe0000000000     2M RW NX ... UXN MEM/NORMAL
  ---[ Modules end ]---
  ---[ Kernel Mapping ]---
  0xfffffe0000000000-0xfffffe0000090000   576K RW NX ... UXN MEM/NORMAL
  0xfffffe0000090000-0xfffffe0000200000  1472K ro x  ... UXN MEM/NORMAL
  0xfffffe0000200000-0xfffffe0000800000     6M ro x  ... UXN MEM/NORMAL
  0xfffffe0000800000-0xfffffe0000810000    64K ro x  ... UXN MEM/NORMAL
  0xfffffe0000810000-0xfffffe0000a00000  1984K RW NX ... UXN MEM/NORMAL
  0xfffffe0000a00000-0xfffffe00ffe00000  4084M RW NX ... UXN MEM/NORMAL

The same issue is likely to occur on 16k pages kernels whose load
address is not a multiple of 32 MB (i.e., SECTION_SIZE). So round to
SWAPPER_BLOCK_SIZE instead of SECTION_SIZE.
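
To make the rounding arithmetic concrete, here is a minimal user-space sketch (not kernel code; the 0x40000000 DRAM base and 2 MB load offset come from the example above, while the 8 MB image size and the 64 KB SWAPPER_BLOCK_SIZE assumed for a 64k pages kernel are illustrative values):

/* Sketch of the rounding done in __map_memblock(), with assumed values. */
#include <stdio.h>
#include <stdint.h>

#define SECTION_SIZE        0x20000000ULL /* 512 MB PMD section with 64k pages */
#define SWAPPER_BLOCK_SIZE  0x10000ULL    /* assumed: 64 KB page granularity   */

#define round_down(x, a)  ((x) & ~((a) - 1))
#define round_up(x, a)    round_down((x) + (a) - 1, (a))

int main(void)
{
	uint64_t stext    = 0x40000000ULL + 0x200000ULL; /* 2 MB above DRAM base */
	uint64_t init_end = stext + 0x800000ULL;         /* assumed 8 MB image   */

	/* Old behaviour: rounding to SECTION_SIZE reaches 2 MB below _stext,
	 * pulling the first 2 MB of DRAM into the executable kernel mapping. */
	printf("SECTION_SIZE:       0x%llx - 0x%llx\n",
	       (unsigned long long)round_down(stext, SECTION_SIZE),
	       (unsigned long long)round_up(init_end, SECTION_SIZE));

	/* Fixed behaviour: rounding to SWAPPER_BLOCK_SIZE stays within the
	 * image, which is all the granularity the kernel mapping uses anyway. */
	printf("SWAPPER_BLOCK_SIZE: 0x%llx - 0x%llx\n",
	       (unsigned long long)round_down(stext, SWAPPER_BLOCK_SIZE),
	       (unsigned long long)round_up(init_end, SWAPPER_BLOCK_SIZE));
	return 0;
}

With these assumed values the SECTION_SIZE bounds come out as 0x40000000-0x60000000, i.e. starting 2 MB below _stext, while the SWAPPER_BLOCK_SIZE bounds coincide with the image itself.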

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>

---
 arch/arm64/mm/mmu.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

-- 
1.9.1


_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

Comments

Mark Rutland Nov. 16, 2015, 11:57 a.m. UTC | #1
Hi Ard,

On Mon, Nov 16, 2015 at 11:18:14AM +0100, Ard Biesheuvel wrote:
> When booting a 64k pages kernel that is built with CONFIG_DEBUG_RODATA
> and resides at an offset that is not a multiple of 512 MB, the rounding
> that occurs in __map_memblock() and fixup_executable() results in
> incorrect regions being mapped.
>
> The following snippet from /sys/kernel/debug/kernel_page_tables shows
> how, when the kernel is loaded 2 MB above the base of DRAM at 0x40000000,
> the first 2 MB of memory (which may be inaccessible from non-secure EL1
> or just reserved by the firmware) is inadvertently mapped into the end of
> the module region.

I assume for the above that you mean the kernel is loaded TEXT_OFFSET
above these addresses. It's only a nit, but it might make things harder
for others to reason about in future.

Perhaps re-word in terms of PHYS_OFFSET/memstart_addr? e.g. "when
PHYS_OFFSET is not a multiple of SECTION_SIZE ...".

>   ---[ Modules start ]---
>   0xfffffdffffe00000-0xfffffe0000000000     2M RW NX ... UXN MEM/NORMAL
>   ---[ Modules end ]---
>   ---[ Kernel Mapping ]---
>   0xfffffe0000000000-0xfffffe0000090000   576K RW NX ... UXN MEM/NORMAL
>   0xfffffe0000090000-0xfffffe0000200000  1472K ro x  ... UXN MEM/NORMAL
>   0xfffffe0000200000-0xfffffe0000800000     6M ro x  ... UXN MEM/NORMAL
>   0xfffffe0000800000-0xfffffe0000810000    64K ro x  ... UXN MEM/NORMAL
>   0xfffffe0000810000-0xfffffe0000a00000  1984K RW NX ... UXN MEM/NORMAL
>   0xfffffe0000a00000-0xfffffe00ffe00000  4084M RW NX ... UXN MEM/NORMAL
>
> The same issue is likely to occur on 16k pages kernels whose load
> address is not a multiple of 32 MB (i.e., SECTION_SIZE). So round to
> SWAPPER_BLOCK_SIZE instead of SECTION_SIZE.
>
> Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>

This looks good to me. With or without the rewording above:

Acked-by: Mark Rutland <mark.rutland@arm.com>


Does this need to be backported to stable?

Mark.

> ---
>  arch/arm64/mm/mmu.c | 12 ++++++------
>  1 file changed, 6 insertions(+), 6 deletions(-)
>
> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> index e3f563c81c48..32ddd893da9a 100644
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -362,8 +362,8 @@ static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
>  	 * for now. This will get more fine grained later once all memory
>  	 * is mapped
>  	 */
> -	unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
> -	unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
> +	unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE);
> +	unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE);
>  
>  	if (end < kernel_x_start) {
>  		create_mapping(start, __phys_to_virt(start),
> @@ -451,18 +451,18 @@ static void __init fixup_executable(void)
>  {
>  #ifdef CONFIG_DEBUG_RODATA
>  	/* now that we are actually fully mapped, make the start/end more fine grained */
> -	if (!IS_ALIGNED((unsigned long)_stext, SECTION_SIZE)) {
> +	if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
>  		unsigned long aligned_start = round_down(__pa(_stext),
> -							SECTION_SIZE);
> +							 SWAPPER_BLOCK_SIZE);
>  
>  		create_mapping(aligned_start, __phys_to_virt(aligned_start),
>  				__pa(_stext) - aligned_start,
>  				PAGE_KERNEL);
>  	}
>  
> -	if (!IS_ALIGNED((unsigned long)__init_end, SECTION_SIZE)) {
> +	if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
>  		unsigned long aligned_end = round_up(__pa(__init_end),
> -							SECTION_SIZE);
> +							  SWAPPER_BLOCK_SIZE);
>  		create_mapping(__pa(__init_end), (unsigned long)__init_end,
>  				aligned_end - __pa(__init_end),
>  				PAGE_KERNEL);
> -- 
> 1.9.1
> 


Ard Biesheuvel Nov. 16, 2015, noon UTC | #2
On 16 November 2015 at 12:57, Mark Rutland <mark.rutland@arm.com> wrote:
> Hi Ard,
>
> On Mon, Nov 16, 2015 at 11:18:14AM +0100, Ard Biesheuvel wrote:
>> When booting a 64k pages kernel that is built with CONFIG_DEBUG_RODATA
>> and resides at an offset that is not a multiple of 512 MB, the rounding
>> that occurs in __map_memblock() and fixup_executable() results in
>> incorrect regions being mapped.
>>
>> The following snippet from /sys/kernel/debug/kernel_page_tables shows
>> how, when the kernel is loaded 2 MB above the base of DRAM at 0x40000000,
>> the first 2 MB of memory (which may be inaccessible from non-secure EL1
>> or just reserved by the firmware) is inadvertently mapped into the end of
>> the module region.
>
> I assume for the above that you mean the kernel is loaded TEXT_OFFSET
> above these addresses. It's only a nit, but it might make things harder
> for others to reason about in future.
>
> Perhaps re-word in terms of PHYS_OFFSET/memstart_addr? e.g. "when
> PHYS_OFFSET is not a multiple of SECTION_SIZE ...".
>

The clearer the better, so yes, let's reword it.

>>   ---[ Modules start ]---
>>   0xfffffdffffe00000-0xfffffe0000000000     2M RW NX ... UXN MEM/NORMAL
>>   ---[ Modules end ]---
>>   ---[ Kernel Mapping ]---
>>   0xfffffe0000000000-0xfffffe0000090000   576K RW NX ... UXN MEM/NORMAL
>>   0xfffffe0000090000-0xfffffe0000200000  1472K ro x  ... UXN MEM/NORMAL
>>   0xfffffe0000200000-0xfffffe0000800000     6M ro x  ... UXN MEM/NORMAL
>>   0xfffffe0000800000-0xfffffe0000810000    64K ro x  ... UXN MEM/NORMAL
>>   0xfffffe0000810000-0xfffffe0000a00000  1984K RW NX ... UXN MEM/NORMAL
>>   0xfffffe0000a00000-0xfffffe00ffe00000  4084M RW NX ... UXN MEM/NORMAL
>>
>> The same issue is likely to occur on 16k pages kernels whose load
>> address is not a multiple of 32 MB (i.e., SECTION_SIZE). So round to
>> SWAPPER_BLOCK_SIZE instead of SECTION_SIZE.
>>
>> Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
>
> This looks good to me. With or without the rewording above:
>
> Acked-by: Mark Rutland <mark.rutland@arm.com>
>
> Does this need to be backported to stable?
>

It should be fixed in stable, I think, but SWAPPER_BLOCK_SIZE is new
in v4.4 so we'd need another patch anyway.

-- 
Ard.

Mark Rutland Nov. 16, 2015, 12:16 p.m. UTC | #3
> > Does this need to be backported to stable?
>
> It should be fixed in stable, I think, but SWAPPER_BLOCK_SIZE is new
> in v4.4 so we'd need another patch anyway.

I was hoping 87d1587bef394cd8 ("arm64: Move swapper pagetable
definitions") would backport easily. I just had a go and that's not the
case, and the conflict is non-trivial.

Regardless, we should probably mark this with a fixes tag so it doesn't
get lost:

Fixes: da141706aea52c1a ("arm64: add better page protections to arm64")

Mark.

Ard Biesheuvel Nov. 16, 2015, 12:18 p.m. UTC | #4
On 16 November 2015 at 13:16, Mark Rutland <mark.rutland@arm.com> wrote:
>> > Does this need to be backported to stable?
>>
>> It should be fixed in stable, I think, but SWAPPER_BLOCK_SIZE is new
>> in v4.4 so we'd need another patch anyway.
>
> I was hoping 87d1587bef394cd8 ("arm64: Move swapper pagetable
> definitions") would backport easily. I just had a go and that's not the
> case, and the conflict is non-trivial.
>
> Regardless, we should probably mark this with a fixes tag so it doesn't
> get lost:
>
> Fixes: da141706aea52c1a ("arm64: add better page protections to arm64")
>

Indeed.

For the backported version, we can just add a #define
SWAPPER_BLOCK_SIZE to mmu.c
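
A minimal sketch of what such a local definition could look like (hypothetical; the granule-to-block mapping is assumed from the pre-4.4 early mapping behaviour, and the actual stable backport may differ):

/*
 * Hypothetical backport helper for arch/arm64/mm/mmu.c on kernels that
 * lack SWAPPER_BLOCK_SIZE: assume the swapper maps the kernel with pages
 * on 64k pages configurations and with sections otherwise.
 */
#ifndef SWAPPER_BLOCK_SIZE
#ifdef CONFIG_ARM64_64K_PAGES
#define SWAPPER_BLOCK_SIZE	PAGE_SIZE
#else
#define SWAPPER_BLOCK_SIZE	SECTION_SIZE
#endif
#endif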

-- 
Ard.

Catalin Marinas Nov. 17, 2015, 11:11 a.m. UTC | #5
On Mon, Nov 16, 2015 at 01:18:47PM +0100, Ard Biesheuvel wrote:
> On 16 November 2015 at 13:16, Mark Rutland <mark.rutland@arm.com> wrote:
> >> > Does this need to be backported to stable?
> >>
> >> It should be fixed in stable, I think, but SWAPPER_BLOCK_SIZE is new
> >> in v4.4 so we'd need another patch anyway.
> >
> > I was hoping 87d1587bef394cd8 ("arm64: Move swapper pagetable
> > definitions") would backport easily. I just had a go and that's not the
> > case, and the conflict is non-trivial.
> >
> > Regardless, we should probably mark this with a fixes tag so it doesn't
> > get lost:
> >
> > Fixes: da141706aea52c1a ("arm64: add better page protections to arm64")
> 
> Indeed.
> 
> For the backported version, we can just add a #define
> SWAPPER_BLOCK_SIZE to mmu.c

Patch applied with a fixes tag. I assume you'll prepare a fix for stable
when Greg pings us.

Thanks.

-- 
Catalin

Ard Biesheuvel Nov. 17, 2015, 11:14 a.m. UTC | #6
On 17 November 2015 at 12:11, Catalin Marinas <catalin.marinas@arm.com> wrote:
> On Mon, Nov 16, 2015 at 01:18:47PM +0100, Ard Biesheuvel wrote:
>> On 16 November 2015 at 13:16, Mark Rutland <mark.rutland@arm.com> wrote:
>> >> > Does this need to be backported to stable?
>> >>
>> >> It should be fixed in stable, I think, but SWAPPER_BLOCK_SIZE is new
>> >> in v4.4 so we'd need another patch anyway.
>> >
>> > I was hoping 87d1587bef394cd8 ("arm64: Move swapper pagetable
>> > definitions") would backport easily. I just had a go and that's not the
>> > case, and the conflict is non-trivial.
>> >
>> > Regardless, we should probably mark this with a fixes tag so it doesn't
>> > get lost:
>> >
>> > Fixes: da141706aea52c1a ("arm64: add better page protections to arm64")
>>
>> Indeed.
>>
>> For the backported version, we can just add a #define
>> SWAPPER_BLOCK_SIZE to mmu.c
>
> Patch applied with a fixes tag. I assume you'll prepare a fix for stable
> when Greg pings us.
>

Indeed. We should pay attention, though, since the patch most likely
applies cleanly to older kernels, and will only break at build time
for lack of a definition of SWAPPER_BLOCK_SIZE.

-- 
Ard.


Patch

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index e3f563c81c48..32ddd893da9a 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -362,8 +362,8 @@  static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
 	 * for now. This will get more fine grained later once all memory
 	 * is mapped
 	 */
-	unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
-	unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+	unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE);
+	unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE);
 
 	if (end < kernel_x_start) {
 		create_mapping(start, __phys_to_virt(start),
@@ -451,18 +451,18 @@  static void __init fixup_executable(void)
 {
 #ifdef CONFIG_DEBUG_RODATA
 	/* now that we are actually fully mapped, make the start/end more fine grained */
-	if (!IS_ALIGNED((unsigned long)_stext, SECTION_SIZE)) {
+	if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
 		unsigned long aligned_start = round_down(__pa(_stext),
-							SECTION_SIZE);
+							 SWAPPER_BLOCK_SIZE);
 
 		create_mapping(aligned_start, __phys_to_virt(aligned_start),
 				__pa(_stext) - aligned_start,
 				PAGE_KERNEL);
 	}
 
-	if (!IS_ALIGNED((unsigned long)__init_end, SECTION_SIZE)) {
+	if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
 		unsigned long aligned_end = round_up(__pa(__init_end),
-							SECTION_SIZE);
+							  SWAPPER_BLOCK_SIZE);
 		create_mapping(__pa(__init_end), (unsigned long)__init_end,
 				aligned_end - __pa(__init_end),
 				PAGE_KERNEL);