
arm64: make CONFIG_DEBUG_RODATA non-optional

Message ID 20160128000906.GA42530@davidb.org
State New

Commit Message

David Brown Jan. 28, 2016, 12:09 a.m. UTC
From 2efef8aa0f8f7f6277ffebe4ea6744fc93d54644 Mon Sep 17 00:00:00 2001
From: David Brown <david.brown@linaro.org>
Date: Wed, 27 Jan 2016 13:58:44 -0800

This removes the CONFIG_DEBUG_RODATA option and makes it always
enabled.

Signed-off-by: David Brown <david.brown@linaro.org>

---
v1: This is in the same spirit as the x86 patch, removing the ability
to disable this option via Kconfig.  The associated patch series adds
a runtime option for the same thing.  However, it does affect the way
some things are mapped, and could possibly result in either increased
memory usage or a performance hit (due to TLB misses from 4K pages).

I've tested this on a Hikey 96board (hi6220-hikey.dtb), both with and
without 'rodata=off' on the command line.

 arch/arm64/Kconfig              |  3 +++
 arch/arm64/Kconfig.debug        | 10 ----------
 arch/arm64/kernel/insn.c        |  2 +-
 arch/arm64/kernel/vmlinux.lds.S |  5 +----
 arch/arm64/mm/mmu.c             | 12 ------------
 5 files changed, 5 insertions(+), 27 deletions(-)

-- 
2.7.0
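
The runtime 'rodata=off' switch mentioned in the v1 note above belongs
to the associated patch series.  Purely as an illustration of how such
a switch is typically wired up (this is not that series' actual code,
and the names are made up for the example), an early_param() handler
could look like:

/*
 * Illustration only: parse a "rodata=" kernel command-line option and
 * record the result, so that mark_rodata_ro() could later be skipped
 * when booted with rodata=off.
 */
#include <linux/init.h>
#include <linux/string.h>

static bool rodata_enabled = true;

static int __init parse_rodata(char *arg)
{
	return strtobool(arg, &rodata_enabled);
}
early_param("rodata", parse_rodata);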

Comments

Ard Biesheuvel Jan. 28, 2016, 8:20 a.m. UTC | #1
On 28 January 2016 at 01:14, Kees Cook <keescook@chromium.org> wrote:
> On Wed, Jan 27, 2016 at 4:09 PM, David Brown <david.brown@linaro.org> wrote:
>> From 2efef8aa0f8f7f6277ffebe4ea6744fc93d54644 Mon Sep 17 00:00:00 2001
>> From: David Brown <david.brown@linaro.org>
>> Date: Wed, 27 Jan 2016 13:58:44 -0800
>>
>> This removes the CONFIG_DEBUG_RODATA option and makes it always
>> enabled.
>>
>> Signed-off-by: David Brown <david.brown@linaro.org>
>
> I'm all for this!
>

I agree that this is probably a good idea, but please note that Mark
Rutland's pagetable rework patches targeted for v4.6 make significant
changes in this area, so you're probably better off building on top of
those.

-- 
Ard.

> Reviewed-by: Kees Cook <keescook@chromium.org>
>
> -Kees

Mark Rutland Jan. 28, 2016, 11:06 a.m. UTC | #2
Hi,

On Wed, Jan 27, 2016 at 05:09:06PM -0700, David Brown wrote:
> From 2efef8aa0f8f7f6277ffebe4ea6744fc93d54644 Mon Sep 17 00:00:00 2001
> From: David Brown <david.brown@linaro.org>
> Date: Wed, 27 Jan 2016 13:58:44 -0800
>
> This removes the CONFIG_DEBUG_RODATA option and makes it always
> enabled.
>
> Signed-off-by: David Brown <david.brown@linaro.org>

As Ard notes, my pagetable rework series [1] changes this code quite
significantly, and this will need to be rebased atop of that (and
possibly Ard's kASLR changes [2]).

I certainly want to always have the kernel text RO. I was waiting for
those two series to settle first.

> ---
> v1: This is in the same spirit as the x86 patch, removing the ability
> to disable this option via Kconfig.  The associated patch series adds
> a runtime option for the same thing.  However, it does affect the way
> some things are mapped, and could possibly result in either increased
> memory usage or a performance hit (due to TLB misses from 4K pages).


With my series [1] the text/rodata "chunk" of the kernel is always
mapped with page-granular boundaries (using sections internally).
Previously we always carved out the init area, so the kernel mapping was
always split.

Atop of my series this change should not increase memory usage or TLB
pressure given that it should only change the permissions.

One thing I would like to do is to avoid the need for fixup_executable
entirely, by mapping the kernel text RO from the outset. However, that
requires rework of the alternatives patching (to use a temporary RW
alias), and I haven't had the time to look into that yet.

Thanks,
Mark.

[1] http://lists.infradead.org/pipermail/linux-arm-kernel/2016-January/397095.html
[2] http://lists.infradead.org/pipermail/linux-arm-kernel/2016-January/402066.html
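
The temporary RW alias Mark describes is in the same spirit as the
existing patch_map()/patch_unmap() helpers in arch/arm64/kernel/insn.c,
which remap the page being patched through a fixmap slot.  A rough
sketch of the idea follows; it is illustrative only, not code from
Mark's series, and the function name is invented for the example.

/*
 * Illustrative sketch: write to read-only kernel text through a
 * temporary RW alias created with the FIX_TEXT_POKE0 fixmap slot,
 * much like __aarch64_insn_write() does today.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>

static void patch_text_via_alias(u32 *addr, u32 insn)
{
	unsigned long uintaddr = (uintptr_t)addr;
	void *waddr;

	/* Map the page containing 'addr' writable via the fixmap. */
	waddr = (void *)set_fixmap_offset(FIX_TEXT_POKE0,
			page_to_phys(virt_to_page(addr)) +
			(uintaddr & ~PAGE_MASK));

	/* A64 instructions are always little-endian. */
	*(__le32 *)waddr = cpu_to_le32(insn);
	flush_icache_range(uintaddr, uintaddr + sizeof(insn));

	/* Tear the alias down again. */
	clear_fixmap(FIX_TEXT_POKE0);
}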

Mark Rutland Jan. 28, 2016, 2:59 p.m. UTC | #3
On Thu, Jan 28, 2016 at 06:06:53AM -0800, Kees Cook wrote:
> On Thu, Jan 28, 2016 at 3:06 AM, Mark Rutland <mark.rutland@arm.com> wrote:
> > One thing I would like to do is to avoid the need for fixup_executable
> > entirely, by mapping the kernel text RO from the outset. However, that
> > requires rework of the alternatives patching (to use a temporary RW
> > alias), and I haven't had the time to look into that yet.
>
> This makes perfect sense for the rodata section, but the (future)
> postinit_rodata section we'll still want to mark RO after init
> finishes. x86 and ARM cheat by marking both RO after init, and they
> don't have to pad sections. parisc will need to solve this too.

Depending on how many postinit_rodata variables there are, we might be
able to drop those in .rodata, have them RO always, and initialise them
via a temporary RW alias (e.g. something in the vmalloc area).

The only requirement for that is that we use a helper to initialise any
__postinit_ro variables via a temporary RW alias, e.g.

#define SET_POST_INIT_RO(ptr, v) ({			\
	typeof(ptr) *__ptr_rw;				\
	BUG_ON(initcalls_done);				\
	__ptr_rw = create_rw_alias(&(ptr));		\
	*__ptr_rw = (v);				\
	destroy_rw_alias(__ptr_rw);			\
})

...

__postinit_ro void *thing;

void __init some_init_func(void)
{
	void *__thing = some_randomized_allocator();
	SET_POST_INIT_RO(thing, __thing);
}


Mark.
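
The create_rw_alias()/destroy_rw_alias() helpers in the sketch above do
not exist in the kernel; one way they could plausibly be built on the
existing vmap() API is to map the page holding the read-only variable a
second time with ordinary RW permissions and write through that alias,
along these lines (again only a sketch, not code from any posted series):

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Map the page containing 'ptr' a second time, writable. */
static void *create_rw_alias(void *ptr)
{
	struct page *page = virt_to_page(ptr);
	void *alias = vmap(&page, 1, VM_MAP, PAGE_KERNEL);

	if (WARN_ON(!alias))
		return NULL;

	/* Preserve the variable's offset within its page. */
	return alias + offset_in_page(ptr);
}

/* Remove the temporary writable mapping again. */
static void destroy_rw_alias(void *alias)
{
	vunmap((void *)((unsigned long)alias & PAGE_MASK));
}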

Patch

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 8cc6228..ffa617a 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -201,6 +201,9 @@  config KERNEL_MODE_NEON
 config FIX_EARLYCON_MEM
 	def_bool y
 
+config DEBUG_RODATA
+	def_bool y
+
 config PGTABLE_LEVELS
 	int
 	default 2 if ARM64_16K_PAGES && ARM64_VA_BITS_36
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index e13c4bf..db994ec 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -48,16 +48,6 @@  config DEBUG_SET_MODULE_RONX
           against certain classes of kernel exploits.
           If in doubt, say "N".
 
-config DEBUG_RODATA
-	bool "Make kernel text and rodata read-only"
-	help
-	  If this is set, kernel text and rodata will be made read-only. This
-	  is to help catch accidental or malicious attempts to change the
-	  kernel's executable code. Additionally splits rodata from kernel
-	  text so it can be made explicitly non-executable.
-
-          If in doubt, say Y
-
 config DEBUG_ALIGN_RODATA
 	depends on DEBUG_RODATA && ARM64_4K_PAGES
 	bool "Align linker sections up to SECTION_SIZE"
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 7371455..a04bdef 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -95,7 +95,7 @@  static void __kprobes *patch_map(void *addr, int fixmap)
 
 	if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
 		page = vmalloc_to_page(addr);
-	else if (!module && IS_ENABLED(CONFIG_DEBUG_RODATA))
+	else if (!module)
 		page = virt_to_page(addr);
 	else
 		return addr;
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index e3928f5..f80903c 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -65,12 +65,9 @@  PECOFF_FILE_ALIGNMENT = 0x200;
 #if defined(CONFIG_DEBUG_ALIGN_RODATA)
 #define ALIGN_DEBUG_RO			. = ALIGN(1<<SECTION_SHIFT);
 #define ALIGN_DEBUG_RO_MIN(min)		ALIGN_DEBUG_RO
-#elif defined(CONFIG_DEBUG_RODATA)
+#else
 #define ALIGN_DEBUG_RO			. = ALIGN(1<<PAGE_SHIFT);
 #define ALIGN_DEBUG_RO_MIN(min)		ALIGN_DEBUG_RO
-#else
-#define ALIGN_DEBUG_RO
-#define ALIGN_DEBUG_RO_MIN(min)		. = ALIGN(min);
 #endif
 
 SECTIONS
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 58faeaa..3b411b7 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -313,7 +313,6 @@  static void create_mapping_late(phys_addr_t phys, unsigned long virt,
 				phys, virt, size, prot, late_alloc);
 }
 
-#ifdef CONFIG_DEBUG_RODATA
 static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
 {
 	/*
@@ -347,13 +346,6 @@  static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
 	}
 
 }
-#else
-static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
-{
-	create_mapping(start, __phys_to_virt(start), end - start,
-			PAGE_KERNEL_EXEC);
-}
-#endif
 
 static void __init map_mem(void)
 {
@@ -410,7 +402,6 @@  static void __init map_mem(void)
 
 static void __init fixup_executable(void)
 {
-#ifdef CONFIG_DEBUG_RODATA
 	/* now that we are actually fully mapped, make the start/end more fine grained */
 	if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
 		unsigned long aligned_start = round_down(__pa(_stext),
@@ -428,10 +419,8 @@  static void __init fixup_executable(void)
 				aligned_end - __pa(__init_end),
 				PAGE_KERNEL);
 	}
-#endif
 }
 
-#ifdef CONFIG_DEBUG_RODATA
 void mark_rodata_ro(void)
 {
 	create_mapping_late(__pa(_stext), (unsigned long)_stext,
@@ -439,7 +428,6 @@  void mark_rodata_ro(void)
 				PAGE_KERNEL_ROX);
 
 }
-#endif
 
 void fixup_init(void)
 {