
arm64: kprobe: make page to RO mode when allocate it

Message ID 20181015111600.5479-1-anders.roxell@linaro.org
State Superseded
Series arm64: kprobe: make page to RO mode when allocate it

Commit Message

Anders Roxell Oct. 15, 2018, 11:16 a.m. UTC
Commit 1404d6f13e47 ("arm64: dump: Add checking for writable and exectuable pages")
has successfully identified code that leaves a page with W+X
permissions.

[    3.245140] arm64/mm: Found insecure W+X mapping at address (____ptrval____)/0xffff000000d90000
[    3.245771] WARNING: CPU: 0 PID: 1 at ../arch/arm64/mm/dump.c:232 note_page+0x410/0x420
[    3.246141] Modules linked in:
[    3.246653] CPU: 0 PID: 1 Comm: swapper/0 Not tainted 4.19.0-rc5-next-20180928-00001-ge70ae259b853-dirty #62
[    3.247008] Hardware name: linux,dummy-virt (DT)
[    3.247347] pstate: 80000005 (Nzcv daif -PAN -UAO)
[    3.247623] pc : note_page+0x410/0x420
[    3.247898] lr : note_page+0x410/0x420
[    3.248071] sp : ffff00000804bcd0
[    3.248254] x29: ffff00000804bcd0 x28: ffff000009274000
[    3.248578] x27: ffff00000921a000 x26: ffff80007dfff000
[    3.248845] x25: ffff0000093f5000 x24: ffff000009526f6a
[    3.249109] x23: 0000000000000004 x22: ffff000000d91000
[    3.249396] x21: ffff000000d90000 x20: 0000000000000000
[    3.249661] x19: ffff00000804bde8 x18: 0000000000000400
[    3.249924] x17: 0000000000000000 x16: 0000000000000000
[    3.250271] x15: ffffffffffffffff x14: 295f5f5f5f6c6176
[    3.250594] x13: 7274705f5f5f5f28 x12: 2073736572646461
[    3.250941] x11: 20746120676e6970 x10: 70616d20582b5720
[    3.251252] x9 : 6572756365736e69 x8 : 3039643030303030
[    3.251519] x7 : 306666666678302f x6 : ffff0000095467b2
[    3.251802] x5 : 0000000000000000 x4 : 0000000000000000
[    3.252060] x3 : 0000000000000000 x2 : ffffffffffffffff
[    3.252323] x1 : 4d151327adc50b00 x0 : 0000000000000000
[    3.252664] Call trace:
[    3.252953]  note_page+0x410/0x420
[    3.253186]  walk_pgd+0x12c/0x238
[    3.253417]  ptdump_check_wx+0x68/0xf8
[    3.253637]  mark_rodata_ro+0x68/0x98
[    3.253847]  kernel_init+0x38/0x160
[    3.254103]  ret_from_fork+0x10/0x18

Rework so that when a page is allocated it is set to read-only (RO) mode. Inspired by
commit 63fef14fc98a ("kprobes/x86: Make insn buffer always ROX and use text_poke()").

Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Laura Abbott <labbott@redhat.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Co-developed-by: Arnd Bergmann <arnd@arndb.de>
Co-developed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Anders Roxell <anders.roxell@linaro.org>

---
 arch/arm64/kernel/probes/kprobes.c | 27 ++++++++++++++++++++-------
 1 file changed, 20 insertions(+), 7 deletions(-)

-- 
2.19.1
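
For context: the generic kprobes core defines alloc_insn_page() as a weak
symbol that architectures may override, which is why simply defining the
function in arm64 code takes effect. The generic fallback (roughly, from
kernel/kprobes.c of this era) hands back module memory, which arm64 maps
writable and executable -- exactly what trips the W+X checker:

void __weak *alloc_insn_page(void)
{
	/* default: module memory, mapped both writable and executable */
	return module_alloc(PAGE_SIZE);
}

Because the replacement page is read-only from the moment it is allocated,
arch_prepare_ss_slot() can no longer store the opcode into the slot
directly; the patch therefore routes the write through
aarch64_insn_patch_text(), which pokes kernel text via a temporary mapping.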

Comments

Ard Biesheuvel Oct. 15, 2018, 11:22 a.m. UTC | #1
(+ Masami)

On 15 October 2018 at 13:16, Anders Roxell <anders.roxell@linaro.org> wrote:
> Commit 1404d6f13e47 ("arm64: dump: Add checking for writable and exectuable pages")
> has successfully identified code that leaves a page with W+X
> permissions.
>
> [    3.245140] arm64/mm: Found insecure W+X mapping at address (____ptrval____)/0xffff000000d90000
> [    3.245771] WARNING: CPU: 0 PID: 1 at ../arch/arm64/mm/dump.c:232 note_page+0x410/0x420
> [    3.246141] Modules linked in:
> [    3.246653] CPU: 0 PID: 1 Comm: swapper/0 Not tainted 4.19.0-rc5-next-20180928-00001-ge70ae259b853-dirty #62
> [    3.247008] Hardware name: linux,dummy-virt (DT)
> [    3.247347] pstate: 80000005 (Nzcv daif -PAN -UAO)
> [    3.247623] pc : note_page+0x410/0x420
> [    3.247898] lr : note_page+0x410/0x420
> [    3.248071] sp : ffff00000804bcd0
> [    3.248254] x29: ffff00000804bcd0 x28: ffff000009274000
> [    3.248578] x27: ffff00000921a000 x26: ffff80007dfff000
> [    3.248845] x25: ffff0000093f5000 x24: ffff000009526f6a
> [    3.249109] x23: 0000000000000004 x22: ffff000000d91000
> [    3.249396] x21: ffff000000d90000 x20: 0000000000000000
> [    3.249661] x19: ffff00000804bde8 x18: 0000000000000400
> [    3.249924] x17: 0000000000000000 x16: 0000000000000000
> [    3.250271] x15: ffffffffffffffff x14: 295f5f5f5f6c6176
> [    3.250594] x13: 7274705f5f5f5f28 x12: 2073736572646461
> [    3.250941] x11: 20746120676e6970 x10: 70616d20582b5720
> [    3.251252] x9 : 6572756365736e69 x8 : 3039643030303030
> [    3.251519] x7 : 306666666678302f x6 : ffff0000095467b2
> [    3.251802] x5 : 0000000000000000 x4 : 0000000000000000
> [    3.252060] x3 : 0000000000000000 x2 : ffffffffffffffff
> [    3.252323] x1 : 4d151327adc50b00 x0 : 0000000000000000
> [    3.252664] Call trace:
> [    3.252953]  note_page+0x410/0x420
> [    3.253186]  walk_pgd+0x12c/0x238
> [    3.253417]  ptdump_check_wx+0x68/0xf8
> [    3.253637]  mark_rodata_ro+0x68/0x98
> [    3.253847]  kernel_init+0x38/0x160
> [    3.254103]  ret_from_fork+0x10/0x18
>
> Rework so that when a page is allocated it is set to read-only (RO) mode. Inspired by
> commit 63fef14fc98a ("kprobes/x86: Make insn buffer always ROX and use text_poke()").
>
> Cc: Arnd Bergmann <arnd@arndb.de>
> Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
> Cc: Laura Abbott <labbott@redhat.com>
> Cc: Catalin Marinas <catalin.marinas@arm.com>
> Co-developed-by: Arnd Bergmann <arnd@arndb.de>
> Co-developed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
> Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
> ---
>  arch/arm64/kernel/probes/kprobes.c | 27 ++++++++++++++++++++-------
>  1 file changed, 20 insertions(+), 7 deletions(-)
>
> diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
> index 9b65132e789a..b842e908b423 100644
> --- a/arch/arm64/kernel/probes/kprobes.c
> +++ b/arch/arm64/kernel/probes/kprobes.c
> @@ -23,7 +23,9 @@
>  #include <linux/slab.h>
>  #include <linux/stop_machine.h>
>  #include <linux/sched/debug.h>
> +#include <linux/set_memory.h>
>  #include <linux/stringify.h>
> +#include <linux/vmalloc.h>
>  #include <asm/traps.h>
>  #include <asm/ptrace.h>
>  #include <asm/cacheflush.h>
> @@ -42,10 +44,21 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
>  static void __kprobes
>  post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
>
> +static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
> +{
> +       void *addrs[1];
> +       u32 insns[1];
> +
> +       addrs[0] = (void *)addr;
> +       insns[0] = (u32)opcode;
> +
> +       return aarch64_insn_patch_text(addrs, insns, 1);
> +}
> +
>  static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
>  {
>         /* prepare insn slot */
> -       p->ainsn.api.insn[0] = cpu_to_le32(p->opcode);
> +       patch_text(p->ainsn.api.insn, p->opcode);
>
>         flush_icache_range((uintptr_t) (p->ainsn.api.insn),
>                            (uintptr_t) (p->ainsn.api.insn) +
> @@ -118,15 +131,15 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
>         return 0;
>  }
>
> -static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
> +void *alloc_insn_page(void)
>  {
> -       void *addrs[1];
> -       u32 insns[1];
> +       void *page;
>
> -       addrs[0] = (void *)addr;
> -       insns[0] = (u32)opcode;
> +       page = vmalloc_exec(PAGE_SIZE);
> +       if (page)
> +               set_memory_ro((unsigned long)page & PAGE_MASK, 1);
>
> -       return aarch64_insn_patch_text(addrs, insns, 1);
> +       return page;
>  }
>
>  /* arm kprobe: install breakpoint in text */
> --
> 2.19.1
>
Masami Hiramatsu (Google) Oct. 15, 2018, 12:27 p.m. UTC | #2
On Mon, 15 Oct 2018 13:22:12 +0200
Ard Biesheuvel <ard.biesheuvel@linaro.org> wrote:

> (+ Masami)
>
> On 15 October 2018 at 13:16, Anders Roxell <anders.roxell@linaro.org> wrote:
> > Commit 1404d6f13e47 ("arm64: dump: Add checking for writable and exectuable pages")
> > has successfully identified code that leaves a page with W+X
> > permissions.
> >
> > [    3.245140] arm64/mm: Found insecure W+X mapping at address (____ptrval____)/0xffff000000d90000
> > [    3.245771] WARNING: CPU: 0 PID: 1 at ../arch/arm64/mm/dump.c:232 note_page+0x410/0x420
> > [    3.246141] Modules linked in:
> > [    3.246653] CPU: 0 PID: 1 Comm: swapper/0 Not tainted 4.19.0-rc5-next-20180928-00001-ge70ae259b853-dirty #62
> > [    3.247008] Hardware name: linux,dummy-virt (DT)
> > [    3.247347] pstate: 80000005 (Nzcv daif -PAN -UAO)
> > [    3.247623] pc : note_page+0x410/0x420
> > [    3.247898] lr : note_page+0x410/0x420
> > [    3.248071] sp : ffff00000804bcd0
> > [    3.248254] x29: ffff00000804bcd0 x28: ffff000009274000
> > [    3.248578] x27: ffff00000921a000 x26: ffff80007dfff000
> > [    3.248845] x25: ffff0000093f5000 x24: ffff000009526f6a
> > [    3.249109] x23: 0000000000000004 x22: ffff000000d91000
> > [    3.249396] x21: ffff000000d90000 x20: 0000000000000000
> > [    3.249661] x19: ffff00000804bde8 x18: 0000000000000400
> > [    3.249924] x17: 0000000000000000 x16: 0000000000000000
> > [    3.250271] x15: ffffffffffffffff x14: 295f5f5f5f6c6176
> > [    3.250594] x13: 7274705f5f5f5f28 x12: 2073736572646461
> > [    3.250941] x11: 20746120676e6970 x10: 70616d20582b5720
> > [    3.251252] x9 : 6572756365736e69 x8 : 3039643030303030
> > [    3.251519] x7 : 306666666678302f x6 : ffff0000095467b2
> > [    3.251802] x5 : 0000000000000000 x4 : 0000000000000000
> > [    3.252060] x3 : 0000000000000000 x2 : ffffffffffffffff
> > [    3.252323] x1 : 4d151327adc50b00 x0 : 0000000000000000
> > [    3.252664] Call trace:
> > [    3.252953]  note_page+0x410/0x420
> > [    3.253186]  walk_pgd+0x12c/0x238
> > [    3.253417]  ptdump_check_wx+0x68/0xf8
> > [    3.253637]  mark_rodata_ro+0x68/0x98
> > [    3.253847]  kernel_init+0x38/0x160
> > [    3.254103]  ret_from_fork+0x10/0x18
> >
> > Rework so that when a page is allocated it is set to read-only (RO) mode. Inspired by
> > commit 63fef14fc98a ("kprobes/x86: Make insn buffer always ROX and use text_poke()").

Yeah, that looks like the same issue we had on x86, and the fix looks good to me.
Thank you for fixing it!

Acked-by: Masami Hiramatsu <mhiramat@kernel.org>

> >
> > Cc: Arnd Bergmann <arnd@arndb.de>
> > Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
> > Cc: Laura Abbott <labbott@redhat.com>
> > Cc: Catalin Marinas <catalin.marinas@arm.com>
> > Co-developed-by: Arnd Bergmann <arnd@arndb.de>
> > Co-developed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
> > Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
> > ---
> >  arch/arm64/kernel/probes/kprobes.c | 27 ++++++++++++++++++++-------
> >  1 file changed, 20 insertions(+), 7 deletions(-)
> >
> > diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
> > index 9b65132e789a..b842e908b423 100644
> > --- a/arch/arm64/kernel/probes/kprobes.c
> > +++ b/arch/arm64/kernel/probes/kprobes.c
> > @@ -23,7 +23,9 @@
> >  #include <linux/slab.h>
> >  #include <linux/stop_machine.h>
> >  #include <linux/sched/debug.h>
> > +#include <linux/set_memory.h>
> >  #include <linux/stringify.h>
> > +#include <linux/vmalloc.h>
> >  #include <asm/traps.h>
> >  #include <asm/ptrace.h>
> >  #include <asm/cacheflush.h>
> > @@ -42,10 +44,21 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
> >  static void __kprobes
> >  post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
> >
> > +static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
> > +{
> > +       void *addrs[1];
> > +       u32 insns[1];
> > +
> > +       addrs[0] = (void *)addr;
> > +       insns[0] = (u32)opcode;
> > +
> > +       return aarch64_insn_patch_text(addrs, insns, 1);
> > +}
> > +
> >  static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
> >  {
> >         /* prepare insn slot */
> > -       p->ainsn.api.insn[0] = cpu_to_le32(p->opcode);
> > +       patch_text(p->ainsn.api.insn, p->opcode);
> >
> >         flush_icache_range((uintptr_t) (p->ainsn.api.insn),
> >                            (uintptr_t) (p->ainsn.api.insn) +
> > @@ -118,15 +131,15 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
> >         return 0;
> >  }
> >
> > -static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
> > +void *alloc_insn_page(void)
> >  {
> > -       void *addrs[1];
> > -       u32 insns[1];
> > +       void *page;
> >
> > -       addrs[0] = (void *)addr;
> > -       insns[0] = (u32)opcode;
> > +       page = vmalloc_exec(PAGE_SIZE);
> > +       if (page)
> > +               set_memory_ro((unsigned long)page & PAGE_MASK, 1);
> >
> > -       return aarch64_insn_patch_text(addrs, insns, 1);
> > +       return page;
> >  }
> >
> >  /* arm kprobe: install breakpoint in text */
> > --
> > 2.19.1
> >

-- 
Masami Hiramatsu <mhiramat@kernel.org>
Laura Abbott Oct. 22, 2018, 10:56 a.m. UTC | #3
On 10/15/2018 04:16 AM, Anders Roxell wrote:
> Commit 1404d6f13e47 ("arm64: dump: Add checking for writable and exectuable pages")
> has successfully identified code that leaves a page with W+X
> permissions.
>
> [    3.245140] arm64/mm: Found insecure W+X mapping at address (____ptrval____)/0xffff000000d90000
> [    3.245771] WARNING: CPU: 0 PID: 1 at ../arch/arm64/mm/dump.c:232 note_page+0x410/0x420
> [    3.246141] Modules linked in:
> [    3.246653] CPU: 0 PID: 1 Comm: swapper/0 Not tainted 4.19.0-rc5-next-20180928-00001-ge70ae259b853-dirty #62
> [    3.247008] Hardware name: linux,dummy-virt (DT)
> [    3.247347] pstate: 80000005 (Nzcv daif -PAN -UAO)
> [    3.247623] pc : note_page+0x410/0x420
> [    3.247898] lr : note_page+0x410/0x420
> [    3.248071] sp : ffff00000804bcd0
> [    3.248254] x29: ffff00000804bcd0 x28: ffff000009274000
> [    3.248578] x27: ffff00000921a000 x26: ffff80007dfff000
> [    3.248845] x25: ffff0000093f5000 x24: ffff000009526f6a
> [    3.249109] x23: 0000000000000004 x22: ffff000000d91000
> [    3.249396] x21: ffff000000d90000 x20: 0000000000000000
> [    3.249661] x19: ffff00000804bde8 x18: 0000000000000400
> [    3.249924] x17: 0000000000000000 x16: 0000000000000000
> [    3.250271] x15: ffffffffffffffff x14: 295f5f5f5f6c6176
> [    3.250594] x13: 7274705f5f5f5f28 x12: 2073736572646461
> [    3.250941] x11: 20746120676e6970 x10: 70616d20582b5720
> [    3.251252] x9 : 6572756365736e69 x8 : 3039643030303030
> [    3.251519] x7 : 306666666678302f x6 : ffff0000095467b2
> [    3.251802] x5 : 0000000000000000 x4 : 0000000000000000
> [    3.252060] x3 : 0000000000000000 x2 : ffffffffffffffff
> [    3.252323] x1 : 4d151327adc50b00 x0 : 0000000000000000
> [    3.252664] Call trace:
> [    3.252953]  note_page+0x410/0x420
> [    3.253186]  walk_pgd+0x12c/0x238
> [    3.253417]  ptdump_check_wx+0x68/0xf8
> [    3.253637]  mark_rodata_ro+0x68/0x98
> [    3.253847]  kernel_init+0x38/0x160
> [    3.254103]  ret_from_fork+0x10/0x18
>
> Rework so that when a page is allocated it is set to read-only (RO) mode. Inspired by
> commit 63fef14fc98a ("kprobes/x86: Make insn buffer always ROX and use text_poke()").
>

Reviewed-by: Laura Abbott <labbott@redhat.com>

> Cc: Arnd Bergmann <arnd@arndb.de>
> Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
> Cc: Laura Abbott <labbott@redhat.com>
> Cc: Catalin Marinas <catalin.marinas@arm.com>
> Co-developed-by: Arnd Bergmann <arnd@arndb.de>
> Co-developed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
> Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
> ---
>   arch/arm64/kernel/probes/kprobes.c | 27 ++++++++++++++++++++-------
>   1 file changed, 20 insertions(+), 7 deletions(-)
>
> diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
> index 9b65132e789a..b842e908b423 100644
> --- a/arch/arm64/kernel/probes/kprobes.c
> +++ b/arch/arm64/kernel/probes/kprobes.c
> @@ -23,7 +23,9 @@
>   #include <linux/slab.h>
>   #include <linux/stop_machine.h>
>   #include <linux/sched/debug.h>
> +#include <linux/set_memory.h>
>   #include <linux/stringify.h>
> +#include <linux/vmalloc.h>
>   #include <asm/traps.h>
>   #include <asm/ptrace.h>
>   #include <asm/cacheflush.h>
> @@ -42,10 +44,21 @@  DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
>   static void __kprobes
>   post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
>   
> +static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
> +{
> +	void *addrs[1];
> +	u32 insns[1];
> +
> +	addrs[0] = (void *)addr;
> +	insns[0] = (u32)opcode;
> +
> +	return aarch64_insn_patch_text(addrs, insns, 1);
> +}
> +
>   static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
>   {
>   	/* prepare insn slot */
> -	p->ainsn.api.insn[0] = cpu_to_le32(p->opcode);
> +	patch_text(p->ainsn.api.insn, p->opcode);
>   
>   	flush_icache_range((uintptr_t) (p->ainsn.api.insn),
>   			   (uintptr_t) (p->ainsn.api.insn) +
> @@ -118,15 +131,15 @@  int __kprobes arch_prepare_kprobe(struct kprobe *p)
>   	return 0;
>   }
>   
> -static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
> +void *alloc_insn_page(void)
>   {
> -	void *addrs[1];
> -	u32 insns[1];
> +	void *page;
>   
> -	addrs[0] = (void *)addr;
> -	insns[0] = (u32)opcode;
> +	page = vmalloc_exec(PAGE_SIZE);
> +	if (page)
> +		set_memory_ro((unsigned long)page & PAGE_MASK, 1);
>   
> -	return aarch64_insn_patch_text(addrs, insns, 1);
> +	return page;
>   }
>   
>   /* arm kprobe: install breakpoint in text */
>
Will Deacon Oct. 29, 2018, 12:04 p.m. UTC | #4
Hi Anders,

On Mon, Oct 15, 2018 at 01:16:00PM +0200, Anders Roxell wrote:
> Commit 1404d6f13e47 ("arm64: dump: Add checking for writable and exectuable pages")
> has successfully identified code that leaves a page with W+X
> permissions.
>
> [    3.245140] arm64/mm: Found insecure W+X mapping at address (____ptrval____)/0xffff000000d90000
> [    3.245771] WARNING: CPU: 0 PID: 1 at ../arch/arm64/mm/dump.c:232 note_page+0x410/0x420
> [    3.246141] Modules linked in:
> [    3.246653] CPU: 0 PID: 1 Comm: swapper/0 Not tainted 4.19.0-rc5-next-20180928-00001-ge70ae259b853-dirty #62
> [    3.247008] Hardware name: linux,dummy-virt (DT)
> [    3.247347] pstate: 80000005 (Nzcv daif -PAN -UAO)
> [    3.247623] pc : note_page+0x410/0x420
> [    3.247898] lr : note_page+0x410/0x420
> [    3.248071] sp : ffff00000804bcd0
> [    3.248254] x29: ffff00000804bcd0 x28: ffff000009274000
> [    3.248578] x27: ffff00000921a000 x26: ffff80007dfff000
> [    3.248845] x25: ffff0000093f5000 x24: ffff000009526f6a
> [    3.249109] x23: 0000000000000004 x22: ffff000000d91000
> [    3.249396] x21: ffff000000d90000 x20: 0000000000000000
> [    3.249661] x19: ffff00000804bde8 x18: 0000000000000400
> [    3.249924] x17: 0000000000000000 x16: 0000000000000000
> [    3.250271] x15: ffffffffffffffff x14: 295f5f5f5f6c6176
> [    3.250594] x13: 7274705f5f5f5f28 x12: 2073736572646461
> [    3.250941] x11: 20746120676e6970 x10: 70616d20582b5720
> [    3.251252] x9 : 6572756365736e69 x8 : 3039643030303030
> [    3.251519] x7 : 306666666678302f x6 : ffff0000095467b2
> [    3.251802] x5 : 0000000000000000 x4 : 0000000000000000
> [    3.252060] x3 : 0000000000000000 x2 : ffffffffffffffff
> [    3.252323] x1 : 4d151327adc50b00 x0 : 0000000000000000
> [    3.252664] Call trace:
> [    3.252953]  note_page+0x410/0x420
> [    3.253186]  walk_pgd+0x12c/0x238
> [    3.253417]  ptdump_check_wx+0x68/0xf8
> [    3.253637]  mark_rodata_ro+0x68/0x98
> [    3.253847]  kernel_init+0x38/0x160
> [    3.254103]  ret_from_fork+0x10/0x18
>
> Rework so that when a page is allocated it is set to read-only (RO) mode. Inspired by
> commit 63fef14fc98a ("kprobes/x86: Make insn buffer always ROX and use text_poke()").
>
> Cc: Arnd Bergmann <arnd@arndb.de>
> Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
> Cc: Laura Abbott <labbott@redhat.com>
> Cc: Catalin Marinas <catalin.marinas@arm.com>
> Co-developed-by: Arnd Bergmann <arnd@arndb.de>
> Co-developed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
> Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
> ---
>  arch/arm64/kernel/probes/kprobes.c | 27 ++++++++++++++++++++-------
>  1 file changed, 20 insertions(+), 7 deletions(-)
>
> diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
> index 9b65132e789a..b842e908b423 100644
> --- a/arch/arm64/kernel/probes/kprobes.c
> +++ b/arch/arm64/kernel/probes/kprobes.c
> @@ -23,7 +23,9 @@
>  #include <linux/slab.h>
>  #include <linux/stop_machine.h>
>  #include <linux/sched/debug.h>
> +#include <linux/set_memory.h>
>  #include <linux/stringify.h>
> +#include <linux/vmalloc.h>
>  #include <asm/traps.h>
>  #include <asm/ptrace.h>
>  #include <asm/cacheflush.h>
> @@ -42,10 +44,21 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
>  static void __kprobes
>  post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
>  
> +static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
> +{
> +	void *addrs[1];
> +	u32 insns[1];
> +
> +	addrs[0] = (void *)addr;
> +	insns[0] = (u32)opcode;
> +
> +	return aarch64_insn_patch_text(addrs, insns, 1);
> +}
> +
>  static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
>  {
>  	/* prepare insn slot */
> -	p->ainsn.api.insn[0] = cpu_to_le32(p->opcode);
> +	patch_text(p->ainsn.api.insn, p->opcode);
>  
>  	flush_icache_range((uintptr_t) (p->ainsn.api.insn),
>  			   (uintptr_t) (p->ainsn.api.insn) +
> @@ -118,15 +131,15 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
>  	return 0;
>  }
>  
> -static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
> +void *alloc_insn_page(void)
>  {
> -	void *addrs[1];
> -	u32 insns[1];
> +	void *page;
>  
> -	addrs[0] = (void *)addr;
> -	insns[0] = (u32)opcode;
> +	page = vmalloc_exec(PAGE_SIZE);
> +	if (page)
> +		set_memory_ro((unsigned long)page & PAGE_MASK, 1);

This looks a bit strange to me -- you're allocating PAGE_SIZE bytes so
that we can adjust the permissions, yet we can't guarantee that page is
actually page-aligned and therefore end up explicitly masking down.

In which case allocating an entire page isn't actually helping us, and
we could end up racing with somebody else changing permission on the
same page afaict.

I think we need to ensure we really have an entire page, perhaps using
vmap() instead? Or have I missed some subtle detail here?

Cheers,

Will
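
(For illustration, a minimal sketch of the vmap() approach suggested above
might look like the following; this variant is hypothetical and was not
part of the posted series:)

void *alloc_insn_page(void)
{
	struct page *page;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return NULL;

	/*
	 * Map exactly one known page RO+X from the start, so its
	 * permissions never need to be flipped after allocation and
	 * cannot race with permission changes on a shared page.
	 * (Freeing the backing page on vmap() failure is omitted here.)
	 */
	return vmap(&page, 1, VM_MAP, PAGE_KERNEL_ROX);
}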
Arnd Bergmann Oct. 29, 2018, 12:11 p.m. UTC | #5
On 10/29/18, Will Deacon <will.deacon@arm.com> wrote:
> On Mon, Oct 15, 2018 at 01:16:00PM +0200, Anders Roxell wrote:
>> -static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
>> +void *alloc_insn_page(void)
>>  {
>> -	void *addrs[1];
>> -	u32 insns[1];
>> +	void *page;
>>
>> -	addrs[0] = (void *)addr;
>> -	insns[0] = (u32)opcode;
>> +	page = vmalloc_exec(PAGE_SIZE);
>> +	if (page)
>> +		set_memory_ro((unsigned long)page & PAGE_MASK, 1);
>
> This looks a bit strange to me -- you're allocating PAGE_SIZE bytes so
> that we can adjust the permissions, yet we can't guarantee that page is
> actually page-aligned and therefore end up explicitly masking down.
>
> In which case allocating an entire page isn't actually helping us, and
> we could end up racing with somebody else changing permission on the
> same page afaict.
>
> I think we need to ensure we really have an entire page, perhaps using
> vmap() instead? Or have I missed some subtle detail here?

I'm fairly sure that vmalloc() and vmalloc_exec() are guaranteed to be
page-aligned everywhere. The documentation is a bit vague here, but I'm
still confident enough that we can make that assumption based on:

/**
 *      vmalloc_exec  -  allocate virtually contiguous, executable memory
 *      @size:          allocation size
 *
 *      Kernel-internal function to allocate enough pages to cover @size
 *      the page level allocator and map them into contiguous and
 *      executable kernel virtual space.
 *
 *      For tight control over page level allocator and protection flags
 *      use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)


       Arnd
Will Deacon Oct. 29, 2018, 5:02 p.m. UTC | #6
On Mon, Oct 29, 2018 at 01:11:24PM +0100, Arnd Bergmann wrote:
> On 10/29/18, Will Deacon <will.deacon@arm.com> wrote:
> > On Mon, Oct 15, 2018 at 01:16:00PM +0200, Anders Roxell wrote:
> >> -static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
> >> +void *alloc_insn_page(void)
> >>  {
> >> -	void *addrs[1];
> >> -	u32 insns[1];
> >> +	void *page;
> >>
> >> -	addrs[0] = (void *)addr;
> >> -	insns[0] = (u32)opcode;
> >> +	page = vmalloc_exec(PAGE_SIZE);
> >> +	if (page)
> >> +		set_memory_ro((unsigned long)page & PAGE_MASK, 1);
> >
> > This looks a bit strange to me -- you're allocating PAGE_SIZE bytes so
> > that we can adjust the permissions, yet we can't guarantee that page is
> > actually page-aligned and therefore end up explicitly masking down.
> >
> > In which case allocating an entire page isn't actually helping us, and
> > we could end up racing with somebody else changing permission on the
> > same page afaict.
> >
> > I think we need to ensure we really have an entire page, perhaps using
> > vmap() instead? Or have I missed some subtle detail here?
>
> I'm fairly sure that vmalloc() and vmalloc_exec() are guaranteed to be
> page-aligned everywhere. The documentation is a bit vague here, but I'm
> still confident enough that we can make that assumption based on:
>
> /**
>  *      vmalloc_exec  -  allocate virtually contiguous, executable memory
>  *      @size:          allocation size
>  *
>  *      Kernel-internal function to allocate enough pages to cover @size
>  *      the page level allocator and map them into contiguous and
>  *      executable kernel virtual space.
>  *
>  *      For tight control over page level allocator and protection flags
>  *      use __vmalloc() instead.
>  */
> void *vmalloc_exec(unsigned long size)

FWIW, I did a bit of digging and I agree with your conclusion. vmalloc()
allocations end up getting installed in map_vm_area() via
__vmalloc_area_node(), which allocates things a page at a time.

So we can simplify this patch to drop the masking when calling
set_memory_ro().

Will
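
(Concretely, the simplification described above would reduce the allocation
hook to something like this sketch, with the masking dropped:)

void *alloc_insn_page(void)
{
	void *page;

	page = vmalloc_exec(PAGE_SIZE);
	if (page)
		/* vmalloc_exec() returns page-aligned memory */
		set_memory_ro((unsigned long)page, 1);

	return page;
}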

Patch

diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index 9b65132e789a..b842e908b423 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -23,7 +23,9 @@ 
 #include <linux/slab.h>
 #include <linux/stop_machine.h>
 #include <linux/sched/debug.h>
+#include <linux/set_memory.h>
 #include <linux/stringify.h>
+#include <linux/vmalloc.h>
 #include <asm/traps.h>
 #include <asm/ptrace.h>
 #include <asm/cacheflush.h>
@@ -42,10 +44,21 @@  DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 static void __kprobes
 post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
 
+static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
+{
+	void *addrs[1];
+	u32 insns[1];
+
+	addrs[0] = (void *)addr;
+	insns[0] = (u32)opcode;
+
+	return aarch64_insn_patch_text(addrs, insns, 1);
+}
+
 static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
 {
 	/* prepare insn slot */
-	p->ainsn.api.insn[0] = cpu_to_le32(p->opcode);
+	patch_text(p->ainsn.api.insn, p->opcode);
 
 	flush_icache_range((uintptr_t) (p->ainsn.api.insn),
 			   (uintptr_t) (p->ainsn.api.insn) +
@@ -118,15 +131,15 @@  int __kprobes arch_prepare_kprobe(struct kprobe *p)
 	return 0;
 }
 
-static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
+void *alloc_insn_page(void)
 {
-	void *addrs[1];
-	u32 insns[1];
+	void *page;
 
-	addrs[0] = (void *)addr;
-	insns[0] = (u32)opcode;
+	page = vmalloc_exec(PAGE_SIZE);
+	if (page)
+		set_memory_ro((unsigned long)page & PAGE_MASK, 1);
 
-	return aarch64_insn_patch_text(addrs, insns, 1);
+	return page;
 }
 
 /* arm kprobe: install breakpoint in text */