diff mbox

[RFCv3,01/14] arm64: introduce aarch64_insn_gen_comp_branch_imm()

Message ID 1405405512-4423-2-git-send-email-zlim.lnx@gmail.com
State New
Headers show

Commit Message

Zi Shen Lim July 15, 2014, 6:24 a.m. UTC
Introduce function to generate compare & branch (immediate)
instructions.

Signed-off-by: Zi Shen Lim <zlim.lnx@gmail.com>
---
 arch/arm64/include/asm/insn.h | 57 ++++++++++++++++++++++++++++
 arch/arm64/kernel/insn.c      | 86 ++++++++++++++++++++++++++++++++++++++++---
 2 files changed, 138 insertions(+), 5 deletions(-)

Comments

Will Deacon July 16, 2014, 4:04 p.m. UTC | #1
On Tue, Jul 15, 2014 at 07:24:59AM +0100, Zi Shen Lim wrote:
> Introduce function to generate compare & branch (immediate)
> instructions.
> 
> Signed-off-by: Zi Shen Lim <zlim.lnx@gmail.com>
> ---
>  arch/arm64/include/asm/insn.h | 57 ++++++++++++++++++++++++++++
>  arch/arm64/kernel/insn.c      | 86 ++++++++++++++++++++++++++++++++++++++++---
>  2 files changed, 138 insertions(+), 5 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
> index dc1f73b..a98c495 100644
> --- a/arch/arm64/include/asm/insn.h
> +++ b/arch/arm64/include/asm/insn.h
> @@ -2,6 +2,8 @@
>   * Copyright (C) 2013 Huawei Ltd.
>   * Author: Jiang Liu <liuj97@gmail.com>
>   *
> + * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
> + *
>   * This program is free software; you can redistribute it and/or modify
>   * it under the terms of the GNU General Public License version 2 as
>   * published by the Free Software Foundation.
> @@ -67,9 +69,58 @@ enum aarch64_insn_imm_type {
>  	AARCH64_INSN_IMM_MAX
>  };
>  
> +enum aarch64_insn_register_type {
> +	AARCH64_INSN_REGTYPE_RT,
> +};
> +
> +enum aarch64_insn_register {
> +	AARCH64_INSN_REG_0  = 0,
> +	AARCH64_INSN_REG_1  = 1,
> +	AARCH64_INSN_REG_2  = 2,
> +	AARCH64_INSN_REG_3  = 3,
> +	AARCH64_INSN_REG_4  = 4,
> +	AARCH64_INSN_REG_5  = 5,
> +	AARCH64_INSN_REG_6  = 6,
> +	AARCH64_INSN_REG_7  = 7,
> +	AARCH64_INSN_REG_8  = 8,
> +	AARCH64_INSN_REG_9  = 9,
> +	AARCH64_INSN_REG_10 = 10,
> +	AARCH64_INSN_REG_11 = 11,
> +	AARCH64_INSN_REG_12 = 12,
> +	AARCH64_INSN_REG_13 = 13,
> +	AARCH64_INSN_REG_14 = 14,
> +	AARCH64_INSN_REG_15 = 15,
> +	AARCH64_INSN_REG_16 = 16,
> +	AARCH64_INSN_REG_17 = 17,
> +	AARCH64_INSN_REG_18 = 18,
> +	AARCH64_INSN_REG_19 = 19,
> +	AARCH64_INSN_REG_20 = 20,
> +	AARCH64_INSN_REG_21 = 21,
> +	AARCH64_INSN_REG_22 = 22,
> +	AARCH64_INSN_REG_23 = 23,
> +	AARCH64_INSN_REG_24 = 24,
> +	AARCH64_INSN_REG_25 = 25,
> +	AARCH64_INSN_REG_26 = 26,
> +	AARCH64_INSN_REG_27 = 27,
> +	AARCH64_INSN_REG_28 = 28,
> +	AARCH64_INSN_REG_29 = 29,
> +	AARCH64_INSN_REG_FP = 29, /* Frame pointer */
> +	AARCH64_INSN_REG_30 = 30,
> +	AARCH64_INSN_REG_LR = 30, /* Link register */
> +	AARCH64_INSN_REG_ZR = 31, /* Zero: as source register */
> +	AARCH64_INSN_REG_SP = 31  /* Stack pointer: as load/store base reg */

Can you just #define AARCH64_INSN_REG(x) instead, then have some magic
values like ARM64_REG_LR which are defined as the appropriate numbers?

> +};
> +
> +enum aarch64_insn_variant {
> +	AARCH64_INSN_VARIANT_32BIT,
> +	AARCH64_INSN_VARIANT_64BIT
> +};
> +
>  enum aarch64_insn_branch_type {
>  	AARCH64_INSN_BRANCH_NOLINK,
>  	AARCH64_INSN_BRANCH_LINK,
> +	AARCH64_INSN_BRANCH_COMP_ZERO,
> +	AARCH64_INSN_BRANCH_COMP_NONZERO,
>  };
>  
>  #define	__AARCH64_INSN_FUNCS(abbr, mask, val)	\
> @@ -80,6 +131,8 @@ static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \
>  
>  __AARCH64_INSN_FUNCS(b,		0xFC000000, 0x14000000)
>  __AARCH64_INSN_FUNCS(bl,	0xFC000000, 0x94000000)
> +__AARCH64_INSN_FUNCS(cbz,	0xFE000000, 0x34000000)
> +__AARCH64_INSN_FUNCS(cbnz,	0xFE000000, 0x35000000)
>  __AARCH64_INSN_FUNCS(svc,	0xFFE0001F, 0xD4000001)
>  __AARCH64_INSN_FUNCS(hvc,	0xFFE0001F, 0xD4000002)
>  __AARCH64_INSN_FUNCS(smc,	0xFFE0001F, 0xD4000003)
> @@ -97,6 +150,10 @@ u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
>  				  u32 insn, u64 imm);
>  u32 aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
>  				enum aarch64_insn_branch_type type);
> +u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
> +				     enum aarch64_insn_register reg,
> +				     enum aarch64_insn_variant variant,
> +				     enum aarch64_insn_branch_type type);
>  u32 aarch64_insn_gen_hint(enum aarch64_insn_hint_op op);
>  u32 aarch64_insn_gen_nop(void);
>  
> diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
> index 92f3683..d01bb4e 100644
> --- a/arch/arm64/kernel/insn.c
> +++ b/arch/arm64/kernel/insn.c
> @@ -2,6 +2,8 @@
>   * Copyright (C) 2013 Huawei Ltd.
>   * Author: Jiang Liu <liuj97@gmail.com>
>   *
> + * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
> + *
>   * This program is free software; you can redistribute it and/or modify
>   * it under the terms of the GNU General Public License version 2 as
>   * published by the Free Software Foundation.
> @@ -264,10 +266,36 @@ u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
>  	return insn;
>  }
>  
> -u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
> -					  enum aarch64_insn_branch_type type)
> +static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
> +					u32 insn,
> +					enum aarch64_insn_register reg)
> +{
> +	int shift;
> +
> +	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
> +		pr_err("%s: unknown register encoding %d\n", __func__, reg);
> +		return 0;
> +	}
> +
> +	switch (type) {
> +	case AARCH64_INSN_REGTYPE_RT:
> +		shift = 0;
> +		break;
> +	default:
> +		pr_err("%s: unknown register type encoding %d\n", __func__,
> +		       type);
> +		return 0;
> +	}
> +
> +	insn &= ~(GENMASK(4, 0) << shift);
> +	insn |= reg << shift;
> +
> +	return insn;
> +}
> +
> +static inline long branch_imm_common(unsigned long pc, unsigned long addr,
> +				     long range)
>  {
> -	u32 insn;
>  	long offset;
>  
>  	/*
> @@ -276,13 +304,24 @@ u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
>  	 */
>  	BUG_ON((pc & 0x3) || (addr & 0x3));
>  
> +	offset = ((long)addr - (long)pc);
> +	BUG_ON(offset < -range || offset >= range);
> +
> +	return offset;
> +}
> +
> +u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
> +					  enum aarch64_insn_branch_type type)
> +{
> +	u32 insn;
> +	long offset;
> +
>  	/*
>  	 * B/BL support [-128M, 128M) offset
>  	 * ARM64 virtual address arrangement guarantees all kernel and module
>  	 * texts are within +/-128M.
>  	 */
> -	offset = ((long)addr - (long)pc);
> -	BUG_ON(offset < -SZ_128M || offset >= SZ_128M);
> +	offset = branch_imm_common(pc, addr, SZ_128M);
>  
>  	if (type == AARCH64_INSN_BRANCH_LINK)
>  		insn = aarch64_insn_get_bl_value();
> @@ -293,6 +332,43 @@ u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
>  					     offset >> 2);
>  }
>  
> +u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
> +				     enum aarch64_insn_register reg,
> +				     enum aarch64_insn_variant variant,
> +				     enum aarch64_insn_branch_type type)
> +{
> +	u32 insn;
> +	long offset;
> +
> +	offset = branch_imm_common(pc, addr, SZ_1M);
> +
> +	switch (type) {
> +	case AARCH64_INSN_BRANCH_COMP_ZERO:
> +		insn = aarch64_insn_get_cbz_value();
> +		break;
> +	case AARCH64_INSN_BRANCH_COMP_NONZERO:
> +		insn = aarch64_insn_get_cbnz_value();
> +		break;
> +	default:
> +		BUG_ON(1);
> +	}
> +
> +	switch (variant) {
> +	case AARCH64_INSN_VARIANT_32BIT:
> +		break;
> +	case AARCH64_INSN_VARIANT_64BIT:
> +		insn |= BIT(31);

FWIW, that bit (31) is referred to as the `SF' bit in the instruction
encodings (for Sixty-Four). You could have a #define for that to help people
match up the bitfield, if you like.

> +		break;
> +	default:
> +		BUG_ON(1);

Is a BUG_ON justifiable here? Is there not a nicer way to fail?

Will
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/
Zi Shen Lim July 16, 2014, 9:19 p.m. UTC | #2
On Wed, Jul 16, 2014 at 05:04:50PM +0100, Will Deacon wrote:
> On Tue, Jul 15, 2014 at 07:24:59AM +0100, Zi Shen Lim wrote:
[...]
> > +enum aarch64_insn_register {
> > +	AARCH64_INSN_REG_0  = 0,
> > +	AARCH64_INSN_REG_1  = 1,
> > +	AARCH64_INSN_REG_2  = 2,
> > +	AARCH64_INSN_REG_3  = 3,
> > +	AARCH64_INSN_REG_4  = 4,
> > +	AARCH64_INSN_REG_5  = 5,
> > +	AARCH64_INSN_REG_6  = 6,
> > +	AARCH64_INSN_REG_7  = 7,
> > +	AARCH64_INSN_REG_8  = 8,
> > +	AARCH64_INSN_REG_9  = 9,
> > +	AARCH64_INSN_REG_10 = 10,
> > +	AARCH64_INSN_REG_11 = 11,
> > +	AARCH64_INSN_REG_12 = 12,
> > +	AARCH64_INSN_REG_13 = 13,
> > +	AARCH64_INSN_REG_14 = 14,
> > +	AARCH64_INSN_REG_15 = 15,
> > +	AARCH64_INSN_REG_16 = 16,
> > +	AARCH64_INSN_REG_17 = 17,
> > +	AARCH64_INSN_REG_18 = 18,
> > +	AARCH64_INSN_REG_19 = 19,
> > +	AARCH64_INSN_REG_20 = 20,
> > +	AARCH64_INSN_REG_21 = 21,
> > +	AARCH64_INSN_REG_22 = 22,
> > +	AARCH64_INSN_REG_23 = 23,
> > +	AARCH64_INSN_REG_24 = 24,
> > +	AARCH64_INSN_REG_25 = 25,
> > +	AARCH64_INSN_REG_26 = 26,
> > +	AARCH64_INSN_REG_27 = 27,
> > +	AARCH64_INSN_REG_28 = 28,
> > +	AARCH64_INSN_REG_29 = 29,
> > +	AARCH64_INSN_REG_FP = 29, /* Frame pointer */
> > +	AARCH64_INSN_REG_30 = 30,
> > +	AARCH64_INSN_REG_LR = 30, /* Link register */
> > +	AARCH64_INSN_REG_ZR = 31, /* Zero: as source register */
> > +	AARCH64_INSN_REG_SP = 31  /* Stack pointer: as load/store base reg */
> 
> Can you just #define AARCH64_INSN_REG(x) instead, then have some magic
> values like ARM64_REG_LR which are defined as the appropriate numbers?

I actually had something like what you mentioned in the beginning, but
decided to go with the above - thinking that it's clearer to present
the complete set of valid register definitions.

The #define can still be added for convenience, though I think it's also a
potential source of errors - it's much easier to typo something like
AARCH64_INSN_REG(32) and not get caught.

[...]
> > +	switch (variant) {
> > +	case AARCH64_INSN_VARIANT_32BIT:
> > +		break;
> > +	case AARCH64_INSN_VARIANT_64BIT:
> > +		insn |= BIT(31);
> 
> FWIW, that bit (31) is referred to as the `SF' bit in the instruction
> encodings (for Sixty-Four). You could have a #define for that to help people
> match up the bitfield, if you like.

Something like this?

	#define AARCH64_INSN_SF_BIT  BIT(31)

	...

	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;

In the case of bitfield instruction, there's also an "N" bit.
So something like this?

	#define AARCH64_INSN_N_BIT  BIT(22)

	...

	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;

> 
> > +		break;
> > +	default:
> > +		BUG_ON(1);
> 
> Is a BUG_ON justifiable here? Is there not a nicer way to fail?

In general, it'd be nice if we returned something like -EINVAL and
have all callers handle failures. Today all code gen functions return
the u32 instruction and there's no error handling by callers.
I think following the precedent (aarch64_insn_gen_branch_imm())
of failing with BUG_ON is a reasonable tradeoff.

In this case here, when we hit the default (failure) case, that means
there's a serious error of attempting to use an unsupported
variant. I think we're better off failing hard here than trying to
arbitrarily "fallback" on a default choice.

One potential option instead of switch (variant) is:

	if (variant == AARCH64_INSN_VARIANT_64BIT)
		/* do something */
	else
		/* do something else */

which would be quite reasonable to do as we only have VARIANT_{32,64}BIT
today.

However, consider the case where we add VARIANT_128BIT or other flavors
in the future. The if/else option (basically defaulting to VARIANT_32BIT)
would then make much less sense.

> 
> Will
--
To unsubscribe from this list: send the line "unsubscribe netdev" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Will Deacon July 17, 2014, 9:19 a.m. UTC | #3
On Wed, Jul 16, 2014 at 10:19:31PM +0100, Zi Shen Lim wrote:
> On Wed, Jul 16, 2014 at 05:04:50PM +0100, Will Deacon wrote:
> > On Tue, Jul 15, 2014 at 07:24:59AM +0100, Zi Shen Lim wrote:
> [...]
> > > +enum aarch64_insn_register {
> > > +	AARCH64_INSN_REG_0  = 0,
> > > +	AARCH64_INSN_REG_1  = 1,
> > > +	AARCH64_INSN_REG_2  = 2,
> > > +	AARCH64_INSN_REG_3  = 3,
> > > +	AARCH64_INSN_REG_4  = 4,
> > > +	AARCH64_INSN_REG_5  = 5,
> > > +	AARCH64_INSN_REG_6  = 6,
> > > +	AARCH64_INSN_REG_7  = 7,
> > > +	AARCH64_INSN_REG_8  = 8,
> > > +	AARCH64_INSN_REG_9  = 9,
> > > +	AARCH64_INSN_REG_10 = 10,
> > > +	AARCH64_INSN_REG_11 = 11,
> > > +	AARCH64_INSN_REG_12 = 12,
> > > +	AARCH64_INSN_REG_13 = 13,
> > > +	AARCH64_INSN_REG_14 = 14,
> > > +	AARCH64_INSN_REG_15 = 15,
> > > +	AARCH64_INSN_REG_16 = 16,
> > > +	AARCH64_INSN_REG_17 = 17,
> > > +	AARCH64_INSN_REG_18 = 18,
> > > +	AARCH64_INSN_REG_19 = 19,
> > > +	AARCH64_INSN_REG_20 = 20,
> > > +	AARCH64_INSN_REG_21 = 21,
> > > +	AARCH64_INSN_REG_22 = 22,
> > > +	AARCH64_INSN_REG_23 = 23,
> > > +	AARCH64_INSN_REG_24 = 24,
> > > +	AARCH64_INSN_REG_25 = 25,
> > > +	AARCH64_INSN_REG_26 = 26,
> > > +	AARCH64_INSN_REG_27 = 27,
> > > +	AARCH64_INSN_REG_28 = 28,
> > > +	AARCH64_INSN_REG_29 = 29,
> > > +	AARCH64_INSN_REG_FP = 29, /* Frame pointer */
> > > +	AARCH64_INSN_REG_30 = 30,
> > > +	AARCH64_INSN_REG_LR = 30, /* Link register */
> > > +	AARCH64_INSN_REG_ZR = 31, /* Zero: as source register */
> > > +	AARCH64_INSN_REG_SP = 31  /* Stack pointer: as load/store base reg */
> > 
> > Can you just #define AARCH64_INSN_REG(x) instead, then have some magic
> > values like ARM64_REG_LR which are defined as the appropriate numbers?
> 
> I actually had something like what you mentioned in the beginning, but
> decided to go with the above - thinking that it's clearer to present
> the complete set of valid register definitions.
> 
> The #define can still be added for convenience, though I think it's also a
> potential source of errors - it's much easier to typo something like
> AARCH64_INSN_REG(32) and not get caught.

Fair enough, that's a good enough reason to leave it like it is.

> [...]
> > > +	switch (variant) {
> > > +	case AARCH64_INSN_VARIANT_32BIT:
> > > +		break;
> > > +	case AARCH64_INSN_VARIANT_64BIT:
> > > +		insn |= BIT(31);
> > 
> > FWIW, that bit (31) is referred to as the `SF' bit in the instruction
> > encodings (for Sixty-Four). You could have a #define for that to help people
> > match up the bitfield, if you like.
> 
> Something like this?
> 
> 	#define AARCH64_INSN_SF_BIT  BIT(31)
> 
> 	...
> 
> 	case AARCH64_INSN_VARIANT_64BIT:
> 		insn |= AARCH64_INSN_SF_BIT;
> 
> In the case of bitfield instruction, there's also an "N" bit.
> So something like this?
> 
> 	#define AARCH64_INSN_N_BIT  BIT(22)
> 
> 	...
> 
> 	case AARCH64_INSN_VARIANT_64BIT:
> 		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;

Looks good.

> > 
> > > +		break;
> > > +	default:
> > > +		BUG_ON(1);
> > 
> > Is a BUG_ON justifiable here? Is there not a nicer way to fail?
> 
> In general, it'd be nice if we returned something like -EINVAL and
> have all callers handle failures. Today all code gen functions return
> the u32 instruction and there's no error handling by callers.
> I think following the precedent (aarch64_insn_gen_branch_imm())
> of failing with BUG_ON is a reasonable tradeoff.

Well, I don't necessarily agree with that BUG_ON, either :)
I take it eBPF doesn't have a `trap' instruction or similar? Otherwise, we
could generate that and avoid having to propagate errors directly to the
caller.

> In this case here, when we hit the default (failure) case, that means
> there's a serious error of attempting to use an unsupported
> variant. I think we're better off failing hard here than trying to
> arbitrarily "fallback" on a default choice.

It might be a serious error for BPF, but a BUG_ON brings down the entire
machine, which I think is unfortunate.

> 
> One potential option instead of switch (variant) is:
> 
> 	if (variant == AARCH64_INSN_VARIANT_64BIT)
> 		/* do something */
> 	else
> 		/* do something else */
> 
> which would be quite reasonable to do as we only have VARIANT_{32,64}BIT
> today.
> 
> However, consider the case where we add VARIANT_128BIT or other flavors
> in the future. The if/else option (basically defaulting to VARIANT_32BIT)
> would then make much less sense.

I don't think we need to worry about hypothetical extensions to the
instruction set at this stage.

Will
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/
Alexei Starovoitov July 17, 2014, 3:59 p.m. UTC | #4
On Thu, Jul 17, 2014 at 2:19 AM, Will Deacon <will.deacon@arm.com> wrote:
> On Wed, Jul 16, 2014 at 10:19:31PM +0100, Zi Shen Lim wrote:
>> >
>> > Is a BUG_ON justifiable here? Is there not a nicer way to fail?
>>
>> In general, it'd be nice if we returned something like -EINVAL and
>> have all callers handle failures. Today all code gen functions return
>> the u32 instruction and there's no error handling by callers.
>> I think following the precedent (aarch64_insn_gen_branch_imm())
>> of failing with BUG_ON is a reasonable tradeoff.
>
> Well, I don't necessarily agree with that BUG_ON, either :)
> I take it eBPF doesn't have a `trap' instruction or similar? Otherwise, we
> could generate that and avoid having to propagate errors directly to the
> caller.
>
>> In this case here, when we hit the default (failure) case, that means
>> there's a serious error of attempting to use an unsupported
>> variant. I think we're better off failing hard here than trying to
>> arbitrarily "fallback" on a default choice.
>
> It might be a serious error for BPF, but a BUG_ON brings down the entire
> machine, which I think is unfortunate.

There is some misunderstanding here. Here BUG_ON will trigger
only on actual bug in JIT implementation, it cannot be triggered by user.
eBPF program is verified before it reaches JIT, so all instructions are
valid and input to JIT is proper. Two instructions are not yet
implemented in this JIT and they trigger pr_.._once().
So I don't see any issue with this usage of BUG_ON.
IMO, living with silent bugs in JIT is more dangerous.

For the same reason there is no 'trap' instruction in eBPF.
Static verifier checks that program is valid. If there was a 'trap'
insn the program would be rejected. Like programs with
'div by zero' are rejected. There is normal 'bpf_exit' insn to
return from the program.
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/
Will Deacon July 17, 2014, 5:25 p.m. UTC | #5
On Thu, Jul 17, 2014 at 04:59:10PM +0100, Alexei Starovoitov wrote:
> On Thu, Jul 17, 2014 at 2:19 AM, Will Deacon <will.deacon@arm.com> wrote:
> > On Wed, Jul 16, 2014 at 10:19:31PM +0100, Zi Shen Lim wrote:
> >> >
> >> > Is a BUG_ON justifiable here? Is there not a nicer way to fail?
> >>
> >> In general, it'd be nice if we returned something like -EINVAL and
> >> have all callers handle failures. Today all code gen functions return
> >> the u32 instruction and there's no error handling by callers.
> >> I think following the precedent (aarch64_insn_gen_branch_imm())
> >> of failing with BUG_ON is a reasonable tradeoff.
> >
> > Well, I don't necessarily agree with that BUG_ON, either :)
> > I take it eBPF doesn't have a `trap' instruction or similar? Otherwise, we
> > could generate that and avoid having to propagate errors directly to the
> > caller.
> >
> >> In this case here, when we hit the default (failure) case, that means
> >> there's a serious error of attempting to use an unsupported
> >> variant. I think we're better off failing hard here than trying to
> >> arbitrarily "fallback" on a default choice.
> >
> > It might be a serious error for BPF, but a BUG_ON brings down the entire
> > machine, which I think is unfortunate.
> 
> There is some misunderstanding here. Here BUG_ON will trigger
> only on actual bug in JIT implementation, it cannot be triggered by user.
> eBPF program is verified before it reaches JIT, so all instructions are
> valid and input to JIT is proper. Two instructions are not yet
> implemented in this JIT and they trigger pr_.._once().
> So I don't see any issue with this usage of BUG_ON.
> IMO, living with silent bugs in JIT is more dangerous.
> 
> For the same reason there is no 'trap' instruction in eBPF.
> Static verifier checks that program is valid. If there was a 'trap'
> insn the program would be rejected. Like programs with
> 'div by zero' are rejected. There is normal 'bpf_exit' insn to
> return from the program.

Ok, so assuming that BPF doesn't have any issues, I take your point.
However, we could very easily re-use these functions for things like SMP
alternatives and kprobes, where simply failing the instruction generation
might be acceptable.

It just feels like a big hammer to me, when the machine is probably happily
scheduling user tasks, responding to interrupts, writing data to disk etc.

Will
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/
Zi Shen Lim July 18, 2014, 5:44 a.m. UTC | #6
(resending this email in case the first one got caught in your spam
filter. sorry.)

On Thu, Jul 17, 2014 at 06:25:26PM +0100, Will Deacon wrote:
> On Thu, Jul 17, 2014 at 04:59:10PM +0100, Alexei Starovoitov wrote:
> > On Thu, Jul 17, 2014 at 2:19 AM, Will Deacon <will.deacon@arm.com> wrote:
> > > On Wed, Jul 16, 2014 at 10:19:31PM +0100, Zi Shen Lim wrote:
> > >> >
> > >> > Is a BUG_ON justifiable here? Is there not a nicer way to fail?
> > >>
> > >> In general, it'd be nice if we returned something like -EINVAL and
> > >> have all callers handle failures. Today all code gen functions return
> > >> the u32 instruction and there's no error handling by callers.
> > >> I think following the precedent (aarch64_insn_gen_branch_imm())
> > >> of failing with BUG_ON is a reasonable tradeoff.
> > >
> > > Well, I don't necessarily agree with that BUG_ON, either :)
> > > I take it eBPF doesn't have a `trap' instruction or similar? Otherwise, we
> > > could generate that and avoid having to propagate errors directly to the
> > > caller.
> > >
> > >> In this case here, when we hit the default (failure) case, that means
> > >> there's a serious error of attempting to use an unsupported
> > >> variant. I think we're better off failing hard here than trying to
> > >> arbitrarily "fallback" on a default choice.
> > >
> > > It might be a serious error for BPF, but a BUG_ON brings down the entire
> > > machine, which I think is unfortunate.
> >
> > There is some misunderstanding here. Here BUG_ON will trigger
> > only on actual bug in JIT implementation, it cannot be triggered by user.
> > eBPF program is verified before it reaches JIT, so all instructions are
> > valid and input to JIT is proper. Two instructions are not yet
> > implemented in this JIT and they trigger pr_.._once().
> > So I don't see any issue with this usage of BUG_ON.
> > IMO, living with silent bugs in JIT is more dangerous.
> >
> > For the same reason there is no 'trap' instruction in eBPF.
> > Static verifier checks that program is valid. If there was a 'trap'
> > insn the program would be rejected. Like programs with
> > 'div by zero' are rejected. There is normal 'bpf_exit' insn to
> > return from the program.
>
> Ok, so assuming that BPF doesn't have any issues, I take your point.
> However, we could very easily re-use these functions for things like SMP
> alternatives and kprobes, where simply failing the instruction generation
> might be acceptable.
>
> It just feels like a big hammer to me, when the machine is probably happily
> scheduling user tasks, responding to interrupts, writing data to disk etc.

Yes I agree with you Will, it'd be truly unfortunate if we inadvertently
allow the entire system to be brought down.

Alexei accurately pointed out that if we ever hit such a case, it'd be a bug
in the BPF JIT implementation (or bug in other in-kernel implementations).

Our BPF JIT implementation actually handles this, making sure that input
to the codegen function is valid, or gracefully fail by not JITing and
falling back on the core BPF interpreter. This way our JIT will not trigger
the BUG_ON.

IMO, other future users of these codegen functions should do the same.

An alternative would be to throw away all the BUG_ON and have callers
check for and handle error conditions. I think this is actually more
dangerous as callers who don't handle the error conditions properly may
end up causing system crash later with subtle (and quite possibly hard to
debug) bugs.

>
> Will
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/
diff mbox

Patch

diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index dc1f73b..a98c495 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -2,6 +2,8 @@ 
  * Copyright (C) 2013 Huawei Ltd.
  * Author: Jiang Liu <liuj97@gmail.com>
  *
+ * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
@@ -67,9 +69,58 @@  enum aarch64_insn_imm_type {
 	AARCH64_INSN_IMM_MAX
 };
 
+enum aarch64_insn_register_type {
+	AARCH64_INSN_REGTYPE_RT,
+};
+
+enum aarch64_insn_register {
+	AARCH64_INSN_REG_0  = 0,
+	AARCH64_INSN_REG_1  = 1,
+	AARCH64_INSN_REG_2  = 2,
+	AARCH64_INSN_REG_3  = 3,
+	AARCH64_INSN_REG_4  = 4,
+	AARCH64_INSN_REG_5  = 5,
+	AARCH64_INSN_REG_6  = 6,
+	AARCH64_INSN_REG_7  = 7,
+	AARCH64_INSN_REG_8  = 8,
+	AARCH64_INSN_REG_9  = 9,
+	AARCH64_INSN_REG_10 = 10,
+	AARCH64_INSN_REG_11 = 11,
+	AARCH64_INSN_REG_12 = 12,
+	AARCH64_INSN_REG_13 = 13,
+	AARCH64_INSN_REG_14 = 14,
+	AARCH64_INSN_REG_15 = 15,
+	AARCH64_INSN_REG_16 = 16,
+	AARCH64_INSN_REG_17 = 17,
+	AARCH64_INSN_REG_18 = 18,
+	AARCH64_INSN_REG_19 = 19,
+	AARCH64_INSN_REG_20 = 20,
+	AARCH64_INSN_REG_21 = 21,
+	AARCH64_INSN_REG_22 = 22,
+	AARCH64_INSN_REG_23 = 23,
+	AARCH64_INSN_REG_24 = 24,
+	AARCH64_INSN_REG_25 = 25,
+	AARCH64_INSN_REG_26 = 26,
+	AARCH64_INSN_REG_27 = 27,
+	AARCH64_INSN_REG_28 = 28,
+	AARCH64_INSN_REG_29 = 29,
+	AARCH64_INSN_REG_FP = 29, /* Frame pointer */
+	AARCH64_INSN_REG_30 = 30,
+	AARCH64_INSN_REG_LR = 30, /* Link register */
+	AARCH64_INSN_REG_ZR = 31, /* Zero: as source register */
+	AARCH64_INSN_REG_SP = 31  /* Stack pointer: as load/store base reg */
+};
+
+enum aarch64_insn_variant {
+	AARCH64_INSN_VARIANT_32BIT,
+	AARCH64_INSN_VARIANT_64BIT
+};
+
 enum aarch64_insn_branch_type {
 	AARCH64_INSN_BRANCH_NOLINK,
 	AARCH64_INSN_BRANCH_LINK,
+	AARCH64_INSN_BRANCH_COMP_ZERO,
+	AARCH64_INSN_BRANCH_COMP_NONZERO,
 };
 
 #define	__AARCH64_INSN_FUNCS(abbr, mask, val)	\
@@ -80,6 +131,8 @@  static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \
 
 __AARCH64_INSN_FUNCS(b,		0xFC000000, 0x14000000)
 __AARCH64_INSN_FUNCS(bl,	0xFC000000, 0x94000000)
+__AARCH64_INSN_FUNCS(cbz,	0xFE000000, 0x34000000)
+__AARCH64_INSN_FUNCS(cbnz,	0xFE000000, 0x35000000)
 __AARCH64_INSN_FUNCS(svc,	0xFFE0001F, 0xD4000001)
 __AARCH64_INSN_FUNCS(hvc,	0xFFE0001F, 0xD4000002)
 __AARCH64_INSN_FUNCS(smc,	0xFFE0001F, 0xD4000003)
@@ -97,6 +150,10 @@  u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
 				  u32 insn, u64 imm);
 u32 aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
 				enum aarch64_insn_branch_type type);
+u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
+				     enum aarch64_insn_register reg,
+				     enum aarch64_insn_variant variant,
+				     enum aarch64_insn_branch_type type);
 u32 aarch64_insn_gen_hint(enum aarch64_insn_hint_op op);
 u32 aarch64_insn_gen_nop(void);
 
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 92f3683..d01bb4e 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -2,6 +2,8 @@ 
  * Copyright (C) 2013 Huawei Ltd.
  * Author: Jiang Liu <liuj97@gmail.com>
  *
+ * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
@@ -264,10 +266,36 @@  u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
 	return insn;
 }
 
-u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
-					  enum aarch64_insn_branch_type type)
+static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
+					u32 insn,
+					enum aarch64_insn_register reg)
+{
+	int shift;
+
+	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
+		pr_err("%s: unknown register encoding %d\n", __func__, reg);
+		return 0;
+	}
+
+	switch (type) {
+	case AARCH64_INSN_REGTYPE_RT:
+		shift = 0;
+		break;
+	default:
+		pr_err("%s: unknown register type encoding %d\n", __func__,
+		       type);
+		return 0;
+	}
+
+	insn &= ~(GENMASK(4, 0) << shift);
+	insn |= reg << shift;
+
+	return insn;
+}
+
+static inline long branch_imm_common(unsigned long pc, unsigned long addr,
+				     long range)
 {
-	u32 insn;
 	long offset;
 
 	/*
@@ -276,13 +304,24 @@  u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
 	 */
 	BUG_ON((pc & 0x3) || (addr & 0x3));
 
+	offset = ((long)addr - (long)pc);
+	BUG_ON(offset < -range || offset >= range);
+
+	return offset;
+}
+
+u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
+					  enum aarch64_insn_branch_type type)
+{
+	u32 insn;
+	long offset;
+
 	/*
 	 * B/BL support [-128M, 128M) offset
 	 * ARM64 virtual address arrangement guarantees all kernel and module
 	 * texts are within +/-128M.
 	 */
-	offset = ((long)addr - (long)pc);
-	BUG_ON(offset < -SZ_128M || offset >= SZ_128M);
+	offset = branch_imm_common(pc, addr, SZ_128M);
 
 	if (type == AARCH64_INSN_BRANCH_LINK)
 		insn = aarch64_insn_get_bl_value();
@@ -293,6 +332,43 @@  u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
 					     offset >> 2);
 }
 
+u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
+				     enum aarch64_insn_register reg,
+				     enum aarch64_insn_variant variant,
+				     enum aarch64_insn_branch_type type)
+{
+	u32 insn;
+	long offset;
+
+	offset = branch_imm_common(pc, addr, SZ_1M);
+
+	switch (type) {
+	case AARCH64_INSN_BRANCH_COMP_ZERO:
+		insn = aarch64_insn_get_cbz_value();
+		break;
+	case AARCH64_INSN_BRANCH_COMP_NONZERO:
+		insn = aarch64_insn_get_cbnz_value();
+		break;
+	default:
+		BUG_ON(1);
+	}
+
+	switch (variant) {
+	case AARCH64_INSN_VARIANT_32BIT:
+		break;
+	case AARCH64_INSN_VARIANT_64BIT:
+		insn |= BIT(31);
+		break;
+	default:
+		BUG_ON(1);
+	}
+
+	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
+
+	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
+					     offset >> 2);
+}
+
 u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
 {
 	return aarch64_insn_get_hint_value() | op;