diff mbox series

[v2,5/8] target/arm: Take an exception if PC is misaligned

Message ID 20210821195958.41312-6-richard.henderson@linaro.org
State Superseded
Headers show
Series target/arm: Fix insn exception priorities | expand

Commit Message

Richard Henderson Aug. 21, 2021, 7:59 p.m. UTC
For A64, any input to an indirect branch can cause this.

For A32, many indirect branch paths force the branch to be aligned,
but BXWritePC does not.  This includes the BX instruction but also
other interworking changes to PC.  Prior to v8, this case is UNDEFINED.
With v8, this is CONSTRAINED UNPREDICTABLE and may either raise an
exception or force align the PC.

We choose to raise an exception because we have the infrastructure,
it makes the generated code for gen_bx simpler, and it has the
possibility of catching more guest bugs.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

---
 target/arm/helper.h        |  1 +
 target/arm/syndrome.h      |  5 +++++
 target/arm/tlb_helper.c    | 24 +++++++++++++++++++++++
 target/arm/translate-a64.c | 21 ++++++++++++++++++--
 target/arm/translate.c     | 39 +++++++++++++++++++++++++++++++-------
 5 files changed, 81 insertions(+), 9 deletions(-)

-- 
2.25.1

Comments

Peter Maydell Aug. 26, 2021, 1:45 p.m. UTC | #1
On Sat, 21 Aug 2021 at 21:00, Richard Henderson
<richard.henderson@linaro.org> wrote:
>

> For A64, any input to an indirect branch can cause this.

>

> For A32, many indirect branch paths force the branch to be aligned,

> but BXWritePC does not.  This includes the BX instruction but also

> other interworking changes to PC.  Prior to v8, this case is UNDEFINED.

> With v8, this is CONSTRAINED UNPREDICTABLE and may either raise an

> exception or force align the PC.

>

> We choose to raise an exception because we have the infrastructure,

> it makes the generated code for gen_bx simpler, and it has the

> possibility of catching more guest bugs.

>

> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

> ---

>  target/arm/helper.h        |  1 +

>  target/arm/syndrome.h      |  5 +++++

>  target/arm/tlb_helper.c    | 24 +++++++++++++++++++++++

>  target/arm/translate-a64.c | 21 ++++++++++++++++++--

>  target/arm/translate.c     | 39 +++++++++++++++++++++++++++++++-------

>  5 files changed, 81 insertions(+), 9 deletions(-)

>

> diff --git a/target/arm/helper.h b/target/arm/helper.h

> index 248569b0cd..d629ee6859 100644

> --- a/target/arm/helper.h

> +++ b/target/arm/helper.h

> @@ -47,6 +47,7 @@ DEF_HELPER_FLAGS_3(sel_flags, TCG_CALL_NO_RWG_SE,

>  DEF_HELPER_2(exception_internal, void, env, i32)

>  DEF_HELPER_4(exception_with_syndrome, void, env, i32, i32, i32)

>  DEF_HELPER_2(exception_bkpt_insn, void, env, i32)

> +DEF_HELPER_2(exception_pc_alignment, noreturn, env, tl)

>  DEF_HELPER_1(setend, void, env)

>  DEF_HELPER_2(wfi, void, env, i32)

>  DEF_HELPER_1(wfe, void, env)

> diff --git a/target/arm/syndrome.h b/target/arm/syndrome.h

> index 54d135897b..e9d97fac6e 100644

> --- a/target/arm/syndrome.h

> +++ b/target/arm/syndrome.h

> @@ -275,4 +275,9 @@ static inline uint32_t syn_illegalstate(void)

>      return (EC_ILLEGALSTATE << ARM_EL_EC_SHIFT) | ARM_EL_IL;

>  }

>

> +static inline uint32_t syn_pcalignment(void)

> +{

> +    return (EC_PCALIGNMENT << ARM_EL_EC_SHIFT) | ARM_EL_IL;

> +}

> +

>  #endif /* TARGET_ARM_SYNDROME_H */

> diff --git a/target/arm/tlb_helper.c b/target/arm/tlb_helper.c

> index 3107f9823e..25c422976e 100644

> --- a/target/arm/tlb_helper.c

> +++ b/target/arm/tlb_helper.c

> @@ -9,6 +9,7 @@

>  #include "cpu.h"

>  #include "internals.h"

>  #include "exec/exec-all.h"

> +#include "exec/helper-proto.h"

>

>  static inline uint32_t merge_syn_data_abort(uint32_t template_syn,

>                                              unsigned int target_el,

> @@ -123,6 +124,29 @@ void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,

>      arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);

>  }

>

> +void helper_exception_pc_alignment(CPUARMState *env, target_ulong pc)

> +{

> +    int target_el = exception_target_el(env);

> +

> +    if (target_el == 2 || arm_el_is_aa64(env, target_el)) {

> +        /*

> +         * To aarch64 and aarch32 el2, pc alignment has a

> +         * special exception class.

> +         */

> +        env->exception.vaddress = pc;

> +        env->exception.fsr = 0;

> +        raise_exception(env, EXCP_PREFETCH_ABORT, syn_pcalignment(), target_el);

> +    } else {

> +        /*

> +         * To aarch32 el1, pc alignment is like data alignment

> +         * except with a prefetch abort.

> +         */

> +        ARMMMUFaultInfo fi = { .type = ARMFault_Alignment };

> +        arm_deliver_fault(env_archcpu(env), pc, MMU_INST_FETCH,

> +                          cpu_mmu_index(env, true), &fi);


I don't think you should need to special case AArch64 vs AArch32 like this;
you can do
   env->exception.vaddress = pc;
   env->exception.fsr = the_fsr;
   raise_exception(env, EXCP_PREFETCH_ABORT, syn_pcalignment(), target_el);

for both. AArch64/AArch32-Hyp exception entry will ignore exception.fsr,
and AArch32-not-Hyp entry will ignore exception.syndrome.

We could probably do with factoring out the
"if (something) { fsr = arm_fi_to_lfsc(&fi) }  else { fsr =
arm_fi_to_sfsc(&fi); }"
logic which we currently duplicate in arm_deliver_fault() and
do_ats_write() and arm_debug_exception_fsr() and also want here.
(NB I haven't checked these really are doing exactly the same
condition check...)


-- PMM
Richard Henderson Sept. 20, 2021, 1:29 a.m. UTC | #2
On 8/26/21 6:45 AM, Peter Maydell wrote:
> On Sat, 21 Aug 2021 at 21:00, Richard Henderson

> <richard.henderson@linaro.org> wrote:

>>

>> For A64, any input to an indirect branch can cause this.

>>

>> For A32, many indirect branch paths force the branch to be aligned,

>> but BXWritePC does not.  This includes the BX instruction but also

>> other interworking changes to PC.  Prior to v8, this case is UNDEFINED.

>> With v8, this is CONSTRAINED UNPREDICTABLE and may either raise an

>> exception or force align the PC.

>>

>> We choose to raise an exception because we have the infrastructure,

>> it makes the generated code for gen_bx simpler, and it has the

>> possibility of catching more guest bugs.

>>

>> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

>> ---

>>   target/arm/helper.h        |  1 +

>>   target/arm/syndrome.h      |  5 +++++

>>   target/arm/tlb_helper.c    | 24 +++++++++++++++++++++++

>>   target/arm/translate-a64.c | 21 ++++++++++++++++++--

>>   target/arm/translate.c     | 39 +++++++++++++++++++++++++++++++-------

>>   5 files changed, 81 insertions(+), 9 deletions(-)

>>

>> diff --git a/target/arm/helper.h b/target/arm/helper.h

>> index 248569b0cd..d629ee6859 100644

>> --- a/target/arm/helper.h

>> +++ b/target/arm/helper.h

>> @@ -47,6 +47,7 @@ DEF_HELPER_FLAGS_3(sel_flags, TCG_CALL_NO_RWG_SE,

>>   DEF_HELPER_2(exception_internal, void, env, i32)

>>   DEF_HELPER_4(exception_with_syndrome, void, env, i32, i32, i32)

>>   DEF_HELPER_2(exception_bkpt_insn, void, env, i32)

>> +DEF_HELPER_2(exception_pc_alignment, noreturn, env, tl)

>>   DEF_HELPER_1(setend, void, env)

>>   DEF_HELPER_2(wfi, void, env, i32)

>>   DEF_HELPER_1(wfe, void, env)

>> diff --git a/target/arm/syndrome.h b/target/arm/syndrome.h

>> index 54d135897b..e9d97fac6e 100644

>> --- a/target/arm/syndrome.h

>> +++ b/target/arm/syndrome.h

>> @@ -275,4 +275,9 @@ static inline uint32_t syn_illegalstate(void)

>>       return (EC_ILLEGALSTATE << ARM_EL_EC_SHIFT) | ARM_EL_IL;

>>   }

>>

>> +static inline uint32_t syn_pcalignment(void)

>> +{

>> +    return (EC_PCALIGNMENT << ARM_EL_EC_SHIFT) | ARM_EL_IL;

>> +}

>> +

>>   #endif /* TARGET_ARM_SYNDROME_H */

>> diff --git a/target/arm/tlb_helper.c b/target/arm/tlb_helper.c

>> index 3107f9823e..25c422976e 100644

>> --- a/target/arm/tlb_helper.c

>> +++ b/target/arm/tlb_helper.c

>> @@ -9,6 +9,7 @@

>>   #include "cpu.h"

>>   #include "internals.h"

>>   #include "exec/exec-all.h"

>> +#include "exec/helper-proto.h"

>>

>>   static inline uint32_t merge_syn_data_abort(uint32_t template_syn,

>>                                               unsigned int target_el,

>> @@ -123,6 +124,29 @@ void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,

>>       arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);

>>   }

>>

>> +void helper_exception_pc_alignment(CPUARMState *env, target_ulong pc)

>> +{

>> +    int target_el = exception_target_el(env);

>> +

>> +    if (target_el == 2 || arm_el_is_aa64(env, target_el)) {

>> +        /*

>> +         * To aarch64 and aarch32 el2, pc alignment has a

>> +         * special exception class.

>> +         */

>> +        env->exception.vaddress = pc;

>> +        env->exception.fsr = 0;

>> +        raise_exception(env, EXCP_PREFETCH_ABORT, syn_pcalignment(), target_el);

>> +    } else {

>> +        /*

>> +         * To aarch32 el1, pc alignment is like data alignment

>> +         * except with a prefetch abort.

>> +         */

>> +        ARMMMUFaultInfo fi = { .type = ARMFault_Alignment };

>> +        arm_deliver_fault(env_archcpu(env), pc, MMU_INST_FETCH,

>> +                          cpu_mmu_index(env, true), &fi);

> 

> I don't think you should need to special case AArch64 vs AArch32 like this;

> you can do

>     env->exception.vaddress = pc;

>     env->exception.fsr = the_fsr;

>     raise_exception(env, EXCP_PREFETCH_ABORT, syn_pcalignment(), target_el);

> 

> for both. AArch64/AArch32-Hyp exception entry will ignore exception.fsr,

> and AArch32-not-Hyp entry will ignore exception.syndrome.


Not true.  The latter case still requires syndrome with EC_INSNABORT, etc.

I played with this a bit, but IMO the cleanest solution is the original patch.

> We could probably do with factoring out the

> "if (something) { fsr = arm_fi_to_lfsc(&fi) }  else { fsr =

> arm_fi_to_sfsc(&fi); }"

> logic which we currently duplicate in arm_deliver_fault() and

> do_ats_write() and arm_debug_exception_fsr() and also want here.

> (NB I haven't checked these really are doing exactly the same

> condition check...)


It is exactly the same in two out of three; the debug_exception one is worded slightly 
differently.  I think it would come out the same if one refactored 
arm_s1_regime_using_lpae_format to not use the mmu_idx but instead regime_el(mmu_idx).


r~
Peter Maydell Sept. 20, 2021, 8:08 a.m. UTC | #3
On Mon, 20 Sept 2021 at 02:29, Richard Henderson
<richard.henderson@linaro.org> wrote:
>

> On 8/26/21 6:45 AM, Peter Maydell wrote:

> > I don't think you should need to special case AArch64 vs AArch32 like this;

> > you can do

> >     env->exception.vaddress = pc;

> >     env->exception.fsr = the_fsr;

> >     raise_exception(env, EXCP_PREFETCH_ABORT, syn_pcalignment(), target_el);

> >

> > for both. AArch64/AArch32-Hyp exception entry will ignore exception.fsr,

> > and AArch32-not-Hyp entry will ignore exception.syndrome.

>

> Not true.  The latter case still requires syndrome with EC_INSNABORT, etc.


For AArch32-not-Hyp ? Syndrome doesn't matter at all in that case
(only Hyp mode and AArch64 have syndrome registers); it just needs
to take the prefetch abort exception, which you get by using
EXCP_PREFETCH_ABORT.

-- PMM
Richard Henderson Sept. 20, 2021, 1:29 p.m. UTC | #4
On 9/20/21 1:08 AM, Peter Maydell wrote:
> On Mon, 20 Sept 2021 at 02:29, Richard Henderson

> <richard.henderson@linaro.org> wrote:

>>

>> On 8/26/21 6:45 AM, Peter Maydell wrote:

>>> I don't think you should need to special case AArch64 vs AArch32 like this;

>>> you can do

>>>      env->exception.vaddress = pc;

>>>      env->exception.fsr = the_fsr;

>>>      raise_exception(env, EXCP_PREFETCH_ABORT, syn_pcalignment(), target_el);

>>>

>>> for both. AArch64/AArch32-Hyp exception entry will ignore exception.fsr,

>>> and AArch32-not-Hyp entry will ignore exception.syndrome.

>>

>> Not true.  The latter case still requires syndrome with EC_INSNABORT, etc.

> 

> For AArch32-not-Hyp ? Syndrome doesn't matter at all in that case

> (only Hyp mode and AArch64 have syndrome registers); it just needs

> to take the prefetch abort exception, which you get by using

> EXCP_PREFETCH_ABORT.


In which case I have incorrect asserts over in cpu_loop.  Sigh.

r~
diff mbox series

Patch

diff --git a/target/arm/helper.h b/target/arm/helper.h
index 248569b0cd..d629ee6859 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -47,6 +47,7 @@  DEF_HELPER_FLAGS_3(sel_flags, TCG_CALL_NO_RWG_SE,
 DEF_HELPER_2(exception_internal, void, env, i32)
 DEF_HELPER_4(exception_with_syndrome, void, env, i32, i32, i32)
 DEF_HELPER_2(exception_bkpt_insn, void, env, i32)
+DEF_HELPER_2(exception_pc_alignment, noreturn, env, tl)
 DEF_HELPER_1(setend, void, env)
 DEF_HELPER_2(wfi, void, env, i32)
 DEF_HELPER_1(wfe, void, env)
diff --git a/target/arm/syndrome.h b/target/arm/syndrome.h
index 54d135897b..e9d97fac6e 100644
--- a/target/arm/syndrome.h
+++ b/target/arm/syndrome.h
@@ -275,4 +275,9 @@  static inline uint32_t syn_illegalstate(void)
     return (EC_ILLEGALSTATE << ARM_EL_EC_SHIFT) | ARM_EL_IL;
 }
 
+static inline uint32_t syn_pcalignment(void)
+{
+    return (EC_PCALIGNMENT << ARM_EL_EC_SHIFT) | ARM_EL_IL;
+}
+
 #endif /* TARGET_ARM_SYNDROME_H */
diff --git a/target/arm/tlb_helper.c b/target/arm/tlb_helper.c
index 3107f9823e..25c422976e 100644
--- a/target/arm/tlb_helper.c
+++ b/target/arm/tlb_helper.c
@@ -9,6 +9,7 @@ 
 #include "cpu.h"
 #include "internals.h"
 #include "exec/exec-all.h"
+#include "exec/helper-proto.h"
 
 static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
                                             unsigned int target_el,
@@ -123,6 +124,29 @@  void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
     arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
 }
 
+void helper_exception_pc_alignment(CPUARMState *env, target_ulong pc)
+{
+    int target_el = exception_target_el(env);
+
+    if (target_el == 2 || arm_el_is_aa64(env, target_el)) {
+        /*
+         * To aarch64 and aarch32 el2, pc alignment has a
+         * special exception class.
+         */
+        env->exception.vaddress = pc;
+        env->exception.fsr = 0;
+        raise_exception(env, EXCP_PREFETCH_ABORT, syn_pcalignment(), target_el);
+    } else {
+        /*
+         * To aarch32 el1, pc alignment is like data alignment
+         * except with a prefetch abort.
+         */
+        ARMMMUFaultInfo fi = { .type = ARMFault_Alignment };
+        arm_deliver_fault(env_archcpu(env), pc, MMU_INST_FETCH,
+                          cpu_mmu_index(env, true), &fi);
+    }
+}
+
 #if !defined(CONFIG_USER_ONLY)
 
 /*
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index 333bc836b2..39c2fb8c7e 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -14752,8 +14752,10 @@  static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
 {
     DisasContext *s = container_of(dcbase, DisasContext, base);
     CPUARMState *env = cpu->env_ptr;
+    uint64_t pc = s->base.pc_next;
     uint32_t insn;
 
+    /* Singlestep exceptions have the highest priority. */
     if (s->ss_active && !s->pstate_ss) {
         /* Singlestep state is Active-pending.
          * If we're in this state at the start of a TB then either
@@ -14768,13 +14770,28 @@  static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
         assert(s->base.num_insns == 1);
         gen_swstep_exception(s, 0, 0);
         s->base.is_jmp = DISAS_NORETURN;
+        s->base.pc_next = pc + 4;
         return;
     }
 
-    s->pc_curr = s->base.pc_next;
+    if (pc & 3) {
+        /*
+         * PC alignment fault.  This has priority over the instruction abort
+         * that we would receive from a translation fault via arm_ldl_code.
+         * This should only be possible after an indirect branch, at the
+         * start of the TB.
+         */
+        assert(s->base.num_insns == 1);
+        gen_helper_exception_pc_alignment(cpu_env, tcg_constant_tl(pc));
+        s->base.is_jmp = DISAS_NORETURN;
+        s->base.pc_next = QEMU_ALIGN_UP(pc, 4);
+        return;
+    }
+
+    s->pc_curr = pc;
     insn = arm_ldl_code(env, s->base.pc_next, s->sctlr_b);
     s->insn = insn;
-    s->base.pc_next += 4;
+    s->base.pc_next = pc + 4;
 
     s->fp_access_checked = false;
     s->sve_access_checked = false;
diff --git a/target/arm/translate.c b/target/arm/translate.c
index 5e0fc8a0a0..dfeaa2321d 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -9452,7 +9452,7 @@  static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
     dc->insn_start = tcg_last_op();
 }
 
-static bool arm_pre_translate_insn(DisasContext *dc)
+static bool arm_check_kernelpage(DisasContext *dc)
 {
 #ifdef CONFIG_USER_ONLY
     /* Intercept jump to the magic kernel page.  */
@@ -9464,7 +9464,11 @@  static bool arm_pre_translate_insn(DisasContext *dc)
         return true;
     }
 #endif
+    return false;
+}
 
+static bool arm_check_ss_active(DisasContext *dc)
+{
     if (dc->ss_active && !dc->pstate_ss) {
         /* Singlestep state is Active-pending.
          * If we're in this state at the start of a TB then either
@@ -9498,17 +9502,38 @@  static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
 {
     DisasContext *dc = container_of(dcbase, DisasContext, base);
     CPUARMState *env = cpu->env_ptr;
+    uint32_t pc = dc->base.pc_next;
     unsigned int insn;
 
-    if (arm_pre_translate_insn(dc)) {
-        dc->base.pc_next += 4;
+    /* Singlestep exceptions have the highest priority. */
+    if (arm_check_ss_active(dc)) {
+        dc->base.pc_next = pc + 4;
         return;
     }
 
-    dc->pc_curr = dc->base.pc_next;
-    insn = arm_ldl_code(env, dc->base.pc_next, dc->sctlr_b);
+    if (pc & 3) {
+        /*
+         * PC alignment fault.  This has priority over the instruction abort
+         * that we would receive from a translation fault via arm_ldl_code
+         * (or the execution of the kernelpage entrypoint). This should only
+         * be possible after an indirect branch, at the start of the TB.
+         */
+        assert(dc->base.num_insns == 1);
+        gen_helper_exception_pc_alignment(cpu_env, tcg_constant_tl(pc));
+        dc->base.is_jmp = DISAS_NORETURN;
+        dc->base.pc_next = QEMU_ALIGN_UP(pc, 4);
+        return;
+    }
+
+    if (arm_check_kernelpage(dc)) {
+        dc->base.pc_next = pc + 4;
+        return;
+    }
+
+    dc->pc_curr = pc;
+    insn = arm_ldl_code(env, pc, dc->sctlr_b);
     dc->insn = insn;
-    dc->base.pc_next += 4;
+    dc->base.pc_next = pc + 4;
     disas_arm_insn(dc, insn);
 
     arm_post_translate_insn(dc);
@@ -9570,7 +9595,7 @@  static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
     uint32_t insn;
     bool is_16bit;
 
-    if (arm_pre_translate_insn(dc)) {
+    if (arm_check_ss_active(dc) || arm_check_kernelpage(dc)) {
         dc->base.pc_next += 2;
         return;
     }