[2/2] target/riscv: Mark amo insns during translation

Message ID 20220401125948.79292-3-richard.henderson@linaro.org
State New
Series target/riscv: Annotate atomic operations

Commit Message

Richard Henderson April 1, 2022, 12:59 p.m. UTC
Atomic memory operations perform both reads and writes as part
of their implementation, but always raise write faults.

Use TARGET_INSN_START_EXTRA_WORDS to mark amo insns in the
opcode stream, and force the access type to write at the
point of raising the exception.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/riscv/cpu.h                      | 15 ++++++
 target/riscv/cpu.c                      |  3 ++
 target/riscv/cpu_helper.c               | 62 +++++++++++++++++--------
 target/riscv/translate.c                |  9 ++++
 target/riscv/insn_trans/trans_rva.c.inc | 11 ++++-
 5 files changed, 79 insertions(+), 21 deletions(-)
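
For context, here is a minimal guest-side sketch of the behaviour being
modelled (not part of the patch; it assumes a Linux guest built with an
rv64gc toolchain): the RISC-V privileged spec requires a faulting AMO to
report a store/AMO fault (e.g. scause 15 for a page fault) rather than a
load fault, even though the operation also reads the location. The program
below issues amoadd.w to a read-only page; the visible symptom is SIGSEGV,
and the trap cause seen by the guest kernel is what this patch corrects.

/* Hypothetical demonstration, not part of the patch. */
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
    /* Map one page with read permission only. */
    uint32_t *p = mmap(NULL, 4096, PROT_READ,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) {
        perror("mmap");
        return 1;
    }

    uint32_t old;
    /*
     * amoadd.w rd, rs2, (rs1): the page is not writable, so this must
     * fault, and the fault must be a store/AMO page fault (scause 15),
     * not a load page fault (scause 13), even though the AMO also reads *p.
     */
    asm volatile("amoadd.w %0, %2, (%1)"
                 : "=r"(old)
                 : "r"(p), "r"(1u)
                 : "memory");

    printf("unexpected success: old value %u\n", old);
    return 0;
}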

Comments

Alistair Francis April 8, 2022, 6:16 a.m. UTC | #1
On Fri, Apr 1, 2022 at 11:04 PM Richard Henderson
<richard.henderson@linaro.org> wrote:
>
> Atomic memory operations perform both reads and writes as part
> of their implementation, but always raise write faults.
>
> Use TARGET_INSN_START_EXTRA_WORDS to mark amo insns in the
> opcode stream, and force the access type to write at the
> point of raising the exception.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Alistair Francis <alistair.francis@wdc.com>

Alistair

Atish Patra April 19, 2022, 7:43 a.m. UTC | #2
On Thu, Apr 7, 2022 at 11:17 PM Alistair Francis <alistair23@gmail.com> wrote:
>
> On Fri, Apr 1, 2022 at 11:04 PM Richard Henderson
> <richard.henderson@linaro.org> wrote:
> >
> > Atomic memory operations perform both reads and writes as part
> > of their implementation, but always raise write faults.
> >
> > Use TARGET_INSN_START_EXTRA_WORDS to mark amo insns in the
> > opcode stream, and force the access type to write at the
> > point of raising the exception.
> >
> > Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
>
> Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
>

This results in the following segfault with a 5.17 guest kernel
(QEMU tree: riscv-to-apply.next):

[    1.134496] Run /init as init process
[    1.329796] mount[61]: unhandled signal 11 code 0x2 at 0x00000000000c29c6 in busybox[10000+159000]
[    1.331185] CPU: 2 PID: 61 Comm: mount Not tainted 5.17.0 #59
[    1.331632] Hardware name: riscv-virtio,qemu (DT)
[    1.332053] epc : 00000000000c29c6 ra : 00000000000a03f2 sp : 00007fffd6707ae0
[    1.332350]  gp : 000000000016c408 tp : 00000000001707a0 t0 : 0000000000001000
[    1.332575]  t1 : 0000000000000000 t2 : 0000000000080000 s0 : 0000000000163398
[    1.332797]  s1 : 0000000000163398 a0 : 0000000000000000 a1 : 0000000000000000
[    1.333018]  a2 : 0000000000171590 a3 : 0000000000000000 a4 : 0000000000000001
[    1.333371]  a5 : 0000000000000001 a6 : fffffffffbada498 a7 : 000000000000003f
[    1.333607]  s2 : 0000000000000004 s3 : 0000000000000001 s4 : 0000000000000000
[    1.333829]  s5 : 0000000000000003 s6 : 0000000000000000 s7 : 000000000016c280
[    1.334052]  s8 : 0000000000000001 s9 : 0000000000170828 s10: 0000000000000000
[    1.334275]  s11: 0000000000000001 t3 : 0000000000000000 t4 : 00000000001410f0
[    1.334500]  t5 : 0000000000000005 t6 : ffffffffffffffff
[    1.334669] status: 0000000200004020 badaddr: 00000000000c29c6 cause: 000000000000000f
Segmentation fault



Patch

diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index c069fe85fa..3de4da3fa1 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -290,6 +290,13 @@ struct CPUArchState {
     /* True if in debugger mode.  */
     bool debugger;
 
+    /*
+     * True if unwinding through an amo insn.  Used to transform a
+     * read fault into a store_amo fault; only valid immediately
+     * after cpu_restore_state().
+     */
+    bool unwind_amo;
+
     /*
      * CSRs for PointerMasking extension
      */
@@ -517,6 +524,14 @@ FIELD(TB_FLAGS, XL, 20, 2)
 FIELD(TB_FLAGS, PM_MASK_ENABLED, 22, 1)
 FIELD(TB_FLAGS, PM_BASE_ENABLED, 23, 1)
 
+#ifndef CONFIG_USER_ONLY
+/*
+ * RISC-V-specific extra insn start words:
+ * 1: True if the instruction is AMO, false otherwise.
+ */
+#define TARGET_INSN_START_EXTRA_WORDS 1
+#endif
+
 #ifdef TARGET_RISCV32
 #define riscv_cpu_mxl(env)  ((void)(env), MXL_RV32)
 #else
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index ddda4906ff..3818d5ba80 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -396,6 +396,9 @@ void restore_state_to_opc(CPURISCVState *env, TranslationBlock *tb,
     } else {
         env->pc = data[0];
     }
+#ifndef CONFIG_USER_ONLY
+    env->unwind_amo = data[1];
+#endif
 }
 
 static void riscv_cpu_reset(DeviceState *dev)
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index 126251d5da..b5bbe6fc39 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -1139,26 +1139,11 @@ void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
     RISCVCPU *cpu = RISCV_CPU(cs);
     CPURISCVState *env = &cpu->env;
 
-    if (access_type == MMU_DATA_STORE) {
-        cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
-    } else if (access_type == MMU_DATA_LOAD) {
-        cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
-    } else {
-        cs->exception_index = RISCV_EXCP_INST_ACCESS_FAULT;
+    cpu_restore_state(cs, retaddr, true);
+    if (env->unwind_amo) {
+        access_type = MMU_DATA_STORE;
     }
 
-    env->badaddr = addr;
-    env->two_stage_lookup = riscv_cpu_virt_enabled(env) ||
-                            riscv_cpu_two_stage_lookup(mmu_idx);
-    cpu_loop_exit_restore(cs, retaddr);
-}
-
-void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
-                                   MMUAccessType access_type, int mmu_idx,
-                                   uintptr_t retaddr)
-{
-    RISCVCPU *cpu = RISCV_CPU(cs);
-    CPURISCVState *env = &cpu->env;
     switch (access_type) {
     case MMU_INST_FETCH:
         cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
@@ -1172,10 +1157,43 @@ void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
     default:
         g_assert_not_reached();
     }
+
     env->badaddr = addr;
     env->two_stage_lookup = riscv_cpu_virt_enabled(env) ||
                             riscv_cpu_two_stage_lookup(mmu_idx);
-    cpu_loop_exit_restore(cs, retaddr);
+    cpu_loop_exit(cs);
+}
+
+void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+                                   MMUAccessType access_type, int mmu_idx,
+                                   uintptr_t retaddr)
+{
+    RISCVCPU *cpu = RISCV_CPU(cs);
+    CPURISCVState *env = &cpu->env;
+
+    cpu_restore_state(cs, retaddr, true);
+    if (env->unwind_amo) {
+        access_type = MMU_DATA_STORE;
+    }
+
+    switch (access_type) {
+    case MMU_INST_FETCH:
+        cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
+        break;
+    case MMU_DATA_LOAD:
+        cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
+        break;
+    case MMU_DATA_STORE:
+        cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
+        break;
+    default:
+        g_assert_not_reached();
+    }
+
+    env->badaddr = addr;
+    env->two_stage_lookup = riscv_cpu_virt_enabled(env) ||
+                            riscv_cpu_two_stage_lookup(mmu_idx);
+    cpu_loop_exit(cs);
 }
 
 bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
@@ -1307,11 +1325,15 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
     } else if (probe) {
         return false;
     } else {
+        cpu_restore_state(cs, retaddr, true);
+        if (env->unwind_amo) {
+            access_type = MMU_DATA_STORE;
+        }
         raise_mmu_exception(env, address, access_type, pmp_violation,
                             first_stage_error,
                             riscv_cpu_virt_enabled(env) ||
                                 riscv_cpu_two_stage_lookup(mmu_idx));
-        cpu_loop_exit_restore(cs, retaddr);
+        cpu_loop_exit(cs);
     }
 
     return true;
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index fac998a6b5..ae4b0d1524 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -107,6 +107,10 @@ typedef struct DisasContext {
     /* PointerMasking extension */
     bool pm_mask_enabled;
     bool pm_base_enabled;
+#ifndef CONFIG_USER_ONLY
+    /* TCG op of the current insn_start.  */
+    TCGOp *insn_start;
+#endif
 } DisasContext;
 
 static inline bool has_ext(DisasContext *ctx, uint32_t ext)
@@ -1105,7 +1109,12 @@ static void riscv_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
 {
     DisasContext *ctx = container_of(dcbase, DisasContext, base);
 
+#ifdef CONFIG_USER_ONLY
     tcg_gen_insn_start(ctx->base.pc_next);
+#else
+    tcg_gen_insn_start(ctx->base.pc_next, 0);
+    ctx->insn_start = tcg_last_op();
+#endif
 }
 
 static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
diff --git a/target/riscv/insn_trans/trans_rva.c.inc b/target/riscv/insn_trans/trans_rva.c.inc
index 45db82c9be..66faa8f1da 100644
--- a/target/riscv/insn_trans/trans_rva.c.inc
+++ b/target/riscv/insn_trans/trans_rva.c.inc
@@ -37,6 +37,13 @@ static bool gen_lr(DisasContext *ctx, arg_atomic *a, MemOp mop)
     return true;
 }
 
+static void record_insn_start_amo(DisasContext *ctx)
+{
+#ifndef CONFIG_USER_ONLY
+    tcg_set_insn_start_param(ctx->insn_start, 1, 1);
+#endif
+}
+
 static bool gen_sc(DisasContext *ctx, arg_atomic *a, MemOp mop)
 {
     TCGv dest, src1, src2;
@@ -73,6 +80,7 @@ static bool gen_sc(DisasContext *ctx, arg_atomic *a, MemOp mop)
      */
     tcg_gen_movi_tl(load_res, -1);
 
+    record_insn_start_amo(ctx);
     return true;
 }
 
@@ -85,8 +93,9 @@ static bool gen_amo(DisasContext *ctx, arg_atomic *a,
     TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);
 
     func(dest, src1, src2, ctx->mem_idx, mop);
-
     gen_set_gpr(ctx, a->rd, dest);
+
+    record_insn_start_amo(ctx);
     return true;
 }