
[05/11] target-arm: Use correct mmu_idx for unprivileged loads and stores

Message ID 1422037228-5363-6-git-send-email-peter.maydell@linaro.org
State Superseded

Commit Message

Peter Maydell Jan. 23, 2015, 6:20 p.m. UTC
The MMU index to use for unprivileged loads and stores is more
complicated than we currently implement:
 * for A64, it should be "if at EL1, access as if EL0; otherwise
   access at current EL"
 * for A32/T32, it should be "if EL2, UNPREDICTABLE; otherwise
   access as if at EL0".

In both cases, if we want to make the access for Secure EL0
this is not the same mmu_idx as for Non-Secure EL0.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target-arm/translate-a64.c | 19 ++++++++++++++++++-
 target-arm/translate.c     | 26 ++++++++++++++++++++++++--
 2 files changed, 42 insertions(+), 3 deletions(-)

Comments

Greg Bellows Jan. 26, 2015, 2:40 p.m. UTC | #1
On Fri, Jan 23, 2015 at 12:20 PM, Peter Maydell <peter.maydell@linaro.org>
wrote:

> The MMU index to use for unprivileged loads and stores is more
> complicated than we currently implement:
>  * for A64, it should be "if at EL1, access as if EL0; otherwise
>    access at current EL"
>  * for A32/T32, it should be "if EL2, UNPREDICTABLE; otherwise
>    access as if at EL0".
>
>
The wording between the specs appears to be almost identical, curious why
the handling is different?



> In both cases, if we want to make the access for Secure EL0
> this is not the same mmu_idx as for Non-Secure EL0.
>
> Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
> ---
>  target-arm/translate-a64.c | 19 ++++++++++++++++++-
>  target-arm/translate.c     | 26 ++++++++++++++++++++++++--
>  2 files changed, 42 insertions(+), 3 deletions(-)
>
> diff --git a/target-arm/translate-a64.c b/target-arm/translate-a64.c
> index 96f14ff..acf4b16 100644
> --- a/target-arm/translate-a64.c
> +++ b/target-arm/translate-a64.c
> @@ -123,6 +123,23 @@ void a64_translate_init(void)
>  #endif
>  }
>
> +static inline ARMMMUIdx get_a64_user_mem_index(DisasContext *s)
> +{
> +    /* Return the mmu_idx to use for A64 "unprivileged load/store" insns:
> +     *  if EL1, access as if EL0; otherwise access at current EL
> +     */
> +    switch (s->mmu_idx) {
> +    case ARMMMUIdx_S12NSE1:
> +        return ARMMMUIdx_S12NSE0;
> +    case ARMMMUIdx_S1SE1:
> +        return ARMMMUIdx_S1SE0;
> +    case ARMMMUIdx_S2NS:
> +        g_assert_not_reached();
> +    default:
> +        return s->mmu_idx;
> +    }
> +}
> +
>  void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
>                              fprintf_function cpu_fprintf, int flags)
>  {
> @@ -2107,7 +2124,7 @@ static void disas_ldst_reg_imm9(DisasContext *s,
> uint32_t insn)
>          }
>      } else {
>          TCGv_i64 tcg_rt = cpu_reg(s, rt);
> -        int memidx = is_unpriv ? MMU_USER_IDX : get_mem_index(s);
> +        int memidx = is_unpriv ? get_a64_user_mem_index(s) :
> get_mem_index(s);
>
>          if (is_store) {
>              do_gpr_st_memidx(s, tcg_rt, tcg_addr, size, memidx);
> diff --git a/target-arm/translate.c b/target-arm/translate.c
> index 7163649..715f65d 100644
> --- a/target-arm/translate.c
> +++ b/target-arm/translate.c
> @@ -113,6 +113,28 @@ void arm_translate_init(void)
>      a64_translate_init();
>  }
>
> +static inline ARMMMUIdx get_a32_user_mem_index(DisasContext *s)
> +{
> +    /* Return the mmu_idx to use for A32/T32 "unprivileged load/store"
> +     * insns:
> +     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
> +     *  otherwise, access as if at PL0.
> +     */
> +    switch (s->mmu_idx) {
> +    case ARMMMUIdx_S1E2:        /* this one is UNPREDICTABLE */
> +    case ARMMMUIdx_S12NSE0:
> +    case ARMMMUIdx_S12NSE1:
> +        return ARMMMUIdx_S12NSE0;
> +    case ARMMMUIdx_S1E3:
> +    case ARMMMUIdx_S1SE0:
> +    case ARMMMUIdx_S1SE1:
> +        return ARMMMUIdx_S1SE0;
> +    case ARMMMUIdx_S2NS:
> +    default:
> +        g_assert_not_reached();
> +    }
> +}
> +
>  static inline TCGv_i32 load_cpu_offset(int offset)
>  {
>      TCGv_i32 tmp = tcg_temp_new_i32();
> @@ -8793,7 +8815,7 @@ static void disas_arm_insn(DisasContext *s, unsigned
> int insn)
>              tmp2 = load_reg(s, rn);
>              if ((insn & 0x01200000) == 0x00200000) {
>                  /* ldrt/strt */
> -                i = MMU_USER_IDX;
> +                i = get_a32_user_mem_index(s);
>              } else {
>                  i = get_mem_index(s);
>              }
> @@ -10173,7 +10195,7 @@ static int disas_thumb2_insn(CPUARMState *env,
> DisasContext *s, uint16_t insn_hw
>                      break;
>                  case 0xe: /* User privilege.  */
>                      tcg_gen_addi_i32(addr, addr, imm);
> -                    memidx = MMU_USER_IDX;
> +                    memidx = get_a32_user_mem_index(s);
>                      break;
>                  case 0x9: /* Post-decrement.  */
>                      imm = -imm;
> --
> 1.9.1
>
Otherwise,

Reviewed-by: Greg Bellows <greg.bellows@linaro.org>
Peter Maydell Jan. 26, 2015, 2:56 p.m. UTC | #2
On 26 January 2015 at 14:40, Greg Bellows <greg.bellows@linaro.org> wrote:
> On Fri, Jan 23, 2015 at 12:20 PM, Peter Maydell <peter.maydell@linaro.org>
> wrote:
>>
>> The MMU index to use for unprivileged loads and stores is more
>> complicated than we currently implement:
>>  * for A64, it should be "if at EL1, access as if EL0; otherwise
>>    access at current EL"
>>  * for A32/T32, it should be "if EL2, UNPREDICTABLE; otherwise
>>    access as if at EL0".
>>
>
> The wording between the specs appears to be almost identical, curious why
> the handling is different?

Because that's what the ARM ARM specifies. Compare C3.2.5 (A64 LDT &c)
with F7.1.95 (A32/T32 LDRT).

-- PMM
Greg Bellows Jan. 26, 2015, 7:34 p.m. UTC | #3
On Mon, Jan 26, 2015 at 8:56 AM, Peter Maydell <peter.maydell@linaro.org>
wrote:

> On 26 January 2015 at 14:40, Greg Bellows <greg.bellows@linaro.org> wrote:
> > On Fri, Jan 23, 2015 at 12:20 PM, Peter Maydell <
> peter.maydell@linaro.org>
> > wrote:
> >>
> >> The MMU index to use for unprivileged loads and stores is more
> >> complicated than we currently implement:
> >>  * for A64, it should be "if at EL1, access as if EL0; otherwise
> >>    access at current EL"
> >>  * for A32/T32, it should be "if EL2, UNPREDICTABLE; otherwise
> >>    access as if at EL0".
> >>
> >
> > The wording between the specs appears to be almost identical, curious why
> > the handling is different?
>
> Because that's what the ARM ARM specifies. Compare C3.2.5 (A64 LDT &c)
> with F7.1.95 (A32/T32 LDRT).
>

I had been comparing the wording of ARMv8 - F1.6.3 and ARMv7 - A4.6.3.
After comparing the LDRT instructions between A64 (C6.6.97) and A32
(F7.1.95), I am still missing the distinction that warrants the following
different behavior:

- EL2 is unpredictable in both A64 and A32, but in one case we treat it as
such and the other we demote it to NS/EL0 to allow it.
- EL3 is demoted to S/EL0 in one case but remains EL3 in the other.


>
> -- PMM
>
Peter Maydell Jan. 26, 2015, 8:37 p.m. UTC | #4
On 26 January 2015 at 19:34, Greg Bellows <greg.bellows@linaro.org> wrote:
> On Mon, Jan 26, 2015 at 8:56 AM, Peter Maydell <peter.maydell@linaro.org>
> wrote:
>> Because that's what the ARM ARM specifies. Compare C3.2.5 (A64 LDT &c)
>> with F7.1.95 (A32/T32 LDRT).
>
>
> I had been comparing the wording of ARMv8 - F1.6.3 and ARMv7 - A4.6.3.
> After comparing the LDRT instructions between A64 (C6.6.97) and A32
> (F7.1.95), I am still missing the distinction that warrants the following
> different behavior:
>
> - EL2 is unpredictable in both A64 and A32, but in one case we treat it as
> such and the other we demote it to NS/EL0 to allow it.

No, it's not unpredictable in A64. It behaves as if a normal
(EL2) access [C3.2.5 "if the PE is executing in any other Exception
level, then a normal memory access for that level is performed"].
It is only unpredictable at EL2 in A32/T32 [F7.1.95 "UNPREDICTABLE
in Hyp mode"; in the v7 ARM ARM, A4.6.3 "UNPREDICTABLE if executed
at PL2"]. You'll see that the pseudocode for A32/T32 LDRT has
an UNPREDICTABLE check for PL2, but the pseudocode for A64
LDTR does not have any equivalent check.

> - EL3 is demoted to S/EL0 in one case but remains EL3 in the
> other.

Remains EL3 for AArch64 (by the same C3.2.5 requirement quoted above);
must act as if EL0 for AArch32 (F7.1.95 "as if the PE were running
in User mode").

This is because an EL3 A32/T32 insn is PL1, and AArch32 accesses
from PL1 must behave as if from PL0 (otherwise pre-v8 software
would break). An EL3 A64 insn, on the other hand, is definitely
not EL1 and there's no back-compatibility behaviour required.

Both these differences are required by the spec.

-- PMM
Greg Bellows Jan. 26, 2015, 10:01 p.m. UTC | #5
On Mon, Jan 26, 2015 at 2:37 PM, Peter Maydell <peter.maydell@linaro.org>
wrote:

> On 26 January 2015 at 19:34, Greg Bellows <greg.bellows@linaro.org> wrote:
> > On Mon, Jan 26, 2015 at 8:56 AM, Peter Maydell <peter.maydell@linaro.org
> >
> > wrote:
> >> Because that's what the ARM ARM specifies. Compare C3.2.5 (A64 LDT &c)
> >> with F7.1.95 (A32/T32 LDRT).
> >
> >
> > I had been comparing the wording of ARMv8 - F1.6.3 and ARMv7 - A4.6.3.
> > After comparing the LDRT instructions between A64 (C6.6.97) and A32
> > (F7.1.95), I am still missing the distinction that warrants the following
> > different behavior:
> >
> > - EL2 is unpredictable in both A64 and A32, but in one case we treat it
> as
> > such and the other we demote it to NS/EL0 to allow it.
>
> No, it's not unpredictable in A64. It behaves as if a normal
> (EL2) access [C3.2.5 "if the PE is executing in any other Exception
> level, then a normal memory access for that level is performed"].
> It is only unpredictable at EL2 in A32/T32 [F7.1.95 "UNPREDICTABLE
> in Hyp mode"; in the v7 ARM ARM, A4.6.3 "UNPREDICTABLE if executed
> at PL2"]. You'll see that the pseudocode for A32/T32 LDRT has
> an UNPREDICTABLE check for PL2, but the pseudocode for A64
> LDTR does not have any equivalent check.
>

My bad, you are correct, I read S2NS too fast.


>
> > - EL3 is demoted to S/EL0 in one case but remains EL3 in the
> > other.
>
> Remains EL3 for AArch64 (by the same C3.2.5 requirement quoted above);
> must act as if EL0 for AArch32 (F7.1.95 "as if the PE were running
> in User mode").
>
Ah yes.. EL3 is PL1 on AArch32.



> This is because an EL3 A32/T32 insn is PL1, and AArch32 accesses
> from PL1 must behave as if from PL0 (otherwise pre-v8 software
> would break). An EL3 A64 insn, on the other hand, is definitely
> not EL1 and there's no back-compatibility behaviour required.
>
> Both these differences are required by the spec.
>
> -- PMM
>

I see the differences now, thanks for the clarification.

Patch

diff --git a/target-arm/translate-a64.c b/target-arm/translate-a64.c
index 96f14ff..acf4b16 100644
--- a/target-arm/translate-a64.c
+++ b/target-arm/translate-a64.c
@@ -123,6 +123,23 @@  void a64_translate_init(void)
 #endif
 }
 
+static inline ARMMMUIdx get_a64_user_mem_index(DisasContext *s)
+{
+    /* Return the mmu_idx to use for A64 "unprivileged load/store" insns:
+     *  if EL1, access as if EL0; otherwise access at current EL
+     */
+    switch (s->mmu_idx) {
+    case ARMMMUIdx_S12NSE1:
+        return ARMMMUIdx_S12NSE0;
+    case ARMMMUIdx_S1SE1:
+        return ARMMMUIdx_S1SE0;
+    case ARMMMUIdx_S2NS:
+        g_assert_not_reached();
+    default:
+        return s->mmu_idx;
+    }
+}
+
 void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
                             fprintf_function cpu_fprintf, int flags)
 {
@@ -2107,7 +2124,7 @@  static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn)
         }
     } else {
         TCGv_i64 tcg_rt = cpu_reg(s, rt);
-        int memidx = is_unpriv ? MMU_USER_IDX : get_mem_index(s);
+        int memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
 
         if (is_store) {
             do_gpr_st_memidx(s, tcg_rt, tcg_addr, size, memidx);
diff --git a/target-arm/translate.c b/target-arm/translate.c
index 7163649..715f65d 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -113,6 +113,28 @@  void arm_translate_init(void)
     a64_translate_init();
 }
 
+static inline ARMMMUIdx get_a32_user_mem_index(DisasContext *s)
+{
+    /* Return the mmu_idx to use for A32/T32 "unprivileged load/store"
+     * insns:
+     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
+     *  otherwise, access as if at PL0.
+     */
+    switch (s->mmu_idx) {
+    case ARMMMUIdx_S1E2:        /* this one is UNPREDICTABLE */
+    case ARMMMUIdx_S12NSE0:
+    case ARMMMUIdx_S12NSE1:
+        return ARMMMUIdx_S12NSE0;
+    case ARMMMUIdx_S1E3:
+    case ARMMMUIdx_S1SE0:
+    case ARMMMUIdx_S1SE1:
+        return ARMMMUIdx_S1SE0;
+    case ARMMMUIdx_S2NS:
+    default:
+        g_assert_not_reached();
+    }
+}
+
 static inline TCGv_i32 load_cpu_offset(int offset)
 {
     TCGv_i32 tmp = tcg_temp_new_i32();
@@ -8793,7 +8815,7 @@  static void disas_arm_insn(DisasContext *s, unsigned int insn)
             tmp2 = load_reg(s, rn);
             if ((insn & 0x01200000) == 0x00200000) {
                 /* ldrt/strt */
-                i = MMU_USER_IDX;
+                i = get_a32_user_mem_index(s);
             } else {
                 i = get_mem_index(s);
             }
@@ -10173,7 +10195,7 @@  static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
                     break;
                 case 0xe: /* User privilege.  */
                     tcg_gen_addi_i32(addr, addr, imm);
-                    memidx = MMU_USER_IDX;
+                    memidx = get_a32_user_mem_index(s);
                     break;
                 case 0x9: /* Post-decrement.  */
                     imm = -imm;
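
For readers following the thread, here is a minimal standalone sketch of the mapping the two new helpers implement. It is not QEMU code: a local MMUIdx enum and mmu_name() helper stand in for QEMU's ARMMMUIdx, and a small main() prints the effective translation regime for the EL2 and EL3 cases debated above.

/* Standalone sketch, not QEMU code: the MMUIdx values below stand in for
 * QEMU's ARMMMUIdx so the mapping logic of the two helpers added by this
 * patch can be compiled and run on its own.
 */
#include <assert.h>
#include <stdio.h>

typedef enum {
    MMU_S12NSE0,    /* Non-secure EL0, stage 1+2 */
    MMU_S12NSE1,    /* Non-secure EL1, stage 1+2 */
    MMU_S1E2,       /* Non-secure EL2 (Hyp) */
    MMU_S1E3,       /* EL3 (Monitor mode in AArch32) */
    MMU_S1SE0,      /* Secure EL0 */
    MMU_S1SE1,      /* Secure EL1 */
    MMU_S2NS,       /* stage 2: never a CPU execution regime */
} MMUIdx;

static const char *mmu_name(MMUIdx idx)
{
    static const char *const names[] = {
        "S12NSE0", "S12NSE1", "S1E2", "S1E3", "S1SE0", "S1SE1", "S2NS",
    };
    return names[idx];
}

/* A64 LDTR/STTR: only EL1 is demoted to EL0; EL2 and EL3 access at the
 * current exception level (ARM ARM C3.2.5, as cited in the thread). */
static MMUIdx a64_user_mem_index(MMUIdx cur)
{
    switch (cur) {
    case MMU_S12NSE1:
        return MMU_S12NSE0;
    case MMU_S1SE1:
        return MMU_S1SE0;
    case MMU_S2NS:
        assert(!"stage 2 is not an execution regime");
        /* fall through */
    default:
        return cur;
    }
}

/* A32/T32 LDRT/STRT: always access as PL0 (EL3 is PL1 in AArch32);
 * Hyp mode is UNPREDICTABLE and is modelled as PL0 here, matching the
 * choice made in the patch. */
static MMUIdx a32_user_mem_index(MMUIdx cur)
{
    switch (cur) {
    case MMU_S1E2:          /* UNPREDICTABLE */
    case MMU_S12NSE0:
    case MMU_S12NSE1:
        return MMU_S12NSE0;
    case MMU_S1E3:
    case MMU_S1SE0:
    case MMU_S1SE1:
        return MMU_S1SE0;
    default:
        assert(!"unexpected regime for an A32/T32 instruction");
        return cur;
    }
}

int main(void)
{
    /* The two cases discussed in the thread: EL2 (Hyp) and EL3 (Mon). */
    printf("A64 LDTR at EL2 uses %s\n", mmu_name(a64_user_mem_index(MMU_S1E2)));
    printf("A32 LDRT at Hyp uses %s\n", mmu_name(a32_user_mem_index(MMU_S1E2)));
    printf("A64 LDTR at EL3 uses %s\n", mmu_name(a64_user_mem_index(MMU_S1E3)));
    printf("A32 LDRT at Mon uses %s\n", mmu_name(a32_user_mem_index(MMU_S1E3)));
    return 0;
}

Running it prints S1E2 and S1E3 unchanged for the A64 cases but S12NSE0 and S1SE0 for the A32/T32 cases, which is the asymmetry the thread settles on.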