
[v4,18/40] target/arm: Reorganize ARMMMUIdx

Message ID: 20191203022937.1474-19-richard.henderson@linaro.org
State: New
Series: target/arm: Implement ARMv8.1-VHE

Commit Message

Richard Henderson Dec. 3, 2019, 2:29 a.m. UTC
Prepare for, but do not yet implement, the EL2&0 regime.
This involves adding the new MMUIdx enumerators and adjusting
some of the MMUIdx related predicates to match.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
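
As background for the new encoding, here is a minimal standalone sketch (not part of the patch) built only from the constants and enumerators introduced in the diff below. It shows why the core-index mask grows from 0x7 to 0xf and why NB_MMU_MODES goes from 8 to 9: the type bits (A/M/NOTLB) sit above a four-bit core index, so converting between an ARMMMUIdx and a softmmu index is a plain mask/OR.

/* Sketch only: values copied from the cpu.h hunk below. */
#include <assert.h>
#include <stdio.h>

#define ARM_MMU_IDX_A     0x10  /* A profile */
#define ARM_MMU_IDX_NOTLB 0x20  /* does not have a TLB */
#define ARM_MMU_IDX_M     0x40  /* M profile */

#define ARM_MMU_IDX_COREIDX_MASK 0xf  /* was 0x7 before this patch */

/* The reorganized A-profile enumerators. */
enum {
    EL10_0 = 0 | ARM_MMU_IDX_A,
    EL20_0 = 1 | ARM_MMU_IDX_A,
    EL10_1 = 2 | ARM_MMU_IDX_A,
    E2     = 3 | ARM_MMU_IDX_A,
    EL20_2 = 4 | ARM_MMU_IDX_A,
    SE0    = 5 | ARM_MMU_IDX_A,
    SE1    = 6 | ARM_MMU_IDX_A,
    SE3    = 7 | ARM_MMU_IDX_A,
    Stage2 = 8 | ARM_MMU_IDX_A,
};

int main(void)
{
    /* The core (softmmu) index is just the low nibble... */
    assert((Stage2 & ARM_MMU_IDX_COREIDX_MASK) == 8);

    /* ...and OR-ing the profile bit back in recovers the ARMMMUIdx,
     * which is all arm_to_core_mmu_idx()/core_to_arm_mmu_idx() do. */
    assert(((SE1 & ARM_MMU_IDX_COREIDX_MASK) | ARM_MMU_IDX_A) == SE1);

    /* Nine A-profile indexes now carry TLBs, hence NB_MMU_MODES 8 -> 9. */
    printf("highest core index: %d\n", Stage2 & ARM_MMU_IDX_COREIDX_MASK);
    return 0;
}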

---
 target/arm/cpu-param.h |   2 +-
 target/arm/cpu.h       | 128 ++++++++++++++++++-----------------------
 target/arm/internals.h |  37 +++++++++++-
 target/arm/helper.c    |  66 ++++++++++++++++++---
 target/arm/translate.c |   1 -
 5 files changed, 150 insertions(+), 84 deletions(-)

-- 
2.17.1

Comments

Alex Bennée Dec. 4, 2019, 1:44 p.m. UTC | #1
Richard Henderson <richard.henderson@linaro.org> writes:

> Prepare for, but do not yet implement, the EL2&0 regime.
> This involves adding the new MMUIdx enumerators and adjusting
> some of the MMUIdx related predicates to match.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>

-- 
Alex Bennée
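
A note on one of the predicate changes before the full diff: with the old A-profile numbering, the exception level could be read straight from the low bits (the removed inline arm_mmu_idx_to_el() simply did "return mmu_idx & 3;" for A-profile indexes). The nine-entry layout introduced here no longer has that property, which appears to be why arm_mmu_idx_to_el() becomes an explicit switch in helper.c. A tiny standalone illustration (not part of the patch; constants taken from the old and new enumerations):

#include <assert.h>

#define ARM_MMU_IDX_A 0x10

/* Old numbering: ARMMMUIdx_SE1 = 5; new numbering: ARMMMUIdx_SE1 = 6. */
enum { OLD_SE1 = 5 | ARM_MMU_IDX_A, NEW_SE1 = 6 | ARM_MMU_IDX_A };

int main(void)
{
    assert((OLD_SE1 & 3) == 1);  /* old layout: low two bits were the EL */
    assert((NEW_SE1 & 3) == 2);  /* new layout: 6 & 3 == 2, but SE1 runs at EL1 */
    return 0;
}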

Patch

diff --git a/target/arm/cpu-param.h b/target/arm/cpu-param.h
index 6e6948e960..18ac562346 100644
--- a/target/arm/cpu-param.h
+++ b/target/arm/cpu-param.h
@@ -29,6 +29,6 @@ 
 # define TARGET_PAGE_BITS_MIN  10
 #endif
 
-#define NB_MMU_MODES 8
+#define NB_MMU_MODES 9
 
 #endif
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 015301e93a..bf8eb57e3a 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -2778,7 +2778,9 @@  static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
  *  + NonSecure EL1 & 0 stage 1
  *  + NonSecure EL1 & 0 stage 2
  *  + NonSecure EL2
- *  + Secure EL1 & EL0
+ *  + NonSecure EL2 & 0   (ARMv8.1-VHE)
+ *  + Secure EL0
+ *  + Secure EL1
  *  + Secure EL3
  * If EL3 is 32-bit:
  *  + NonSecure PL1 & 0 stage 1
@@ -2788,8 +2790,9 @@  static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
  * (reminder: for 32 bit EL3, Secure PL1 is *EL3*, not EL1.)
  *
  * For QEMU, an mmu_idx is not quite the same as a translation regime because:
- *  1. we need to split the "EL1 & 0" regimes into two mmu_idxes, because they
- *     may differ in access permissions even if the VA->PA map is the same
+ *  1. we need to split the "EL1 & 0" and "EL2 & 0" regimes into two mmu_idxes,
+ *     because they may differ in access permissions even if the VA->PA map is
+ *     the same
  *  2. we want to cache in our TLB the full VA->IPA->PA lookup for a stage 1+2
  *     translation, which means that we have one mmu_idx that deals with two
  *     concatenated translation regimes [this sort of combined s1+2 TLB is
@@ -2801,19 +2804,23 @@  static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
  *  4. we can also safely fold together the "32 bit EL3" and "64 bit EL3"
  *     translation regimes, because they map reasonably well to each other
  *     and they can't both be active at the same time.
- * This gives us the following list of mmu_idx values:
+ *  5. we want to be able to use the TLB for accesses done as part of a
+ *     stage1 page table walk, rather than having to walk the stage2 page
+ *     table over and over.
  *
- * NS EL0 (aka NS PL0) stage 1+2
- * NS EL1 (aka NS PL1) stage 1+2
+ * This gives us the following list of cases:
+ *
+ * NS EL0 (aka NS PL0) EL1&0 stage 1+2
+ * NS EL1 (aka NS PL1) EL1&0 stage 1+2
+ * NS EL0 EL2&0
+ * NS EL2 EL2&0
  * NS EL2 (aka NS PL2)
- * S EL3 (aka S PL1)
  * S EL0 (aka S PL0)
  * S EL1 (not used if EL3 is 32 bit)
- * NS EL0+1 stage 2
+ * S EL3 (aka S PL1)
+ * NS EL0&1 stage 2
  *
- * (The last of these is an mmu_idx because we want to be able to use the TLB
- * for the accesses done as part of a stage 1 page table walk, rather than
- * having to walk the stage 2 page table over and over.)
+ * for a total of 9 different mmu_idx.
  *
  * R profile CPUs have an MPU, but can use the same set of MMU indexes
  * as A profile. They only need to distinguish NS EL0 and NS EL1 (and
@@ -2851,26 +2858,47 @@  static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
  * For M profile we arrange them to have a bit for priv, a bit for negpri
  * and a bit for secure.
  */
-#define ARM_MMU_IDX_A 0x10 /* A profile */
-#define ARM_MMU_IDX_NOTLB 0x20 /* does not have a TLB */
-#define ARM_MMU_IDX_M 0x40 /* M profile */
+#define ARM_MMU_IDX_A     0x10  /* A profile */
+#define ARM_MMU_IDX_NOTLB 0x20  /* does not have a TLB */
+#define ARM_MMU_IDX_M     0x40  /* M profile */
 
-/* meanings of the bits for M profile mmu idx values */
-#define ARM_MMU_IDX_M_PRIV 0x1
+/* Meanings of the bits for M profile mmu idx values */
+#define ARM_MMU_IDX_M_PRIV   0x1
 #define ARM_MMU_IDX_M_NEGPRI 0x2
-#define ARM_MMU_IDX_M_S 0x4
+#define ARM_MMU_IDX_M_S      0x4  /* Secure */
 
-#define ARM_MMU_IDX_TYPE_MASK (~0x7)
-#define ARM_MMU_IDX_COREIDX_MASK 0x7
+#define ARM_MMU_IDX_TYPE_MASK \
+    (ARM_MMU_IDX_A | ARM_MMU_IDX_M | ARM_MMU_IDX_NOTLB)
+#define ARM_MMU_IDX_COREIDX_MASK 0xf
 
 typedef enum ARMMMUIdx {
+    /*
+     * A-profile.
+     */
     ARMMMUIdx_EL10_0 = 0 | ARM_MMU_IDX_A,
-    ARMMMUIdx_EL10_1 = 1 | ARM_MMU_IDX_A,
-    ARMMMUIdx_E2 = 2 | ARM_MMU_IDX_A,
-    ARMMMUIdx_SE3 = 3 | ARM_MMU_IDX_A,
-    ARMMMUIdx_SE0 = 4 | ARM_MMU_IDX_A,
-    ARMMMUIdx_SE1 = 5 | ARM_MMU_IDX_A,
-    ARMMMUIdx_Stage2 = 6 | ARM_MMU_IDX_A,
+    ARMMMUIdx_EL20_0 = 1 | ARM_MMU_IDX_A,
+
+    ARMMMUIdx_EL10_1 = 2 | ARM_MMU_IDX_A,
+
+    ARMMMUIdx_E2 =     3 | ARM_MMU_IDX_A,
+    ARMMMUIdx_EL20_2 = 4 | ARM_MMU_IDX_A,
+
+    ARMMMUIdx_SE0 =    5 | ARM_MMU_IDX_A,
+    ARMMMUIdx_SE1 =    6 | ARM_MMU_IDX_A,
+    ARMMMUIdx_SE3 =    7 | ARM_MMU_IDX_A,
+
+    ARMMMUIdx_Stage2 = 8 | ARM_MMU_IDX_A,
+
+    /*
+     * These are not allocated TLBs and are used only for AT system
+     * instructions or for the first stage of an S12 page table walk.
+     */
+    ARMMMUIdx_Stage1_E0 = 0 | ARM_MMU_IDX_NOTLB,
+    ARMMMUIdx_Stage1_E1 = 1 | ARM_MMU_IDX_NOTLB,
+
+    /*
+     * M-profile.
+     */
     ARMMMUIdx_MUser = ARM_MMU_IDX_M,
     ARMMMUIdx_MPriv = ARM_MMU_IDX_M | ARM_MMU_IDX_M_PRIV,
     ARMMMUIdx_MUserNegPri = ARMMMUIdx_MUser | ARM_MMU_IDX_M_NEGPRI,
@@ -2879,11 +2907,6 @@  typedef enum ARMMMUIdx {
     ARMMMUIdx_MSPriv = ARMMMUIdx_MPriv | ARM_MMU_IDX_M_S,
     ARMMMUIdx_MSUserNegPri = ARMMMUIdx_MUserNegPri | ARM_MMU_IDX_M_S,
     ARMMMUIdx_MSPrivNegPri = ARMMMUIdx_MPrivNegPri | ARM_MMU_IDX_M_S,
-    /* Indexes below here don't have TLBs and are used only for AT system
-     * instructions or for the first stage of an S12 page table walk.
-     */
-    ARMMMUIdx_Stage1_E0 = 0 | ARM_MMU_IDX_NOTLB,
-    ARMMMUIdx_Stage1_E1 = 1 | ARM_MMU_IDX_NOTLB,
 } ARMMMUIdx;
 
 /*
@@ -2895,8 +2918,10 @@  typedef enum ARMMMUIdx {
 
 typedef enum ARMMMUIdxBit {
     TO_CORE_BIT(EL10_0),
+    TO_CORE_BIT(EL20_0),
     TO_CORE_BIT(EL10_1),
     TO_CORE_BIT(E2),
+    TO_CORE_BIT(EL20_2),
     TO_CORE_BIT(SE0),
     TO_CORE_BIT(SE1),
     TO_CORE_BIT(SE3),
@@ -2916,49 +2941,6 @@  typedef enum ARMMMUIdxBit {
 
 #define MMU_USER_IDX 0
 
-static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
-{
-    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
-}
-
-static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
-{
-    if (arm_feature(env, ARM_FEATURE_M)) {
-        return mmu_idx | ARM_MMU_IDX_M;
-    } else {
-        return mmu_idx | ARM_MMU_IDX_A;
-    }
-}
-
-/* Return the exception level we're running at if this is our mmu_idx */
-static inline int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
-{
-    switch (mmu_idx & ARM_MMU_IDX_TYPE_MASK) {
-    case ARM_MMU_IDX_A:
-        return mmu_idx & 3;
-    case ARM_MMU_IDX_M:
-        return mmu_idx & ARM_MMU_IDX_M_PRIV;
-    default:
-        g_assert_not_reached();
-    }
-}
-
-/*
- * Return the MMU index for a v7M CPU with all relevant information
- * manually specified.
- */
-ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
-                              bool secstate, bool priv, bool negpri);
-
-/* Return the MMU index for a v7M CPU in the specified security and
- * privilege state.
- */
-ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
-                                                bool secstate, bool priv);
-
-/* Return the MMU index for a v7M CPU in the specified security state */
-ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);
-
 /**
  * cpu_mmu_index:
  * @env: The cpu environment
diff --git a/target/arm/internals.h b/target/arm/internals.h
index aee54dc105..d73615064c 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -769,6 +769,39 @@  bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr);
 
+static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
+{
+    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
+}
+
+static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
+{
+    if (arm_feature(env, ARM_FEATURE_M)) {
+        return mmu_idx | ARM_MMU_IDX_M;
+    } else {
+        return mmu_idx | ARM_MMU_IDX_A;
+    }
+}
+
+int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);
+
+/*
+ * Return the MMU index for a v7M CPU with all relevant information
+ * manually specified.
+ */
+ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
+                              bool secstate, bool priv, bool negpri);
+
+/*
+ * Return the MMU index for a v7M CPU in the specified security and
+ * privilege state.
+ */
+ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
+                                                bool secstate, bool priv);
+
+/* Return the MMU index for a v7M CPU in the specified security state */
+ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);
+
 /* Return true if the stage 1 translation regime is using LPAE format page
  * tables */
 bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);
@@ -810,6 +843,8 @@  static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
     switch (mmu_idx) {
     case ARMMMUIdx_EL10_0:
     case ARMMMUIdx_EL10_1:
+    case ARMMMUIdx_EL20_0:
+    case ARMMMUIdx_EL20_2:
     case ARMMMUIdx_Stage1_E0:
     case ARMMMUIdx_Stage1_E1:
     case ARMMMUIdx_E2:
@@ -819,9 +854,9 @@  static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
     case ARMMMUIdx_MPriv:
     case ARMMMUIdx_MUser:
         return false;
-    case ARMMMUIdx_SE3:
     case ARMMMUIdx_SE0:
     case ARMMMUIdx_SE1:
+    case ARMMMUIdx_SE3:
     case ARMMMUIdx_MSPrivNegPri:
     case ARMMMUIdx_MSUserNegPri:
     case ARMMMUIdx_MSPriv:
diff --git a/target/arm/helper.c b/target/arm/helper.c
index ec5c7fa325..f86285ffbe 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -8561,9 +8561,11 @@  void arm_cpu_do_interrupt(CPUState *cs)
 #endif /* !CONFIG_USER_ONLY */
 
 /* Return the exception level which controls this address translation regime */
-static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
+static uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
 {
     switch (mmu_idx) {
+    case ARMMMUIdx_EL20_0:
+    case ARMMMUIdx_EL20_2:
     case ARMMMUIdx_Stage2:
     case ARMMMUIdx_E2:
         return 2;
@@ -8574,6 +8576,8 @@  static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
     case ARMMMUIdx_SE1:
     case ARMMMUIdx_Stage1_E0:
     case ARMMMUIdx_Stage1_E1:
+    case ARMMMUIdx_EL10_0:
+    case ARMMMUIdx_EL10_1:
     case ARMMMUIdx_MPrivNegPri:
     case ARMMMUIdx_MUserNegPri:
     case ARMMMUIdx_MPriv:
@@ -8675,10 +8679,14 @@  static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
  */
 static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
 {
-    if (mmu_idx == ARMMMUIdx_EL10_0 || mmu_idx == ARMMMUIdx_EL10_1) {
-        mmu_idx += (ARMMMUIdx_Stage1_E0 - ARMMMUIdx_EL10_0);
+    switch (mmu_idx) {
+    case ARMMMUIdx_EL10_0:
+        return ARMMMUIdx_Stage1_E0;
+    case ARMMMUIdx_EL10_1:
+        return ARMMMUIdx_Stage1_E1;
+    default:
+        return mmu_idx;
     }
-    return mmu_idx;
 }
 
 /* Return true if the translation regime is using LPAE format page tables */
@@ -8711,6 +8719,7 @@  static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
 {
     switch (mmu_idx) {
     case ARMMMUIdx_SE0:
+    case ARMMMUIdx_EL20_0:
     case ARMMMUIdx_Stage1_E0:
     case ARMMMUIdx_MUser:
     case ARMMMUIdx_MSUser:
@@ -11136,6 +11145,31 @@  int fp_exception_el(CPUARMState *env, int cur_el)
     return 0;
 }
 
+/* Return the exception level we're running at if this is our mmu_idx */
+int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
+{
+    if (mmu_idx & ARM_MMU_IDX_M) {
+        return mmu_idx & ARM_MMU_IDX_M_PRIV;
+    }
+
+    switch (mmu_idx) {
+    case ARMMMUIdx_EL10_0:
+    case ARMMMUIdx_EL20_0:
+    case ARMMMUIdx_SE0:
+        return 0;
+    case ARMMMUIdx_EL10_1:
+    case ARMMMUIdx_SE1:
+        return 1;
+    case ARMMMUIdx_E2:
+    case ARMMMUIdx_EL20_2:
+        return 2;
+    case ARMMMUIdx_SE3:
+        return 3;
+    default:
+        g_assert_not_reached();
+    }
+}
+
 #ifndef CONFIG_TCG
 ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
 {
@@ -11149,10 +11183,26 @@  ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
         return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
     }
 
-    if (el < 2 && arm_is_secure_below_el3(env)) {
-        return ARMMMUIdx_SE0 + el;
-    } else {
-        return ARMMMUIdx_EL10_0 + el;
+    switch (el) {
+    case 0:
+        /* TODO: ARMv8.1-VHE */
+        if (arm_is_secure_below_el3(env)) {
+            return ARMMMUIdx_SE0;
+        }
+        return ARMMMUIdx_EL10_0;
+    case 1:
+        if (arm_is_secure_below_el3(env)) {
+            return ARMMMUIdx_SE1;
+        }
+        return ARMMMUIdx_EL10_1;
+    case 2:
+        /* TODO: ARMv8.1-VHE */
+        /* TODO: ARMv8.4-SecEL2 */
+        return ARMMMUIdx_E2;
+    case 3:
+        return ARMMMUIdx_SE3;
+    default:
+        g_assert_not_reached();
     }
 }
 
diff --git a/target/arm/translate.c b/target/arm/translate.c
index cd757165e1..b7f726e733 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -172,7 +172,6 @@  static inline int get_a32_user_mem_index(DisasContext *s)
     case ARMMMUIdx_MSUserNegPri:
     case ARMMMUIdx_MSPrivNegPri:
         return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
-    case ARMMMUIdx_Stage2:
     default:
         g_assert_not_reached();
     }