@@ -4663,6 +4663,21 @@ static int vae1_tlbmask(CPUARMState *env)
     return mask;
 }
 
+static int vae2_tlbmask(CPUARMState *env)
+{
+    uint64_t hcr = arm_hcr_el2_eff(env);
+    uint16_t mask;
+
+    if (hcr & HCR_E2H) {
+        mask = ARMMMUIdxBit_E20_2 |
+               ARMMMUIdxBit_E20_2_PAN |
+               ARMMMUIdxBit_E20_0;
+    } else {
+        mask = ARMMMUIdxBit_E2;
+    }
+    return mask;
+}
+
 /* Return 56 if TBI is enabled, 64 otherwise. */
 static int tlbbits_for_regime(CPUARMState *env, ARMMMUIdx mmu_idx,
                               uint64_t addr)
@@ -4689,6 +4704,25 @@ static int vae1_tlbbits(CPUARMState *env, uint64_t addr)
     return tlbbits_for_regime(env, mmu_idx, addr);
 }
 
+static int vae2_tlbbits(CPUARMState *env, uint64_t addr)
+{
+    uint64_t hcr = arm_hcr_el2_eff(env);
+    ARMMMUIdx mmu_idx;
+
+    /*
+     * Only the regime of the mmu_idx below is significant.
+     * Regime EL2&0 has two ranges with separate TBI configuration, while EL2
+     * only has one.
+     */
+    if (hcr & HCR_E2H) {
+        mmu_idx = ARMMMUIdx_E20_2;
+    } else {
+        mmu_idx = ARMMMUIdx_E2;
+    }
+
+    return tlbbits_for_regime(env, mmu_idx, addr);
+}
+
 static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                       uint64_t value)
 {
@@ -4781,10 +4815,11 @@ static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
      * flush-last-level-only.
      */
     CPUState *cs = env_cpu(env);
-    int mask = e2_tlbmask(env);
+    int mask = vae2_tlbmask(env);
     uint64_t pageaddr = sextract64(value << 12, 0, 56);
+    int bits = vae2_tlbbits(env, pageaddr);
 
-    tlb_flush_page_by_mmuidx(cs, pageaddr, mask);
+    tlb_flush_page_bits_by_mmuidx(cs, pageaddr, mask, bits);
 }
 
 static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -4838,11 +4873,11 @@ static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
 {
     CPUState *cs = env_cpu(env);
+    int mask = vae2_tlbmask(env);
     uint64_t pageaddr = sextract64(value << 12, 0, 56);
-    int bits = tlbbits_for_regime(env, ARMMMUIdx_E2, pageaddr);
+    int bits = vae2_tlbbits(env, pageaddr);
 
-    tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
-                                                  ARMMMUIdxBit_E2, bits);
+    tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
 }
 
 static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -5014,11 +5049,6 @@ static void tlbi_aa64_rvae1is_write(CPUARMState *env,
     do_rvae_write(env, value, vae1_tlbmask(env), true);
 }
 
-static int vae2_tlbmask(CPUARMState *env)
-{
-    return ARMMMUIdxBit_E2;
-}
-
 static void tlbi_aa64_rvae2_write(CPUARMState *env,
                                   const ARMCPRegInfo *ri,
                                   uint64_t value)

When HCR_EL2.E2H is enabled, TLB entries are formed using the EL2&0
translation regime instead of the EL2 translation regime. The TLBI VAE2*
instructions invalidate the regime that corresponds to the current value
of HCR_EL2.E2H. At the moment we only invalidate the EL2 translation
regime. This causes problems for RMM, which issues TLBI VAE2IS
instructions with HCR_EL2.E2H enabled.

Update vae2_tlbmask() to take HCR_EL2.E2H into account. Add
vae2_tlbbits() as well, since the top-byte-ignore configuration also
differs between the EL2&0 and EL2 regimes.

Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
---
 target/arm/helper.c | 50 ++++++++++++++++++++++++++++++++++++---------
 1 file changed, 40 insertions(+), 10 deletions(-)
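
For reviewers unfamiliar with the pageaddr computation in the write handlers
above, here is a standalone sketch (not part of the patch, and not QEMU code;
the virtual address and the TBI setting are made-up values for illustration)
of how the TLBI operand, which carries VA[55:12], is rebuilt into the
sign-extended 56-bit address that the flush must match, and why 56 bits are
enough when top-byte-ignore applies to the regime:

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same contract as QEMU's sextract64(): sign-extend a bitfield. */
static int64_t sextract64(uint64_t value, int start, int length)
{
    return ((int64_t)(value << (64 - length - start))) >> (64 - length);
}

int main(void)
{
    /* Hypothetical kernel-style VA; the TLBI operand Xt[43:0] = VA[55:12]. */
    uint64_t va = 0xffff800012345000ull;
    uint64_t xt = (va >> 12) & ((1ull << 44) - 1);

    /*
     * What the write handlers do with sextract64(value << 12, 0, 56):
     * recover a 56-bit virtual address, sign-extended from bit 55.
     */
    uint64_t pageaddr = sextract64(xt << 12, 0, 56);

    /*
     * With TBI enabled for the regime, VA[63:56] are ignored by translation,
     * so the flush only needs to match the low 56 bits; that is the value
     * vae2_tlbbits() feeds to tlb_flush_page_bits_by_mmuidx().
     */
    bool tbi = true;    /* assumed enabled for this example */
    int bits = tbi ? 56 : 64;

    printf("pageaddr 0x%016" PRIx64 ", match low %d bits\n", pageaddr, bits);
    return 0;
}

With the values above, pageaddr round-trips to the original VA, which is why
the E20 ranges and the single E2 range can share the flush path as long as
the bits count comes from the right regime.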