[v4,06/40] target/arm: Split out vae1_tlbmask, vmalle1_tlbmask

Message ID 20191203022937.1474-7-richard.henderson@linaro.org
State New
Series target/arm: Implement ARMv8.1-VHE

Commit Message

Richard Henderson Dec. 3, 2019, 2:29 a.m. UTC
No functional change, but unify code sequences.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

---
 target/arm/helper.c | 118 ++++++++++++++------------------------------
 1 file changed, 37 insertions(+), 81 deletions(-)

-- 
2.17.1
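
In short, the consolidation replaces the repeated secure/non-secure if/else flush sequences with small helpers that return an MMU-index mask, so each TLBI write handler shrinks to a single flush call. A simplified sketch, excerpted from the diff below (the surrounding QEMU target/arm/helper.c context and headers are assumed):

static int vae1_tlbmask(CPUARMState *env)
{
    /* Stage-1 EL1&0 MMU indexes for the current security state. */
    if (arm_is_secure_below_el3(env)) {
        return ARMMMUIdxBit_S1SE1 | ARMMMUIdxBit_S1SE0;
    } else {
        return ARMMMUIdxBit_S12NSE1 | ARMMMUIdxBit_S12NSE0;
    }
}

static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                      uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = vae1_tlbmask(env);

    /* Previously an if/else choosing between two multi-line flush calls. */
    tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
}

vmalle1_tlbmask() (used by the alle1 handlers) has the same shape but also ORs in ARMMMUIdxBit_S2NS when EL2 is present, since the 'ALL' scope must invalidate stage-2 as well as stage-1 translations.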

Comments

Philippe Mathieu-Daudé Dec. 3, 2019, 6:25 a.m. UTC | #1
On 12/3/19 3:29 AM, Richard Henderson wrote:
> No functional change, but unify code sequences.
>
> Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

Easier to review in 2 patches: vae1_tlbmask first, then vmalle1_tlbmask.

If you need to respin, the 2 patches are welcome. Regardless:
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>


Richard Henderson Dec. 3, 2019, 10:01 p.m. UTC | #2
On 12/2/19 10:25 PM, Philippe Mathieu-Daudé wrote:
> On 12/3/19 3:29 AM, Richard Henderson wrote:
>> No functional change, but unify code sequences.
>>
>> Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
>> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
>
> Easier to review in 2 patches: vae1_tlbmask first, then vmalle1_tlbmask.

Ok, done.

r~
Patch

diff --git a/target/arm/helper.c b/target/arm/helper.c
index 731507a82f..0b0130d814 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -3890,70 +3890,61 @@  static CPAccessResult aa64_cacheop_access(CPUARMState *env,
  * Page D4-1736 (DDI0487A.b)
  */
 
+static int vae1_tlbmask(CPUARMState *env)
+{
+    if (arm_is_secure_below_el3(env)) {
+        return ARMMMUIdxBit_S1SE1 | ARMMMUIdxBit_S1SE0;
+    } else {
+        return ARMMMUIdxBit_S12NSE1 | ARMMMUIdxBit_S12NSE0;
+    }
+}
+
 static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                       uint64_t value)
 {
     CPUState *cs = env_cpu(env);
-    bool sec = arm_is_secure_below_el3(env);
+    int mask = vae1_tlbmask(env);
 
-    if (sec) {
-        tlb_flush_by_mmuidx_all_cpus_synced(cs,
-                                            ARMMMUIdxBit_S1SE1 |
-                                            ARMMMUIdxBit_S1SE0);
-    } else {
-        tlb_flush_by_mmuidx_all_cpus_synced(cs,
-                                            ARMMMUIdxBit_S12NSE1 |
-                                            ARMMMUIdxBit_S12NSE0);
-    }
+    tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
 }
 
 static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
 {
     CPUState *cs = env_cpu(env);
+    int mask = vae1_tlbmask(env);
 
     if (tlb_force_broadcast(env)) {
         tlbi_aa64_vmalle1is_write(env, NULL, value);
         return;
     }
 
+    tlb_flush_by_mmuidx(cs, mask);
+}
+
+static int vmalle1_tlbmask(CPUARMState *env)
+{
+    /*
+     * Note that the 'ALL' scope must invalidate both stage 1 and
+     * stage 2 translations, whereas most other scopes only invalidate
+     * stage 1 translations.
+     */
     if (arm_is_secure_below_el3(env)) {
-        tlb_flush_by_mmuidx(cs,
-                            ARMMMUIdxBit_S1SE1 |
-                            ARMMMUIdxBit_S1SE0);
+        return ARMMMUIdxBit_S1SE1 | ARMMMUIdxBit_S1SE0;
+    } else if (arm_feature(env, ARM_FEATURE_EL2)) {
+        return ARMMMUIdxBit_S12NSE1 | ARMMMUIdxBit_S12NSE0 | ARMMMUIdxBit_S2NS;
     } else {
-        tlb_flush_by_mmuidx(cs,
-                            ARMMMUIdxBit_S12NSE1 |
-                            ARMMMUIdxBit_S12NSE0);
+        return ARMMMUIdxBit_S12NSE1 | ARMMMUIdxBit_S12NSE0;
     }
 }
 
 static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
 {
-    /* Note that the 'ALL' scope must invalidate both stage 1 and
-     * stage 2 translations, whereas most other scopes only invalidate
-     * stage 1 translations.
-     */
-    ARMCPU *cpu = env_archcpu(env);
-    CPUState *cs = CPU(cpu);
+    CPUState *cs = env_cpu(env);
+    int mask = vmalle1_tlbmask(env);
 
-    if (arm_is_secure_below_el3(env)) {
-        tlb_flush_by_mmuidx(cs,
-                            ARMMMUIdxBit_S1SE1 |
-                            ARMMMUIdxBit_S1SE0);
-    } else {
-        if (arm_feature(env, ARM_FEATURE_EL2)) {
-            tlb_flush_by_mmuidx(cs,
-                                ARMMMUIdxBit_S12NSE1 |
-                                ARMMMUIdxBit_S12NSE0 |
-                                ARMMMUIdxBit_S2NS);
-        } else {
-            tlb_flush_by_mmuidx(cs,
-                                ARMMMUIdxBit_S12NSE1 |
-                                ARMMMUIdxBit_S12NSE0);
-        }
-    }
+    tlb_flush_by_mmuidx(cs, mask);
 }
 
 static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -3977,28 +3968,10 @@  static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
 static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
 {
-    /* Note that the 'ALL' scope must invalidate both stage 1 and
-     * stage 2 translations, whereas most other scopes only invalidate
-     * stage 1 translations.
-     */
     CPUState *cs = env_cpu(env);
-    bool sec = arm_is_secure_below_el3(env);
-    bool has_el2 = arm_feature(env, ARM_FEATURE_EL2);
+    int mask = vmalle1_tlbmask(env);
 
-    if (sec) {
-        tlb_flush_by_mmuidx_all_cpus_synced(cs,
-                                            ARMMMUIdxBit_S1SE1 |
-                                            ARMMMUIdxBit_S1SE0);
-    } else if (has_el2) {
-        tlb_flush_by_mmuidx_all_cpus_synced(cs,
-                                            ARMMMUIdxBit_S12NSE1 |
-                                            ARMMMUIdxBit_S12NSE0 |
-                                            ARMMMUIdxBit_S2NS);
-    } else {
-          tlb_flush_by_mmuidx_all_cpus_synced(cs,
-                                              ARMMMUIdxBit_S12NSE1 |
-                                              ARMMMUIdxBit_S12NSE0);
-    }
+    tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
 }
 
 static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -4048,20 +4021,11 @@  static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
 static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
 {
-    ARMCPU *cpu = env_archcpu(env);
-    CPUState *cs = CPU(cpu);
-    bool sec = arm_is_secure_below_el3(env);
+    CPUState *cs = env_cpu(env);
+    int mask = vae1_tlbmask(env);
     uint64_t pageaddr = sextract64(value << 12, 0, 56);
 
-    if (sec) {
-        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
-                                                 ARMMMUIdxBit_S1SE1 |
-                                                 ARMMMUIdxBit_S1SE0);
-    } else {
-        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
-                                                 ARMMMUIdxBit_S12NSE1 |
-                                                 ARMMMUIdxBit_S12NSE0);
-    }
+    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask);
 }
 
 static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -4072,8 +4036,8 @@  static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
      * since we don't support flush-for-specific-ASID-only or
      * flush-last-level-only.
      */
-    ARMCPU *cpu = env_archcpu(env);
-    CPUState *cs = CPU(cpu);
+    CPUState *cs = env_cpu(env);
+    int mask = vae1_tlbmask(env);
     uint64_t pageaddr = sextract64(value << 12, 0, 56);
 
     if (tlb_force_broadcast(env)) {
@@ -4081,15 +4045,7 @@  static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
         return;
     }
 
-    if (arm_is_secure_below_el3(env)) {
-        tlb_flush_page_by_mmuidx(cs, pageaddr,
-                                 ARMMMUIdxBit_S1SE1 |
-                                 ARMMMUIdxBit_S1SE0);
-    } else {
-        tlb_flush_page_by_mmuidx(cs, pageaddr,
-                                 ARMMMUIdxBit_S12NSE1 |
-                                 ARMMMUIdxBit_S12NSE0);
-    }
+    tlb_flush_page_by_mmuidx(cs, pageaddr, mask);
 }
 
 static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,