[v10.5,14/20] target/arm: Use vector infrastructure for aa64 dup/movi

Message ID 20180117161435.28981-15-richard.henderson@linaro.org
State Superseded
Series tcg: generic vector operations

Commit Message

Richard Henderson Jan. 17, 2018, 4:14 p.m. UTC
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

---
 target/arm/translate-a64.c | 83 +++++++++++++++++++---------------------------
 1 file changed, 34 insertions(+), 49 deletions(-)

-- 
2.14.3
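
For context: this patch replaces the explicit write_vec_element() loops (and the manual clear_vec_high() for the non-quad case) with single calls into the series' generic vector expanders, tcg_gen_gvec_dup_mem(), tcg_gen_gvec_dup_i64() and tcg_gen_gvec_dup64i(), which replicate a single element or register value across the operation size (16 or 8 bytes) and clear the remainder of the register out to the maximum vector size. A rough standalone C model of that behaviour, with made-up names and no claim to match what the expanders actually emit:

    #include <stdint.h>
    #include <string.h>

    /* Sketch of the gvec "dup" semantics the patch switches to:
     * replicate the esize-byte element at src across oprsz bytes of
     * dst, then zero the tail from oprsz up to maxsz bytes.  For a
     * non-quad AArch64 op, oprsz is 8 and maxsz is the full register
     * size, so the high half is cleared as a side effect. */
    static void gvec_dup_model(uint8_t *dst, const uint8_t *src,
                               unsigned esize, unsigned oprsz,
                               unsigned maxsz)
    {
        for (unsigned i = 0; i < oprsz; i += esize) {
            memcpy(dst + i, src, esize);
        }
        memset(dst + oprsz, 0, maxsz - oprsz);
    }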

Comments

Peter Maydell Jan. 25, 2018, 4:50 p.m. UTC | #1
On 17 January 2018 at 16:14, Richard Henderson
<richard.henderson@linaro.org> wrote:
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>  target/arm/translate-a64.c | 83 +++++++++++++++++++---------------------------
>  1 file changed, 34 insertions(+), 49 deletions(-)
>
> diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
> index 219cc1e19d..2495414603 100644
> --- a/target/arm/translate-a64.c
> +++ b/target/arm/translate-a64.c
> @@ -5863,38 +5863,24 @@ static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
>   *
>   * size: encoded in imm5 (see ARM ARM LowestSetBit())
>   */
> +

Stray whitespace change

>  static void handle_simd_dupe(DisasContext *s, int is_q, int rd, int rn,
>                               int imm5)
>  {
>      int size = ctz32(imm5);
> -    int esize = 8 << size;
> -    int elements = (is_q ? 128 : 64) / esize;
> -    int index, i;
> -    TCGv_i64 tmp;
> +    int index = imm5 >> (size + 1);
>
>      if (size > 3 || (size == 3 && !is_q)) {
>          unallocated_encoding(s);
>          return;
>      }
> -

Ditto.

Otherwise
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>


thanks
-- PMM
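
One further note on the modified-immediate path in disas_simd_mod_imm(): plain MOVI/MVNI, where the whole register is simply overwritten with the expanded 64-bit immediate, now goes through tcg_gen_gvec_dup64i(), while the per-element ORR/BIC forms, which must read-modify-write the existing register contents, keep the two-halves loop. The cmode test in the patch distinguishes the two cases; a small throwaway helper (hypothetical, not part of the patch) shows which encodings take which path:

    #include <stdbool.h>
    #include <stdio.h>

    /* Mirrors the predicate used in the patch: true when cmode encodes
     * a per-element ORR/BIC (read-modify-write), false for MOVI/MVNI. */
    static bool cmode_is_orr_bic(unsigned cmode)
    {
        return (cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9;
    }

    int main(void)
    {
        for (unsigned cmode = 0; cmode < 16; cmode++) {
            printf("cmode %x: %s\n", cmode,
                   cmode_is_orr_bic(cmode) ? "ORR/BIC loop" : "gvec dup");
        }
        return 0;
    }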

Patch

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index 219cc1e19d..2495414603 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -5863,38 +5863,24 @@  static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
  *
  * size: encoded in imm5 (see ARM ARM LowestSetBit())
  */
+
 static void handle_simd_dupe(DisasContext *s, int is_q, int rd, int rn,
                              int imm5)
 {
     int size = ctz32(imm5);
-    int esize = 8 << size;
-    int elements = (is_q ? 128 : 64) / esize;
-    int index, i;
-    TCGv_i64 tmp;
+    int index = imm5 >> (size + 1);
 
     if (size > 3 || (size == 3 && !is_q)) {
         unallocated_encoding(s);
         return;
     }
-
     if (!fp_access_check(s)) {
         return;
     }
 
-    index = imm5 >> (size + 1);
-
-    tmp = tcg_temp_new_i64();
-    read_vec_element(s, tmp, rn, index, size);
-
-    for (i = 0; i < elements; i++) {
-        write_vec_element(s, tmp, rd, i, size);
-    }
-
-    if (!is_q) {
-        clear_vec_high(s, rd);
-    }
-
-    tcg_temp_free_i64(tmp);
+    tcg_gen_gvec_dup_mem(size, vec_full_reg_offset(s, rd),
+                         vec_reg_offset(s, rn, index, size),
+                         is_q ? 16 : 8, vec_full_reg_size(s));
 }
 
 /* DUP (element, scalar)
@@ -5943,9 +5929,7 @@  static void handle_simd_dupg(DisasContext *s, int is_q, int rd, int rn,
                              int imm5)
 {
     int size = ctz32(imm5);
-    int esize = 8 << size;
-    int elements = (is_q ? 128 : 64)/esize;
-    int i = 0;
+    uint32_t dofs, oprsz, maxsz;
 
     if (size > 3 || ((size == 3) && !is_q)) {
         unallocated_encoding(s);
@@ -5956,12 +5940,11 @@  static void handle_simd_dupg(DisasContext *s, int is_q, int rd, int rn,
         return;
     }
 
-    for (i = 0; i < elements; i++) {
-        write_vec_element(s, cpu_reg(s, rn), rd, i, size);
-    }
-    if (!is_q) {
-        clear_vec_high(s, rd);
-    }
+    dofs = vec_full_reg_offset(s, rd);
+    oprsz = is_q ? 16 : 8;
+    maxsz = vec_full_reg_size(s);
+
+    tcg_gen_gvec_dup_i64(size, dofs, oprsz, maxsz, cpu_reg(s, rn));
 }
 
 /* INS (Element)
@@ -6152,7 +6135,6 @@  static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
     bool is_neg = extract32(insn, 29, 1);
     bool is_q = extract32(insn, 30, 1);
     uint64_t imm = 0;
-    TCGv_i64 tcg_rd, tcg_imm;
     int i;
 
     if (o2 != 0 || ((cmode == 0xf) && is_neg && !is_q)) {
@@ -6234,32 +6216,35 @@  static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
         imm = ~imm;
     }
 
-    tcg_imm = tcg_const_i64(imm);
-    tcg_rd = new_tmp_a64(s);
+    if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) {
+        /* MOVI or MVNI, with MVNI negation handled above.  */
+        tcg_gen_gvec_dup64i(vec_full_reg_offset(s, rd), is_q ? 16 : 8,
+                            vec_full_reg_size(s), imm);
+    } else {
+        TCGv_i64 tcg_imm = tcg_const_i64(imm);
+        TCGv_i64 tcg_rd = new_tmp_a64(s);
 
-    for (i = 0; i < 2; i++) {
-        int foffs = i ? fp_reg_hi_offset(s, rd) : fp_reg_offset(s, rd, MO_64);
+        for (i = 0; i < 2; i++) {
+            int foffs = vec_reg_offset(s, rd, i, MO_64);
 
-        if (i == 1 && !is_q) {
-            /* non-quad ops clear high half of vector */
-            tcg_gen_movi_i64(tcg_rd, 0);
-        } else if ((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9) {
-            tcg_gen_ld_i64(tcg_rd, cpu_env, foffs);
-            if (is_neg) {
-                /* AND (BIC) */
-                tcg_gen_and_i64(tcg_rd, tcg_rd, tcg_imm);
+            if (i == 1 && !is_q) {
+                /* non-quad ops clear high half of vector */
+                tcg_gen_movi_i64(tcg_rd, 0);
             } else {
-                /* ORR */
-                tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_imm);
+                tcg_gen_ld_i64(tcg_rd, cpu_env, foffs);
+                if (is_neg) {
+                    /* AND (BIC) */
+                    tcg_gen_and_i64(tcg_rd, tcg_rd, tcg_imm);
+                } else {
+                    /* ORR */
+                    tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_imm);
+                }
             }
-        } else {
-            /* MOVI */
-            tcg_gen_mov_i64(tcg_rd, tcg_imm);
+            tcg_gen_st_i64(tcg_rd, cpu_env, foffs);
         }
-        tcg_gen_st_i64(tcg_rd, cpu_env, foffs);
-    }
 
-    tcg_temp_free_i64(tcg_imm);
+        tcg_temp_free_i64(tcg_imm);
+    }
 }
 
 /* AdvSIMD scalar copy