
[12/38] tcg: Add gvec expanders for variable shift

Message ID: 20190420073442.7488-13-richard.henderson@linaro.org
State: New
Series: tcg vector improvements

Commit Message

Richard Henderson April 20, 2019, 7:34 a.m. UTC
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

---
 accel/tcg/tcg-runtime.h      |  15 ++++
 tcg/tcg-op-gvec.h            |   7 ++
 tcg/tcg-op.h                 |   4 ++
 accel/tcg/tcg-runtime-gvec.c | 132 +++++++++++++++++++++++++++++++++++
 tcg/tcg-op-gvec.c            |  87 +++++++++++++++++++++++
 tcg/tcg-op-vec.c             |  15 ++++
 6 files changed, 260 insertions(+)

-- 
2.17.1

Comments

David Hildenbrand April 23, 2019, 7:04 p.m. UTC | #1
In order to use this on s390x for VECTOR ELEMENT SHIFT, like

+static DisasJumpType op_vesv(DisasContext *s, DisasOps *o)
+{
+    const uint8_t es = get_field(s->fields, m4);
+    const uint8_t v1 = get_field(s->fields, v1);
+    const uint8_t v2 = get_field(s->fields, v2);
+    const uint8_t v3 = get_field(s->fields, v3);
+
+    if (es > ES_64) {
+        gen_program_exception(s, PGM_SPECIFICATION);
+        return DISAS_NORETURN;
+    }
+
+    switch (s->fields->op2) {
+    case 0x70:
+        gen_gvec_fn_3(shlv, es, v1, v2, v3);
+        break;
+    case 0x7a:
+        gen_gvec_fn_3(sarv, es, v1, v2, v3);
+        break;
+    case 0x78:
+        gen_gvec_fn_3(shrv, es, v1, v2, v3);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+
+    return DISAS_NEXT;
+}

We need to mask off invalid bits from the shift. Can that be added?


-- 

Thanks,

David / dhildenb
Richard Henderson April 23, 2019, 7:28 p.m. UTC | #2
On 4/23/19 12:04 PM, David Hildenbrand wrote:
> In order to use this on s390x for VECTOR ELEMENT SHIFT, like
> [...]
> We need to mask off invalid bits from the shift. Can that be added?

Yes, I do exactly this in patch 31 for target/ppc.
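
For illustration, masking at the vector level before the shift could look roughly like the sketch below (the function name is invented and the details are not necessarily those of patch 31; it assumes the usual TCG vector API):

static void gen_shlv_mask_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);

    /* Truncate each shift count to the element width, e.g. 0..7 for MO_8.  */
    tcg_gen_dupi_vec(vece, t, (8 << vece) - 1);
    tcg_gen_and_vec(vece, t, t, b);
    tcg_gen_shlv_vec(vece, d, a, t);
    tcg_temp_free_vec(t);
}

A target would plug such a function in as the .fniv of its own GVecGen3 entry (together with a matching out-of-line helper for the non-vector path) instead of calling tcg_gen_gvec_shlv directly.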


r~
David Hildenbrand April 23, 2019, 9:02 p.m. UTC | #3
On 23.04.19 21:28, Richard Henderson wrote:
> On 4/23/19 12:04 PM, David Hildenbrand wrote:
>> In order to use this on s390x for VECTOR ELEMENT SHIFT, like
>> [...]
>> We need to mask off invalid bits from the shift. Can that be added?
>
> Yes, I do exactly this in patch 31 for target/ppc.

Got it, so not via a generic gvec expansion. Thanks!

-- 

Thanks,

David / dhildenb
Richard Henderson April 23, 2019, 9:40 p.m. UTC | #4
On 4/23/19 2:02 PM, David Hildenbrand wrote:
> On 23.04.19 21:28, Richard Henderson wrote:
>> On 4/23/19 12:04 PM, David Hildenbrand wrote:
>>> We need to mask off invalid bits from the shift. Can that be added?
>>
>> Yes, I do exactly this in patch 31 for target/ppc.
>
> Got it, so not via a generic gvec expansion. Thanks!

I do wonder if I *should* make the truncating expansion the
generic gvec expansion.  It would be usable from two targets
at least...

Because the ones that *don't* truncate in hardware will
still have to have their own custom expansion code.

Thoughts?
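
As a concrete sketch of what a truncating generic expansion would mean for the out-of-line fallback, the byte-element helper would only need to truncate each count to the element width (illustration only, not part of the patch as posted; the .fniv expansion would mask with a dup'd constant in the same way):

void HELPER(gvec_shl8v)(void *d, void *a, void *b, uint32_t desc)
{
    intptr_t oprsz = simd_oprsz(desc);
    intptr_t i;

    for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
        uint8_t sh = *(uint8_t *)(b + i) & 7;      /* count mod 8 */
        *(uint8_t *)(d + i) = *(uint8_t *)(a + i) << sh;
    }
    clear_high(d, oprsz, desc);
}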


r~
David Hildenbrand April 23, 2019, 9:57 p.m. UTC | #5
On 23.04.19 23:40, Richard Henderson wrote:
> On 4/23/19 2:02 PM, David Hildenbrand wrote:
>> Got it, so not via a generic gvec expansion. Thanks!
>
> I do wonder if I *should* make the truncating expansion the
> generic gvec expansion.  It would be usable from two targets
> at least...
>
> Because the ones that *don't* truncate in hardware will
> still have to have their own custom expansion code.
>
> Thoughts?

Exactly what I had in mind. I wonder if the same applies to all shift
helpers/expansions. Expected behavior (mask off bits) is always better
than unexpected behavior. But of course I might be wrong. At least, here
I would vote for doing the truncation in the generic gvec expansion.

-- 

Thanks,

David / dhildenb

Patch

diff --git a/accel/tcg/tcg-runtime.h b/accel/tcg/tcg-runtime.h
index dfe325625c..ed3ce5fd91 100644
--- a/accel/tcg/tcg-runtime.h
+++ b/accel/tcg/tcg-runtime.h
@@ -254,6 +254,21 @@  DEF_HELPER_FLAGS_3(gvec_sar16i, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
 DEF_HELPER_FLAGS_3(gvec_sar32i, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
 DEF_HELPER_FLAGS_3(gvec_sar64i, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
 
+DEF_HELPER_FLAGS_4(gvec_shl8v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_shl16v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_shl32v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_shl64v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_shr8v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_shr16v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_shr32v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_shr64v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_sar8v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_sar16v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_sar32v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_sar64v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_4(gvec_eq8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(gvec_eq16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(gvec_eq32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
diff --git a/tcg/tcg-op-gvec.h b/tcg/tcg-op-gvec.h
index 850da32ded..1cd18a959a 100644
--- a/tcg/tcg-op-gvec.h
+++ b/tcg/tcg-op-gvec.h
@@ -294,6 +294,13 @@  void tcg_gen_gvec_shri(unsigned vece, uint32_t dofs, uint32_t aofs,
 void tcg_gen_gvec_sari(unsigned vece, uint32_t dofs, uint32_t aofs,
                        int64_t shift, uint32_t oprsz, uint32_t maxsz);
 
+void tcg_gen_gvec_shlv(unsigned vece, uint32_t dofs, uint32_t aofs,
+                       uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_shrv(unsigned vece, uint32_t dofs, uint32_t aofs,
+                       uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_sarv(unsigned vece, uint32_t dofs, uint32_t aofs,
+                       uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
+
 void tcg_gen_gvec_cmp(TCGCond cond, unsigned vece, uint32_t dofs,
                       uint32_t aofs, uint32_t bofs,
                       uint32_t oprsz, uint32_t maxsz);
diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h
index 9fff9864f6..833c6330b5 100644
--- a/tcg/tcg-op.h
+++ b/tcg/tcg-op.h
@@ -986,6 +986,10 @@  void tcg_gen_shli_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i);
 void tcg_gen_shri_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i);
 void tcg_gen_sari_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i);
 
+void tcg_gen_shlv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
+void tcg_gen_shrv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
+void tcg_gen_sarv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
+
 void tcg_gen_cmp_vec(TCGCond cond, unsigned vece, TCGv_vec r,
                      TCGv_vec a, TCGv_vec b);
 
diff --git a/accel/tcg/tcg-runtime-gvec.c b/accel/tcg/tcg-runtime-gvec.c
index e2c6f24262..7b88f5590c 100644
--- a/accel/tcg/tcg-runtime-gvec.c
+++ b/accel/tcg/tcg-runtime-gvec.c
@@ -725,6 +725,138 @@  void HELPER(gvec_sar64i)(void *d, void *a, uint32_t desc)
     clear_high(d, oprsz, desc);
 }
 
+void HELPER(gvec_shl8v)(void *d, void *a, void *b, uint32_t desc)
+{
+    intptr_t oprsz = simd_oprsz(desc);
+    intptr_t i;
+
+    for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
+        *(uint8_t *)(d + i) = *(uint8_t *)(a + i) << *(uint8_t *)(b + i);
+    }
+    clear_high(d, oprsz, desc);
+}
+
+void HELPER(gvec_shl16v)(void *d, void *a, void *b, uint32_t desc)
+{
+    intptr_t oprsz = simd_oprsz(desc);
+    intptr_t i;
+
+    for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
+        *(uint16_t *)(d + i) = *(uint16_t *)(a + i) << *(uint16_t *)(b + i);
+    }
+    clear_high(d, oprsz, desc);
+}
+
+void HELPER(gvec_shl32v)(void *d, void *a, void *b, uint32_t desc)
+{
+    intptr_t oprsz = simd_oprsz(desc);
+    intptr_t i;
+
+    for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
+        *(uint32_t *)(d + i) = *(uint32_t *)(a + i) << *(uint32_t *)(b + i);
+    }
+    clear_high(d, oprsz, desc);
+}
+
+void HELPER(gvec_shl64v)(void *d, void *a, void *b, uint32_t desc)
+{
+    intptr_t oprsz = simd_oprsz(desc);
+    intptr_t i;
+
+    for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
+        *(uint64_t *)(d + i) = *(uint64_t *)(a + i) << *(uint64_t *)(b + i);
+    }
+    clear_high(d, oprsz, desc);
+}
+
+void HELPER(gvec_shr8v)(void *d, void *a, void *b, uint32_t desc)
+{
+    intptr_t oprsz = simd_oprsz(desc);
+    intptr_t i;
+
+    for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
+        *(uint8_t *)(d + i) = *(uint8_t *)(a + i) >> *(uint8_t *)(b + i);
+    }
+    clear_high(d, oprsz, desc);
+}
+
+void HELPER(gvec_shr16v)(void *d, void *a, void *b, uint32_t desc)
+{
+    intptr_t oprsz = simd_oprsz(desc);
+    intptr_t i;
+
+    for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
+        *(uint16_t *)(d + i) = *(uint16_t *)(a + i) >> *(uint16_t *)(b + i);
+    }
+    clear_high(d, oprsz, desc);
+}
+
+void HELPER(gvec_shr32v)(void *d, void *a, void *b, uint32_t desc)
+{
+    intptr_t oprsz = simd_oprsz(desc);
+    intptr_t i;
+
+    for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
+        *(uint32_t *)(d + i) = *(uint32_t *)(a + i) >> *(uint32_t *)(b + i);
+    }
+    clear_high(d, oprsz, desc);
+}
+
+void HELPER(gvec_shr64v)(void *d, void *a, void *b, uint32_t desc)
+{
+    intptr_t oprsz = simd_oprsz(desc);
+    intptr_t i;
+
+    for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
+        *(uint64_t *)(d + i) = *(uint64_t *)(a + i) >> *(uint64_t *)(b + i);
+    }
+    clear_high(d, oprsz, desc);
+}
+
+void HELPER(gvec_sar8v)(void *d, void *a, void *b, uint32_t desc)
+{
+    intptr_t oprsz = simd_oprsz(desc);
+    intptr_t i;
+
+    for (i = 0; i < oprsz; i += sizeof(int8_t)) {
+        *(int8_t *)(d + i) = *(int8_t *)(a + i) >> *(int8_t *)(b + i);
+    }
+    clear_high(d, oprsz, desc);
+}
+
+void HELPER(gvec_sar16v)(void *d, void *a, void *b, uint32_t desc)
+{
+    intptr_t oprsz = simd_oprsz(desc);
+    intptr_t i;
+
+    for (i = 0; i < oprsz; i += sizeof(int16_t)) {
+        *(int16_t *)(d + i) = *(int16_t *)(a + i) >> *(int16_t *)(b + i);
+    }
+    clear_high(d, oprsz, desc);
+}
+
+void HELPER(gvec_sar32v)(void *d, void *a, void *b, uint32_t desc)
+{
+    intptr_t oprsz = simd_oprsz(desc);
+    intptr_t i;
+
+    for (i = 0; i < oprsz; i += sizeof(int32_t)) {
+        *(int32_t *)(d + i) = *(int32_t *)(a + i) >> *(int32_t *)(b + i);
+    }
+    clear_high(d, oprsz, desc);
+}
+
+void HELPER(gvec_sar64v)(void *d, void *a, void *b, uint32_t desc)
+{
+    intptr_t oprsz = simd_oprsz(desc);
+    intptr_t i;
+
+    for (i = 0; i < oprsz; i += sizeof(int64_t)) {
+        *(int64_t *)(d + i) = *(int64_t *)(a + i) >> *(int64_t *)(b + i);
+    }
+    clear_high(d, oprsz, desc);
+}
+
 /* If vectors are enabled, the compiler fills in -1 for true.
    Otherwise, we must take care of this by hand.  */
 #ifdef CONFIG_VECTOR16
diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
index f056018713..5d28184045 100644
--- a/tcg/tcg-op-gvec.c
+++ b/tcg/tcg-op-gvec.c
@@ -2382,6 +2382,93 @@  void tcg_gen_gvec_sari(unsigned vece, uint32_t dofs, uint32_t aofs,
     }
 }
 
+void tcg_gen_gvec_shlv(unsigned vece, uint32_t dofs, uint32_t aofs,
+                       uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
+{
+    static const GVecGen3 g[4] = {
+        { .fniv = tcg_gen_shlv_vec,
+          .fno = gen_helper_gvec_shl8v,
+          .opc = INDEX_op_shlv_vec,
+          .vece = MO_8 },
+        { .fniv = tcg_gen_shlv_vec,
+          .fno = gen_helper_gvec_shl16v,
+          .opc = INDEX_op_shlv_vec,
+          .vece = MO_16 },
+        { .fni4 = tcg_gen_shl_i32,
+          .fniv = tcg_gen_shlv_vec,
+          .fno = gen_helper_gvec_shl32v,
+          .opc = INDEX_op_shlv_vec,
+          .vece = MO_32 },
+        { .fni8 = tcg_gen_shl_i64,
+          .fniv = tcg_gen_shlv_vec,
+          .fno = gen_helper_gvec_shl64v,
+          .opc = INDEX_op_shlv_vec,
+          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+          .vece = MO_64 },
+    };
+
+    tcg_debug_assert(vece <= MO_64);
+    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
+}
+
+void tcg_gen_gvec_shrv(unsigned vece, uint32_t dofs, uint32_t aofs,
+                       uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
+{
+    static const GVecGen3 g[4] = {
+        { .fniv = tcg_gen_shrv_vec,
+          .fno = gen_helper_gvec_shr8v,
+          .opc = INDEX_op_shrv_vec,
+          .vece = MO_8 },
+        { .fniv = tcg_gen_shrv_vec,
+          .fno = gen_helper_gvec_shr16v,
+          .opc = INDEX_op_shrv_vec,
+          .vece = MO_16 },
+        { .fni4 = tcg_gen_shr_i32,
+          .fniv = tcg_gen_shrv_vec,
+          .fno = gen_helper_gvec_shr32v,
+          .opc = INDEX_op_shrv_vec,
+          .vece = MO_32 },
+        { .fni8 = tcg_gen_shr_i64,
+          .fniv = tcg_gen_shrv_vec,
+          .fno = gen_helper_gvec_shr64v,
+          .opc = INDEX_op_shrv_vec,
+          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+          .vece = MO_64 },
+    };
+
+    tcg_debug_assert(vece <= MO_64);
+    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
+}
+
+void tcg_gen_gvec_sarv(unsigned vece, uint32_t dofs, uint32_t aofs,
+                       uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
+{
+    static const GVecGen3 g[4] = {
+        { .fniv = tcg_gen_sarv_vec,
+          .fno = gen_helper_gvec_sar8v,
+          .opc = INDEX_op_sarv_vec,
+          .vece = MO_8 },
+        { .fniv = tcg_gen_sarv_vec,
+          .fno = gen_helper_gvec_sar16v,
+          .opc = INDEX_op_sarv_vec,
+          .vece = MO_16 },
+        { .fni4 = tcg_gen_sar_i32,
+          .fniv = tcg_gen_sarv_vec,
+          .fno = gen_helper_gvec_sar32v,
+          .opc = INDEX_op_sarv_vec,
+          .vece = MO_32 },
+        { .fni8 = tcg_gen_sar_i64,
+          .fniv = tcg_gen_sarv_vec,
+          .fno = gen_helper_gvec_sar64v,
+          .opc = INDEX_op_sarv_vec,
+          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+          .vece = MO_64 },
+    };
+
+    tcg_debug_assert(vece <= MO_64);
+    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
+}
+
 /* Expand OPSZ bytes worth of three-operand operations using i32 elements.  */
 static void expand_cmp_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                            uint32_t oprsz, TCGCond cond)
diff --git a/tcg/tcg-op-vec.c b/tcg/tcg-op-vec.c
index ce7987b858..6601cb8a8f 100644
--- a/tcg/tcg-op-vec.c
+++ b/tcg/tcg-op-vec.c
@@ -481,3 +481,18 @@  void tcg_gen_umax_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
 {
     do_op3(vece, r, a, b, INDEX_op_umax_vec);
 }
+
+void tcg_gen_shlv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
+{
+    do_op3(vece, r, a, b, INDEX_op_shlv_vec);
+}
+
+void tcg_gen_shrv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
+{
+    do_op3(vece, r, a, b, INDEX_op_shrv_vec);
+}
+
+void tcg_gen_sarv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
+{
+    do_op3(vece, r, a, b, INDEX_op_sarv_vec);
+}
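
As a usage illustration (not part of this patch), a front end with 16-byte vector registers could invoke the new expanders as sketched below; the offset parameters are hypothetical and would normally be computed from the target's CPUState layout:

/* d[i] = a[i] << b[i] for each 32-bit element of a 16-byte vector.  */
static void gen_example_shlv32(uint32_t dofs, uint32_t aofs, uint32_t bofs)
{
    tcg_gen_gvec_shlv(MO_32, dofs, aofs, bofs, 16, 16);
}

Note that, as discussed in the thread above, this version of the expander does not truncate the shift counts in b, so the caller must mask them first if the guest architecture requires it.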