@@ -152,6 +152,11 @@ DEF_HELPER_FLAGS_4(gvec_sub16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_sub32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_sub64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)

+DEF_HELPER_FLAGS_4(gvec_mul8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_mul16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_mul32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_mul64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
DEF_HELPER_FLAGS_3(gvec_neg8, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(gvec_neg16, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(gvec_neg32, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
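For reference, each DEF_HELPER_FLAGS_4 line above expands to an out-of-line helper prototype roughly like this (a sketch of the generated declaration, not text from the patch):

    void helper_gvec_mul8(void *d, void *a, void *b, uint32_t desc);

TCG_CALL_NO_RWG tells the optimizer that the helper neither reads nor writes TCG globals, so their values may stay in host registers across the call.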
@@ -184,6 +184,7 @@ extern bool have_avx2;
#define TCG_TARGET_HAS_uzp_vec 0
#define TCG_TARGET_HAS_trn_vec 0
#define TCG_TARGET_HAS_cmp_vec 1
+#define TCG_TARGET_HAS_mul_vec 0

#define TCG_TARGET_deposit_i32_valid(ofs, len) \
(((ofs) == 0 && (len) == 8) || ((ofs) == 8 && (len) == 8) || \
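Note that the flag is introduced as 0, so the i386 backend does not yet advertise mul_vec; until a backend enables it or provides an expansion, the generic expander below falls back to the out-of-line gvec_mul* helpers.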
@@ -134,6 +134,8 @@ void tcg_gen_gvec_add(unsigned vece, uint32_t dofs, uint32_t aofs,
uint32_t bofs, uint32_t opsz, uint32_t clsz);
void tcg_gen_gvec_sub(unsigned vece, uint32_t dofs, uint32_t aofs,
uint32_t bofs, uint32_t opsz, uint32_t clsz);
+void tcg_gen_gvec_mul(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t opsz, uint32_t clsz);

void tcg_gen_gvec_and(unsigned vece, uint32_t dofs, uint32_t aofs,
uint32_t bofs, uint32_t opsz, uint32_t clsz);
@@ -920,6 +920,7 @@ void tcg_gen_movi_v128(TCGv_vec, uint64_t, uint64_t);
void tcg_gen_movi_v256(TCGv_vec, uint64_t, uint64_t, uint64_t, uint64_t);
void tcg_gen_add_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
void tcg_gen_sub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
+void tcg_gen_mul_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
void tcg_gen_and_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
void tcg_gen_or_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
void tcg_gen_xor_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
@@ -220,6 +220,7 @@ DEF(st_vec, 0, 2, 1, IMPLVEC)

DEF(add_vec, 1, 2, 0, IMPLVEC)
DEF(sub_vec, 1, 2, 0, IMPLVEC)
+DEF(mul_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_mul_vec))
DEF(neg_vec, 1, 1, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_neg_vec))

DEF(and_vec, 1, 2, 0, IMPLVEC)
@@ -185,6 +185,7 @@ typedef uint64_t TCGRegSet;
#define TCG_TARGET_HAS_uzp_vec 0
#define TCG_TARGET_HAS_trn_vec 0
#define TCG_TARGET_HAS_cmp_vec 0
+#define TCG_TARGET_HAS_mul_vec 0
#else
#define TCG_TARGET_MAYBE_vec 1
#endif
@@ -141,6 +141,50 @@ void HELPER(gvec_sub64)(void *d, void *a, void *b, uint32_t desc)
clear_high(d, oprsz, desc);
}

+void HELPER(gvec_mul8)(void *d, void *a, void *b, uint32_t desc)
+{
+ intptr_t oprsz = simd_oprsz(desc);
+ intptr_t i;
+
+ for (i = 0; i < oprsz; i += sizeof(vec8)) {
+ *(vec8 *)(d + i) = *(vec8 *)(a + i) * *(vec8 *)(b + i);
+ }
+ clear_high(d, oprsz, desc);
+}
+
+void HELPER(gvec_mul16)(void *d, void *a, void *b, uint32_t desc)
+{
+ intptr_t oprsz = simd_oprsz(desc);
+ intptr_t i;
+
+ for (i = 0; i < oprsz; i += sizeof(vec16)) {
+ *(vec16 *)(d + i) = *(vec16 *)(a + i) * *(vec16 *)(b + i);
+ }
+ clear_high(d, oprsz, desc);
+}
+
+void HELPER(gvec_mul32)(void *d, void *a, void *b, uint32_t desc)
+{
+ intptr_t oprsz = simd_oprsz(desc);
+ intptr_t i;
+
+ for (i = 0; i < oprsz; i += sizeof(vec32)) {
+ *(vec32 *)(d + i) = *(vec32 *)(a + i) * *(vec32 *)(b + i);
+ }
+ clear_high(d, oprsz, desc);
+}
+
+void HELPER(gvec_mul64)(void *d, void *a, void *b, uint32_t desc)
+{
+ intptr_t oprsz = simd_oprsz(desc);
+ intptr_t i;
+
+ for (i = 0; i < oprsz; i += sizeof(vec64)) {
+ *(vec64 *)(d + i) = *(vec64 *)(a + i) * *(vec64 *)(b + i);
+ }
+ clear_high(d, oprsz, desc);
+}
+
void HELPER(gvec_neg8)(void *d, void *a, uint32_t desc)
{
intptr_t oprsz = simd_oprsz(desc);
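The vec8/vec16/vec32/vec64 types used by these helpers are host SIMD vectors declared near the top of tcg-runtime-gvec.c; roughly, as a sketch for context rather than part of this hunk:

    typedef uint8_t  vec8  __attribute__((vector_size(16)));
    typedef uint16_t vec16 __attribute__((vector_size(16)));
    typedef uint32_t vec32 __attribute__((vector_size(16)));
    typedef uint64_t vec64 __attribute__((vector_size(16)));

With GCC's vector extension, the `*` in each loop body is a lane-wise multiply that keeps the low half of every product, matching the mul_vec semantics.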
@@ -1026,6 +1026,35 @@ void tcg_gen_gvec_sub(unsigned vece, uint32_t dofs, uint32_t aofs,
tcg_gen_gvec_3(dofs, aofs, bofs, opsz, maxsz, &g[vece]);
}

+void tcg_gen_gvec_mul(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t bofs, uint32_t opsz, uint32_t maxsz)
+{
+ static const GVecGen3 g[4] = {
+ { .fniv = tcg_gen_mul_vec,
+ .fno = gen_helper_gvec_mul8,
+ .opc = INDEX_op_mul_vec,
+ .vece = MO_8 },
+ { .fniv = tcg_gen_mul_vec,
+ .fno = gen_helper_gvec_mul16,
+ .opc = INDEX_op_mul_vec,
+ .vece = MO_16 },
+ { .fni4 = tcg_gen_mul_i32,
+ .fniv = tcg_gen_mul_vec,
+ .fno = gen_helper_gvec_mul32,
+ .opc = INDEX_op_mul_vec,
+ .vece = MO_32 },
+ { .fni8 = tcg_gen_mul_i64,
+ .fniv = tcg_gen_mul_vec,
+ .fno = gen_helper_gvec_mul64,
+ .opc = INDEX_op_mul_vec,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .vece = MO_64 },
+ };
+
+ tcg_debug_assert(vece <= MO_64);
+ tcg_gen_gvec_3(dofs, aofs, bofs, opsz, maxsz, &g[vece]);
+}
+
/* Perform a vector negation using normal negation and a mask.
Compare gen_subv_mask above. */
static void gen_negv_mask(TCGv_i64 d, TCGv_i64 b, TCGv_i64 m)
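A hypothetical call site in a target front end could look like the following, where the offsets into the CPU env and the 16-byte sizes are illustrative rather than taken from the patch:

    tcg_gen_gvec_mul(MO_8, dofs, aofs, bofs, 16, 16);

The expander prefers host mul_vec when available, then the integral .fni4/.fni8 routes (MO_32 and MO_64 only), and finally the gvec_mul* helpers registered above.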
@@ -503,3 +503,25 @@ void tcg_gen_cmp_vec(TCGCond cond, unsigned vece,
tcg_expand_vec_op(INDEX_op_cmp_vec, type, vece, ri, ai, bi, cond);
}
}
+
+void tcg_gen_mul_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
+{
+ TCGTemp *rt = tcgv_vec_temp(r);
+ TCGTemp *at = tcgv_vec_temp(a);
+ TCGTemp *bt = tcgv_vec_temp(b);
+ TCGArg ri = temp_arg(rt);
+ TCGArg ai = temp_arg(at);
+ TCGArg bi = temp_arg(bt);
+ TCGType type = rt->base_type;
+ int can;
+
+ tcg_debug_assert(at->base_type == type);
+ tcg_debug_assert(bt->base_type == type);
+ can = tcg_can_emit_vec_op(INDEX_op_mul_vec, type, vece);
+ if (can > 0) {
+ vec_gen_3(INDEX_op_mul_vec, type, vece, ri, ai, bi);
+ } else {
+ tcg_debug_assert(can < 0);
+ tcg_expand_vec_op(INDEX_op_mul_vec, type, vece, ri, ai, bi);
+ }
+}
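tcg_can_emit_vec_op is tri-state: a positive result means the backend implements the opcode directly, a negative result means the backend can synthesize it via tcg_expand_vec_op, and zero means neither. The tcg_debug_assert(can < 0) on the fallback path records that callers are expected to have filtered out unsupported cases before reaching this function.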
@@ -1404,6 +1404,8 @@ bool tcg_op_supported(TCGOpcode op)
return have_vec && TCG_TARGET_HAS_andc_vec;
case INDEX_op_orc_vec:
return have_vec && TCG_TARGET_HAS_orc_vec;
+ case INDEX_op_mul_vec:
+ return have_vec && TCG_TARGET_HAS_mul_vec;
case INDEX_op_shli_vec:
case INDEX_op_shri_vec:
case INDEX_op_sari_vec:
@@ -547,6 +547,10 @@ E.g. VECL=1 -> 64 << 1 -> v128, and VECE=2 -> 1 << 2 -> i32.

Similarly, v0 = v1 - v2.

+* mul_vec v0, v1, v2
+
+ Similarly, v0 = v1 * v2.
+
* neg_vec v0, v1

Similarly, v0 = -v1.
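As a concrete instance of the notation above: with VECE=1 (16-bit elements) on a v128 operand, mul_vec performs eight independent 16-bit multiplies, each keeping only the low 16 bits of its product.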
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/tcg-runtime.h      |  5 +++++
 tcg/i386/tcg-target.h        |  1 +
 tcg/tcg-op-gvec.h            |  2 ++
 tcg/tcg-op.h                 |  1 +
 tcg/tcg-opc.h                |  1 +
 tcg/tcg.h                    |  1 +
 accel/tcg/tcg-runtime-gvec.c | 44 ++++++++++++++++++++++++++++++++++++++++++++
 tcg/tcg-op-gvec.c            | 29 +++++++++++++++++++++++++++++
 tcg/tcg-op-vec.c             | 22 ++++++++++++++++++++++
 tcg/tcg.c                    |  2 ++
 tcg/README                   |  4 ++++
 11 files changed, 112 insertions(+)

--
2.13.6