@@ -134,7 +134,7 @@ typedef enum {
#define TCG_TARGET_HAS_neg_vec 1
#define TCG_TARGET_HAS_shi_vec 1
#define TCG_TARGET_HAS_shs_vec 0
-#define TCG_TARGET_HAS_shv_vec 0
+#define TCG_TARGET_HAS_shv_vec 1
#define TCG_TARGET_HAS_cmp_vec 1
#define TCG_TARGET_HAS_mul_vec 1
#define TCG_TARGET_HAS_sat_vec 1
@@ -1,3 +1,7 @@
/* Target-specific opcodes for host vector expansion. These will be
emitted by tcg_expand_vec_op. For those familiar with GCC internals,
consider these to be UNSPEC with names. */
+
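+/* aa64_sshl_vec is AArch64 SSHL: a left shift by a per-lane signed count,
+   where negative counts shift right; tcg_expand_vec_op uses it for sarv_vec. */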
+DEF(aa64_sshl_vec, 1, 2, 0, IMPLVEC)
@@ -536,12 +536,14 @@ typedef enum {
I3616_CMEQ = 0x2e208c00,
I3616_SMAX = 0x0e206400,
I3616_SMIN = 0x0e206c00,
+ I3616_SSHL = 0x0e204400,
I3616_SQADD = 0x0e200c00,
I3616_SQSUB = 0x0e202c00,
I3616_UMAX = 0x2e206400,
I3616_UMIN = 0x2e206c00,
I3616_UQADD = 0x2e200c00,
I3616_UQSUB = 0x2e202c00,
+ I3616_USHL = 0x2e204400,

/* AdvSIMD two-reg misc. */
I3617_CMGT0 = 0x0e208800,
@@ -2257,6 +2259,13 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_sari_vec:
tcg_out_insn(s, 3614, SSHR, is_q, a0, a1, (16 << vece) - a2);
break;
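+ /* For the non-negative counts of shlv_vec, USHL and SSHL agree. */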
+ case INDEX_op_shlv_vec:
+ tcg_out_insn(s, 3616, USHL, is_q, vece, a0, a1, a2);
+ break;
+ case INDEX_op_aa64_sshl_vec:
+ tcg_out_insn(s, 3616, SSHL, is_q, vece, a0, a1, a2);
+ break;
case INDEX_op_cmp_vec:
{
TCGCond cond = args[3];
@@ -2324,7 +2333,12 @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
case INDEX_op_smin_vec:
case INDEX_op_umax_vec:
case INDEX_op_umin_vec:
+ case INDEX_op_shlv_vec:
return 1;
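+ /* Returning -1 requests expansion via tcg_expand_vec_op. */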
+ case INDEX_op_shrv_vec:
+ case INDEX_op_sarv_vec:
+ return -1;
case INDEX_op_mul_vec:
return vece < MO_64;

@@ -2336,6 +2350,34 @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
TCGArg a0, ...)
{
+ va_list va;
+ TCGv_vec v0, v1, v2, t1;
+
+ va_start(va, a0);
+ v0 = temp_tcgv_vec(arg_temp(a0));
+ v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
+ v2 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
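+ /* v0 = destination, v1 = value to shift, v2 = per-lane shift counts. */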
+
+ switch (opc) {
+ case INDEX_op_shrv_vec:
+ case INDEX_op_sarv_vec:
+ /* Right shifts are negative left shifts for AArch64. */
+ t1 = tcg_temp_new_vec(type);
+ tcg_gen_neg_vec(vece, t1, v2);
+ opc = (opc == INDEX_op_shrv_vec
+ ? INDEX_op_shlv_vec : INDEX_op_aa64_sshl_vec);
+ vec_gen_3(opc, type, vece, tcgv_vec_arg(v0),
+ tcgv_vec_arg(v1), tcgv_vec_arg(t1));
+ tcg_temp_free_vec(t1);
+ break;
+
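+ /* tcg_can_emit_vec_op returns -1 only for the opcodes handled above. */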
+ default:
+ g_assert_not_reached();
+ }
+
+ va_end(va);
}

static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
@@ -2517,6 +2559,10 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
case INDEX_op_smin_vec:
case INDEX_op_umax_vec:
case INDEX_op_umin_vec:
+ case INDEX_op_shlv_vec:
+ case INDEX_op_shrv_vec:
+ case INDEX_op_sarv_vec:
+ case INDEX_op_aa64_sshl_vec:
return &w_w_w;
case INDEX_op_not_vec:
case INDEX_op_neg_vec: