@@ -23,3 +23,6 @@ C_O1_I4(r, r, rI, rM, rM)
C_O2_I4(r, r, rZ, rZ, rM, rM)
C_O0_I2(v, r)
C_O1_I1(v, r)
+C_O1_I1(v, v)
+C_O1_I2(v, v, v)
+C_O1_I2(v, v, vK)
@@ -17,6 +17,7 @@ REGS('v', ALL_VECTOR_REGS)
*/
CONST('I', TCG_CT_CONST_S12)
CONST('J', TCG_CT_CONST_J12)
+CONST('K', TCG_CT_CONST_S5)
CONST('N', TCG_CT_CONST_N12)
CONST('M', TCG_CT_CONST_M12)
CONST('Z', TCG_CT_CONST_ZERO)
@@ -151,7 +151,7 @@ typedef enum {
#define TCG_TARGET_HAS_nand_vec 0
#define TCG_TARGET_HAS_nor_vec 0
#define TCG_TARGET_HAS_eqv_vec 0
-#define TCG_TARGET_HAS_not_vec 0
+#define TCG_TARGET_HAS_not_vec 1
#define TCG_TARGET_HAS_neg_vec 0
#define TCG_TARGET_HAS_abs_vec 0
#define TCG_TARGET_HAS_roti_vec 0
@@ -111,6 +111,7 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
#define TCG_CT_CONST_N12 0x400
#define TCG_CT_CONST_M12 0x800
#define TCG_CT_CONST_J12 0x1000
+#define TCG_CT_CONST_S5 0x2000
#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32)
#define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32)
@@ -129,6 +130,10 @@ static bool tcg_target_const_match(int64_t val, int ct,
if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
return 1;
}
+ if (type >= TCG_TYPE_V64) {
+ /* Val is replicated by VECE; extract the highest element. */
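+ /* The shift (-8 << vece) & 63 is 56, 48, 32 or 0 for MO_8..MO_64. */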
+ val >>= (-8 << vece) & 63;
+ }
/*
* Sign extended from 12 bits: [-0x800, 0x7ff].
* Used for most arithmetic, as this is the isa field.
@@ -158,6 +163,13 @@ static bool tcg_target_const_match(int64_t val, int ct,
if ((ct & TCG_CT_CONST_J12) && ~val >= -0x800 && ~val <= 0x7ff) {
return 1;
}
+ /*
+ * Sign extended from 5 bits: [-0x10, 0x0f].
+ * Used for vector-immediate (.vi) operands.
+ */
+ if ((ct & TCG_CT_CONST_S5) && val >= -0x10 && val <= 0x0f) {
+ return 1;
+ }
return 0;
}
@@ -310,6 +322,16 @@ typedef enum {
OPC_VS4R_V = 0x2000027 | V_UNIT_STRIDE_WHOLE_REG | V_NF(3),
OPC_VS8R_V = 0x2000027 | V_UNIT_STRIDE_WHOLE_REG | V_NF(7),
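+ /* Integer add/sub and logical ops; the .vi forms take a signed 5-bit immediate. */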
+ OPC_VADD_VV = 0x57 | V_OPIVV,
+ OPC_VADD_VI = 0x57 | V_OPIVI,
+ OPC_VSUB_VV = 0x8000057 | V_OPIVV,
+ OPC_VAND_VV = 0x24000057 | V_OPIVV,
+ OPC_VAND_VI = 0x24000057 | V_OPIVI,
+ OPC_VOR_VV = 0x28000057 | V_OPIVV,
+ OPC_VOR_VI = 0x28000057 | V_OPIVI,
+ OPC_VXOR_VV = 0x2c000057 | V_OPIVV,
+ OPC_VXOR_VI = 0x2c000057 | V_OPIVI,
+
OPC_VMV_V_V = 0x5e000057 | V_OPIVV,
OPC_VMV_V_I = 0x5e000057 | V_OPIVI,
OPC_VMV_V_X = 0x5e000057 | V_OPIVX,
@@ -568,6 +590,12 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
* With RVV 1.0, vs2 is the first operand, while rs1/imm is the
* second operand.
*/
+static void tcg_out_opc_vv(TCGContext *s, RISCVInsn opc,
+ TCGReg vd, TCGReg vs2, TCGReg vs1)
+{
+ tcg_out32(s, encode_v(opc, vd, vs1, vs2, true));
+}
+
static void tcg_out_opc_vx(TCGContext *s, RISCVInsn opc,
TCGReg vd, TCGReg vs2, TCGReg rs1)
{
@@ -580,6 +608,16 @@ static void tcg_out_opc_vi(TCGContext *s, RISCVInsn opc,
tcg_out32(s, encode_vi(opc, vd, imm, vs2, true));
}
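+/*
+ * Emit the .vv form when the last operand is a register, or the .vi form
+ * when it was matched as a constant by the 'K' constraint.
+ */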
+static void tcg_out_opc_vv_vi(TCGContext *s, RISCVInsn o_vv, RISCVInsn o_vi,
+ TCGReg vd, TCGReg vs2, TCGArg vi1, int c_vi1)
+{
+ if (c_vi1) {
+ tcg_out_opc_vi(s, o_vi, vd, vs2, vi1);
+ } else {
+ tcg_out_opc_vv(s, o_vv, vd, vs2, vi1);
+ }
+}
+
typedef struct VsetCache {
uint32_t movi_insn;
uint32_t vset_insn;
@@ -2165,10 +2203,12 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
{
TCGType type = vecl + TCG_TYPE_V64;
TCGArg a0, a1, a2;
+ int c2;
a0 = args[0];
a1 = args[1];
a2 = args[2];
+ c2 = const_args[2];
switch (opc) {
case INDEX_op_dupm_vec:
@@ -2180,6 +2220,30 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_st_vec:
tcg_out_st(s, type, a0, a1, a2);
break;
+ case INDEX_op_add_vec:
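+ /* Add/sub need SEW set from vece; the bitwise ops below do not. */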
+ set_vtype_len_sew(s, type, vece);
+ tcg_out_opc_vv_vi(s, OPC_VADD_VV, OPC_VADD_VI, a0, a1, a2, c2);
+ break;
+ case INDEX_op_sub_vec:
+ set_vtype_len_sew(s, type, vece);
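+ /* There is no vsub.vi; vsub.vv computes vs2 - vs1, i.e. a1 - a2 here. */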
+ tcg_out_opc_vv(s, OPC_VSUB_VV, a0, a1, a2);
+ break;
+ case INDEX_op_and_vec:
+ set_vtype_len(s, type);
+ tcg_out_opc_vv_vi(s, OPC_VAND_VV, OPC_VAND_VI, a0, a1, a2, c2);
+ break;
+ case INDEX_op_or_vec:
+ set_vtype_len(s, type);
+ tcg_out_opc_vv_vi(s, OPC_VOR_VV, OPC_VOR_VI, a0, a1, a2, c2);
+ break;
+ case INDEX_op_xor_vec:
+ set_vtype_len(s, type);
+ tcg_out_opc_vv_vi(s, OPC_VXOR_VV, OPC_VXOR_VI, a0, a1, a2, c2);
+ break;
+ case INDEX_op_not_vec:
+ set_vtype_len(s, type);
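+ /* vnot.v is the standard alias for vxor.vi vd, vs2, -1. */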
+ tcg_out_opc_vi(s, OPC_VXOR_VI, a0, a1, -1);
+ break;
case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */
case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. */
default:
@@ -2196,6 +2260,13 @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
{
switch (opc) {
+ case INDEX_op_add_vec:
+ case INDEX_op_sub_vec:
+ case INDEX_op_and_vec:
+ case INDEX_op_or_vec:
+ case INDEX_op_xor_vec:
+ case INDEX_op_not_vec:
+ return 1;
default:
return 0;
}
@@ -2346,6 +2417,15 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_dupm_vec:
case INDEX_op_ld_vec:
return C_O1_I1(v, r);
+ case INDEX_op_not_vec:
+ return C_O1_I1(v, v);
+ case INDEX_op_add_vec:
+ case INDEX_op_and_vec:
+ case INDEX_op_or_vec:
+ case INDEX_op_xor_vec:
+ return C_O1_I2(v, v, vK);
+ case INDEX_op_sub_vec:
+ return C_O1_I2(v, v, v);
default:
g_assert_not_reached();
}