@@ -125,7 +125,7 @@ extern bool use_neon_instructions;
 
 #define TCG_TARGET_HAS_qemu_ldst_i128   0
 
-#define TCG_TARGET_HAS_tst              0
+#define TCG_TARGET_HAS_tst              1
 
 #define TCG_TARGET_HAS_v64              use_neon_instructions
 #define TCG_TARGET_HAS_v128             use_neon_instructions
@@ -1194,7 +1194,30 @@ static void tcg_out_mb(TCGContext *s, TCGArg a0)
 static TCGCond tcg_out_cmp(TCGContext *s, TCGCond cond, TCGReg a,
                            TCGArg b, int b_const)
 {
-    tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0, a, b, b_const);
+    if (!is_tst_cond(cond)) {
+        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0, a, b, b_const);
+        return cond;
+    }
+
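+    /* TST sets flags from "a & b", so TSTEQ/TSTNE reduce to plain EQ/NE. */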
+    cond = tcg_tst_eqne_cond(cond);
+    if (b_const) {
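+        /* encode_imm() is negative when b has no ARM rotated-immediate form. */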
+        int imm12 = encode_imm(b);
+
+        /*
+         * The compare constraints allow rIN, but TST does not support N.
+         * Be prepared to load the constant into a scratch register.
+         */
+        if (imm12 >= 0) {
+            tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, a, imm12);
+            return cond;
+        }
+        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, b);
+        b = TCG_REG_TMP;
+    }
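+    /* Register form: TST a, b with no shift applied to the second operand. */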
+    tcg_out_dat_reg(s, COND_AL, ARITH_TST, 0, a, b, SHIFT_IMM_LSL(0));
     return cond;
 }
 
@@ -1225,6 +1248,14 @@ static TCGCond tcg_out_cmp2(TCGContext *s, const TCGArg *args,
         tcg_out_dat_rI(s, COND_EQ, ARITH_CMP, 0, al, bl, const_bl);
         return cond;
 
+    case TCG_COND_TSTEQ:
+    case TCG_COND_TSTNE:
+        /* Similar, but with TST instead of CMP. */
+        tcg_out_dat_rI(s, COND_AL, ARITH_TST, 0, ah, bh, const_bh);
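+        /* Runs only if the high-half AND was zero, so Z covers all 64 bits. */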
+        tcg_out_dat_rI(s, COND_EQ, ARITH_TST, 0, al, bl, const_bl);
+        return tcg_tst_eqne_cond(cond);
+
     case TCG_COND_LT:
     case TCG_COND_GE:
         /* We perform a double-word subtraction and examine the result.