@@ -1451,7 +1451,7 @@ static bool fold_and(OptContext *ctx, TCGOp *op)
 
 static bool fold_andc(OptContext *ctx, TCGOp *op)
 {
-    uint64_t z_mask, s_mask;
+    uint64_t z_mask, o_mask, s_mask;
     TempOptInfo *t1, *t2;
 
     if (fold_const2(ctx, op) ||
@@ -1463,7 +1463,6 @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
 
     t1 = arg_info(op->args[1]);
     t2 = arg_info(op->args[2]);
-    z_mask = t1->z_mask;
 
     if (ti_is_const(t2)) {
         /* Fold andc r,x,i to and r,x,~i. */
@@ -1484,20 +1483,16 @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
         return fold_and(ctx, op);
     }
 
-    /*
-     * Known-zeros does not imply known-ones.  Therefore unless
-     * arg2 is constant, we can't infer anything from it.
-     */
-    if (ti_is_const(t2)) {
-        uint64_t v2 = ti_const_val(t2);
-        if (fold_affected_mask(ctx, op, z_mask & v2)) {
-            return true;
-        }
-        z_mask &= ~v2;
+    /* Affected bits are those not known zero, masked by those known zero. */
+    if (fold_affected_mask(ctx, op, t1->z_mask & t2->z_mask)) {
+        return true;
     }
 
+    z_mask = t1->z_mask & ~t2->o_mask;
+    o_mask = t1->o_mask & ~t2->z_mask;
     s_mask = t1->s_mask & t2->s_mask;
-    return fold_masks_zs(ctx, op, z_mask, s_mask);
+
+    return fold_masks_zos(ctx, op, z_mask, o_mask, s_mask);
 }
 
 static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 21 ++++++++-------------
 1 file changed, 8 insertions(+), 13 deletions(-)
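
As a quick sanity check of the mask algebra above, here is a small standalone
C program (illustrative only, not part of the patch; all identifiers are
local to this sketch). It brute-forces 4-bit values: for r = x & ~y, the
derived masks z_r = z1 & ~o2 (may be 1) and o_r = o1 & ~z2 (known 1) must
describe every concrete result consistent with the input masks, and a zero
affected mask (z1 & z2) must imply the operation is a copy of the first
input, which is what the fold_affected_mask call relies on. The s_mask
(sign-repetition) term is not modeled here.

/*
 * Brute-force check of the andc mask algebra over 4-bit values.
 * A (z, o) pair describes a temp: z = bits that may be 1,
 * o = bits known to be 1 (so o is always a subset of z).
 */
#include <assert.h>
#include <stdio.h>

#define BITS  4
#define LIMIT (1u << BITS)
#define MASK  (LIMIT - 1)

/* True if concrete value v is consistent with masks (z, o). */
static int consistent(unsigned v, unsigned z, unsigned o)
{
    return (v & o) == o && (v & ~z & MASK) == 0;
}

int main(void)
{
    for (unsigned z1 = 0; z1 < LIMIT; z1++) {
        for (unsigned o1 = 0; o1 < LIMIT; o1++) {
            if (o1 & ~z1) {
                continue;   /* known-1 bits must also be possible-1 */
            }
            for (unsigned z2 = 0; z2 < LIMIT; z2++) {
                for (unsigned o2 = 0; o2 < LIMIT; o2++) {
                    if (o2 & ~z2) {
                        continue;
                    }
                    /* The three masks computed in fold_andc. */
                    unsigned z_r = z1 & ~o2;
                    unsigned o_r = o1 & ~z2;
                    unsigned a_r = z1 & z2;

                    for (unsigned x = 0; x < LIMIT; x++) {
                        if (!consistent(x, z1, o1)) {
                            continue;
                        }
                        for (unsigned y = 0; y < LIMIT; y++) {
                            if (!consistent(y, z2, o2)) {
                                continue;
                            }
                            unsigned r = x & ~y & MASK;
                            /* The result must obey the derived masks. */
                            assert(consistent(r, z_r, o_r));
                            /* Zero affected mask => andc is a copy of x. */
                            if (a_r == 0) {
                                assert(r == x);
                            }
                        }
                    }
                }
            }
        }
    }
    printf("andc mask algebra verified for %d-bit values\n", BITS);
    return 0;
}

Compile and run with any C99 compiler; it prints a single line on success.
Note how a_r = z1 & z2 matches the new comment: bits of arg1 not known zero
(z1), kept only where arg2 is not known zero either (z2).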