@@ -1191,6 +1191,10 @@ static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
  * 3) those that produce information about the result value.
  */
 
+static bool fold_or(OptContext *ctx, TCGOp *op);
+static bool fold_orc(OptContext *ctx, TCGOp *op);
+static bool fold_xor(OptContext *ctx, TCGOp *op);
+
 static bool fold_add(OptContext *ctx, TCGOp *op)
 {
     if (fold_const2_commutative(ctx, op) ||
@@ -1347,6 +1351,61 @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
     return fold_masks_zsa(ctx, op, z_mask, s_mask, a_mask);
 }
 
+static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
+{
+    /* If true and false values are the same, eliminate the cmp. */
+    if (args_are_copies(op->args[2], op->args[3])) {
+        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
+    }
+
+    if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
+        uint64_t tv = arg_info(op->args[2])->val;
+        uint64_t fv = arg_info(op->args[3])->val;
+
+        if (tv == -1 && fv == 0) {
+            return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
+        }
+        if (tv == 0 && fv == -1) {
+            if (TCG_TARGET_HAS_not_vec) {
+                op->opc = INDEX_op_not_vec;
+                return fold_not(ctx, op);
+            } else {
+                op->opc = INDEX_op_xor_vec;
+                op->args[2] = arg_new_constant(ctx, -1);
+                return fold_xor(ctx, op);
+            }
+        }
+    }
+    if (arg_is_const(op->args[2])) {
+        uint64_t tv = arg_info(op->args[2])->val;
+        if (tv == -1) {
+            op->opc = INDEX_op_or_vec;
+            op->args[2] = op->args[3];
+            return fold_or(ctx, op);
+        }
+        if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
+            op->opc = INDEX_op_andc_vec;
+            op->args[2] = op->args[1];
+            op->args[1] = op->args[3];
+            return fold_andc(ctx, op);
+        }
+    }
+    if (arg_is_const(op->args[3])) {
+        uint64_t fv = arg_info(op->args[3])->val;
+        if (fv == 0) {
+            op->opc = INDEX_op_and_vec;
+            return fold_and(ctx, op);
+        }
+        if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
+            op->opc = INDEX_op_orc_vec;
+            op->args[2] = op->args[1];
+            op->args[1] = op->args[3];
+            return fold_orc(ctx, op);
+        }
+    }
+    return finish_folding(ctx, op);
+}
+
 static bool fold_brcond(OptContext *ctx, TCGOp *op)
 {
     int i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[0],
@@ -2758,61 +2817,6 @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
     return fold_masks_zs(ctx, op, z_mask, s_mask);
 }
 
-static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
-{
-    /* If true and false values are the same, eliminate the cmp. */
-    if (args_are_copies(op->args[2], op->args[3])) {
-        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
-    }
-
-    if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
-        uint64_t tv = arg_info(op->args[2])->val;
-        uint64_t fv = arg_info(op->args[3])->val;
-
-        if (tv == -1 && fv == 0) {
-            return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
-        }
-        if (tv == 0 && fv == -1) {
-            if (TCG_TARGET_HAS_not_vec) {
-                op->opc = INDEX_op_not_vec;
-                return fold_not(ctx, op);
-            } else {
-                op->opc = INDEX_op_xor_vec;
-                op->args[2] = arg_new_constant(ctx, -1);
-                return fold_xor(ctx, op);
-            }
-        }
-    }
-    if (arg_is_const(op->args[2])) {
-        uint64_t tv = arg_info(op->args[2])->val;
-        if (tv == -1) {
-            op->opc = INDEX_op_or_vec;
-            op->args[2] = op->args[3];
-            return fold_or(ctx, op);
-        }
-        if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
-            op->opc = INDEX_op_andc_vec;
-            op->args[2] = op->args[1];
-            op->args[1] = op->args[3];
-            return fold_andc(ctx, op);
-        }
-    }
-    if (arg_is_const(op->args[3])) {
-        uint64_t fv = arg_info(op->args[3])->val;
-        if (fv == 0) {
-            op->opc = INDEX_op_and_vec;
-            return fold_and(ctx, op);
-        }
-        if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
-            op->opc = INDEX_op_orc_vec;
-            op->args[2] = op->args[1];
-            op->args[1] = op->args[3];
-            return fold_orc(ctx, op);
-        }
-    }
-    return finish_folding(ctx, op);
-}
-
 /* Propagate constants and copies, fold constant expressions. */
 void tcg_optimize(TCGContext *s)
 {
The big comment just above says functions should be sorted.
Add forward declarations as needed.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 114 +++++++++++++++++++++++++------------------------
 1 file changed, 59 insertions(+), 55 deletions(-)
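For context on the function being moved: fold_bitsel_vec rewrites bitsel_vec,
whose per-bit semantics are dest = (t & sel) | (f & ~sel), into cheaper
operations whenever the true and/or false operand is a known constant. Below
is a minimal standalone sketch of the identities the folds rely on; this is
not QEMU code, and the bitsel() helper is a hypothetical reference model:

#include <assert.h>
#include <stdint.h>

/* Reference model: per-bit, select t where sel is 1, else f. */
static uint64_t bitsel(uint64_t sel, uint64_t t, uint64_t f)
{
    return (t & sel) | (f & ~sel);
}

int main(void)
{
    uint64_t sel = 0x00ff00ff00ff00ffull;
    uint64_t t = 0x0123456789abcdefull;
    uint64_t f = 0xfedcba9876543210ull;

    /* tv == -1, fv == 0: the result is the selector itself (mov). */
    assert(bitsel(sel, -1ull, 0) == sel);
    /* tv == 0, fv == -1: the result is ~selector (not, or xor with -1). */
    assert(bitsel(sel, 0, -1ull) == ~sel);
    /* tv == -1: reduces to sel | f (or). */
    assert(bitsel(sel, -1ull, f) == (sel | f));
    /* tv == 0: reduces to f & ~sel (andc, with operands swapped). */
    assert(bitsel(sel, 0, f) == (f & ~sel));
    /* fv == 0: reduces to sel & t (and). */
    assert(bitsel(sel, t, 0) == (sel & t));
    /* fv == -1: reduces to t | ~sel (orc, with operands swapped). */
    assert(bitsel(sel, t, -1ull) == (t | ~sel));
    return 0;
}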