[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH 45/48] tcg/optimize: Propagate sign info for logical operations
From: Richard Henderson
Subject: [PATCH 45/48] tcg/optimize: Propagate sign info for logical operations
Date: Sun, 29 Aug 2021 23:24:48 -0700
Sign repetitions are perforce all identical, whether they are 1 or 0.
Bitwise operations preserve the relative quantity of the repetitions.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 29 +++++++++++++++++++++++++++++
1 file changed, 29 insertions(+)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 9a752fbe29..cbb5700f44 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -952,6 +952,13 @@ static bool fold_and(OptContext *ctx, TCGOp *op)
z2 = arg_info(op->args[2])->z_mask;
ctx->z_mask = z1 & z2;
+ /*
+ * Sign repetitions are perforce all identical, whether they are 1 or 0.
+ * Bitwise operations preserve the relative quantity of the repetitions.
+ */
+ ctx->s_mask = arg_info(op->args[1])->s_mask
+ & arg_info(op->args[2])->s_mask;
+
/*
* Known-zeros does not imply known-ones. Therefore unless
* arg2 is constant, we can't infer affected bits from it.
@@ -987,6 +994,8 @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
}
ctx->z_mask = z1;
+ ctx->s_mask = arg_info(op->args[1])->s_mask
+ & arg_info(op->args[2])->s_mask;
return fold_masks(ctx, op);
}
@@ -1278,6 +1287,9 @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
fold_xi_to_not(ctx, op, 0)) {
return true;
}
+
+ ctx->s_mask = arg_info(op->args[1])->s_mask
+ & arg_info(op->args[2])->s_mask;
return false;
}
@@ -1466,6 +1478,8 @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
ctx->z_mask = arg_info(op->args[3])->z_mask
| arg_info(op->args[4])->z_mask;
+ ctx->s_mask = arg_info(op->args[3])->s_mask
+ & arg_info(op->args[4])->s_mask;
if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
uint64_t tv = arg_info(op->args[3])->val;
@@ -1545,6 +1559,9 @@ static bool fold_nand(OptContext *ctx, TCGOp *op)
fold_xi_to_not(ctx, op, -1)) {
return true;
}
+
+ ctx->s_mask = arg_info(op->args[1])->s_mask
+ & arg_info(op->args[2])->s_mask;
return false;
}
@@ -1574,6 +1591,9 @@ static bool fold_nor(OptContext *ctx, TCGOp *op)
fold_xi_to_not(ctx, op, 0)) {
return true;
}
+
+ ctx->s_mask = arg_info(op->args[1])->s_mask
+ & arg_info(op->args[2])->s_mask;
return false;
}
@@ -1583,6 +1603,8 @@ static bool fold_not(OptContext *ctx, TCGOp *op)
return true;
}
+ ctx->s_mask = arg_info(op->args[1])->s_mask;
+
/* Because of fold_to_not, we want to always return true, via finish. */
finish_folding(ctx, op);
return true;
@@ -1598,6 +1620,8 @@ static bool fold_or(OptContext *ctx, TCGOp *op)
ctx->z_mask = arg_info(op->args[1])->z_mask
| arg_info(op->args[2])->z_mask;
+ ctx->s_mask = arg_info(op->args[1])->s_mask
+ & arg_info(op->args[2])->s_mask;
return fold_masks(ctx, op);
}
@@ -1609,6 +1633,9 @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
fold_xi_to_not(ctx, op, 0)) {
return true;
}
+
+ ctx->s_mask = arg_info(op->args[1])->s_mask
+ & arg_info(op->args[2])->s_mask;
return false;
}
@@ -1873,6 +1900,8 @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
ctx->z_mask = arg_info(op->args[1])->z_mask
| arg_info(op->args[2])->z_mask;
+ ctx->s_mask = arg_info(op->args[1])->s_mask
+ & arg_info(op->args[2])->s_mask;
return fold_masks(ctx, op);
}
--
2.25.1
- [PATCH 33/48] tcg/optimize: Add type to OptContext, (continued)
- [PATCH 33/48] tcg/optimize: Add type to OptContext, Richard Henderson, 2021/08/30
- [PATCH 35/48] tcg/optimize: Split out fold_sub_to_neg, Richard Henderson, 2021/08/30
- [PATCH 36/48] tcg/optimize: Split out fold_xi_to_x, Richard Henderson, 2021/08/30
- [PATCH 37/48] tcg/optimize: Split out fold_ix_to_i, Richard Henderson, 2021/08/30
- [PATCH 39/48] tcg/optimize: Expand fold_mulu2_i32 to all 4-arg multiplies, Richard Henderson, 2021/08/30
- [PATCH 40/48] tcg/optimize: Expand fold_addsub2_i32 to 64-bit ops, Richard Henderson, 2021/08/30
- [PATCH 41/48] tcg/optimize: Sink commutative operand swapping into fold functions, Richard Henderson, 2021/08/30
- [PATCH 47/48] tcg/optimize: Propagate sign info for bit counting, Richard Henderson, 2021/08/30
- [PATCH 44/48] tcg/optimize: Optimize sign extensions, Richard Henderson, 2021/08/30
- [PATCH 45/48] tcg/optimize: Propagate sign info for logical operations, Richard Henderson <=
- [PATCH 43/48] tcg/optimize: Stop forcing z_mask to "garbage" for 32-bit values, Richard Henderson, 2021/08/30
- [PATCH 46/48] tcg/optimize: Propagate sign info for setcond, Richard Henderson, 2021/08/30
- [PATCH 42/48] tcg/optimize: Add more simplifications for orc, Richard Henderson, 2021/08/30
- [PATCH 48/48] tcg/optimize: Propagate sign info for shifting, Richard Henderson, 2021/08/30
- Re: [PATCH 00/48] tcg: optimize redundant sign extensions, Philippe Mathieu-Daudé, 2021/08/30