[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH 32/48] tcg/optimize: Split out fold_xi_to_i
From: Richard Henderson
Subject: [PATCH 32/48] tcg/optimize: Split out fold_xi_to_i
Date: Sun, 29 Aug 2021 23:24:35 -0700
Pull the "op r, a, 0 => movi r, 0" optimization into a function,
and use it in the outer opcode fold functions.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 32 +++++++++++++++-----------------
1 file changed, 15 insertions(+), 17 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index cb05da7b39..343fb7590a 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -695,6 +695,15 @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
return false;
}
+/* If the binary operation has second argument @i, fold to @i. */
+static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
+{
+ if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
+ return tcg_opt_gen_movi(ctx, op, op->args[0], i);
+ }
+ return false;
+}
+
/* If the binary operation has both arguments equal, fold to @i. */
static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
@@ -763,6 +772,7 @@ static bool fold_add2_i32(OptContext *ctx, TCGOp *op)
static bool fold_and(OptContext *ctx, TCGOp *op)
{
if (fold_const2(ctx, op) ||
+ fold_xi_to_i(ctx, op, 0) ||
fold_xx_to_x(ctx, op)) {
return true;
}
@@ -1078,7 +1088,11 @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
static bool fold_multiply(OptContext *ctx, TCGOp *op)
{
- return fold_const2(ctx, op);
+ if (fold_const2(ctx, op) ||
+ fold_xi_to_i(ctx, op, 0)) {
+ return true;
+ }
+ return false;
}
static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op)
@@ -1740,22 +1754,6 @@ void tcg_optimize(TCGContext *s)
continue;
}
- /* Simplify expression for "op r, a, 0 => movi r, 0" cases */
- switch (opc) {
- CASE_OP_32_64_VEC(and):
- CASE_OP_32_64_VEC(mul):
- CASE_OP_32_64(muluh):
- CASE_OP_32_64(mulsh):
- if (arg_is_const(op->args[2])
- && arg_info(op->args[2])->val == 0) {
- tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
- continue;
- }
- break;
- default:
- break;
- }
-
/*
* Process each opcode.
* Sorted alphabetically by opcode as much as possible.
--
2.25.1
- Re: [PATCH 27/48] tcg/optimize: Split out fold_bswap, (continued)
- [PATCH 29/48] tcg/optimize: Split out fold_mov, Richard Henderson, 2021/08/30
- [PATCH 30/48] tcg/optimize: Split out fold_xx_to_i, Richard Henderson, 2021/08/30
- [PATCH 38/48] tcg/optimize: Split out fold_masks, Richard Henderson, 2021/08/30
- [PATCH 26/48] tcg/optimize: Split out fold_count_zeros, Richard Henderson, 2021/08/30
- [PATCH 28/48] tcg/optimize: Split out fold_dup, fold_dup2, Richard Henderson, 2021/08/30
- [PATCH 11/48] tcg/optimize: Return true from tcg_opt_gen_{mov,movi}, Richard Henderson, 2021/08/30
- [PATCH 32/48] tcg/optimize: Split out fold_xi_to_i, Richard Henderson <=
- [PATCH 34/48] tcg/optimize: Split out fold_to_not, Richard Henderson, 2021/08/30
- [PATCH 31/48] tcg/optimize: Split out fold_xx_to_x, Richard Henderson, 2021/08/30
- [PATCH 33/48] tcg/optimize: Add type to OptContext, Richard Henderson, 2021/08/30
- [PATCH 35/48] tcg/optimize: Split out fold_sub_to_neg, Richard Henderson, 2021/08/30
- [PATCH 36/48] tcg/optimize: Split out fold_xi_to_x, Richard Henderson, 2021/08/30
- [PATCH 37/48] tcg/optimize: Split out fold_ix_to_i, Richard Henderson, 2021/08/30
- [PATCH 39/48] tcg/optimize: Expand fold_mulu2_i32 to all 4-arg multiplies, Richard Henderson, 2021/08/30
- [PATCH 40/48] tcg/optimize: Expand fold_addsub2_i32 to 64-bit ops, Richard Henderson, 2021/08/30