[PULL 09/56] tcg/optimize: Change tcg_opt_gen_{mov,movi} interface
From: Richard Henderson
Subject: [PULL 09/56] tcg/optimize: Change tcg_opt_gen_{mov,movi} interface
Date: Wed, 27 Oct 2021 19:40:44 -0700
Adjust the interface to take the OptContext parameter instead
of TCGContext or both.
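
The refactor follows a familiar pattern: fold the TCGContext pointer into the
per-pass OptContext so that helpers only need a single context argument. Below
is a minimal, self-contained C sketch of that pattern; apart from the
OptContext/TCGContext names it uses hypothetical stand-in definitions, not the
actual QEMU code.

/* Sketch of the refactor pattern; stand-in types, not QEMU's definitions. */
#include <stdio.h>

typedef struct TCGContext { int nb_ops; } TCGContext;

typedef struct OptContext {
    TCGContext *tcg;          /* back-pointer added by this patch */
    /* ... per-pass state such as temps_used ... */
} OptContext;

/* Before: helpers took TCGContext (or TCGContext plus OptContext). */
static void helper_old(TCGContext *s) { s->nb_ops--; }

/* After: helpers take only OptContext and reach TCGContext via ctx->tcg. */
static void helper_new(OptContext *ctx) { ctx->tcg->nb_ops--; }

int main(void)
{
    TCGContext s = { .nb_ops = 2 };
    OptContext ctx = { .tcg = &s };

    helper_old(&s);
    helper_new(&ctx);
    printf("remaining ops: %d\n", s.nb_ops);   /* prints 0 */
    return 0;
}

In the patch itself the back-pointer is filled in once at the top of
tcg_optimize(), via "OptContext ctx = { .tcg = s }", and every call site of
tcg_opt_gen_mov/tcg_opt_gen_movi drops the separate TCGContext argument.
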
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 67 +++++++++++++++++++++++++-------------------------
1 file changed, 34 insertions(+), 33 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index a37efff4d0..627a5b39f6 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -45,6 +45,7 @@ typedef struct TempOptInfo {
} TempOptInfo;
typedef struct OptContext {
+ TCGContext *tcg;
TCGTempSet temps_used;
} OptContext;
@@ -183,7 +184,7 @@ static bool args_are_copies(TCGArg arg1, TCGArg arg2)
return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
}
-static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
+static void tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
{
TCGTemp *dst_ts = arg_temp(dst);
TCGTemp *src_ts = arg_temp(src);
@@ -194,7 +195,7 @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
TCGOpcode new_op;
if (ts_are_copies(dst_ts, src_ts)) {
- tcg_op_remove(s, op);
+ tcg_op_remove(ctx->tcg, op);
return;
}
@@ -233,8 +234,8 @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
}
}
-static void tcg_opt_gen_movi(TCGContext *s, OptContext *ctx,
- TCGOp *op, TCGArg dst, uint64_t val)
+static void tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
+ TCGArg dst, uint64_t val)
{
const TCGOpDef *def = &tcg_op_defs[op->opc];
TCGType type;
@@ -251,7 +252,7 @@ static void tcg_opt_gen_movi(TCGContext *s, OptContext *ctx,
/* Convert movi to mov with constant temp. */
tv = tcg_constant_internal(type, val);
init_ts_info(ctx, tv);
- tcg_opt_gen_mov(s, op, dst, temp_arg(tv));
+ tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
}
static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
@@ -609,7 +610,7 @@ void tcg_optimize(TCGContext *s)
{
int nb_temps, nb_globals, i;
TCGOp *op, *op_next, *prev_mb = NULL;
- OptContext ctx = {};
+ OptContext ctx = { .tcg = s };
/* Array VALS has an element for each temp.
If this temp holds a constant then its value is kept in VALS' element.
@@ -723,7 +724,7 @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64(rotr):
if (arg_is_const(op->args[1])
&& arg_info(op->args[1])->val == 0) {
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
continue;
}
break;
@@ -838,7 +839,7 @@ void tcg_optimize(TCGContext *s)
if (!arg_is_const(op->args[1])
&& arg_is_const(op->args[2])
&& arg_info(op->args[2])->val == 0) {
- tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
continue;
}
break;
@@ -848,7 +849,7 @@ void tcg_optimize(TCGContext *s)
if (!arg_is_const(op->args[1])
&& arg_is_const(op->args[2])
&& arg_info(op->args[2])->val == -1) {
- tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
continue;
}
break;
@@ -1088,12 +1089,12 @@ void tcg_optimize(TCGContext *s)
if (partmask == 0) {
tcg_debug_assert(nb_oargs == 1);
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
continue;
}
if (affected == 0) {
tcg_debug_assert(nb_oargs == 1);
- tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
continue;
}
@@ -1105,7 +1106,7 @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64(mulsh):
if (arg_is_const(op->args[2])
&& arg_info(op->args[2])->val == 0) {
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
continue;
}
break;
@@ -1118,7 +1119,7 @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64_VEC(or):
CASE_OP_32_64_VEC(and):
if (args_are_copies(op->args[1], op->args[2])) {
- tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
continue;
}
break;
@@ -1132,7 +1133,7 @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64_VEC(sub):
CASE_OP_32_64_VEC(xor):
if (args_are_copies(op->args[1], op->args[2])) {
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
continue;
}
break;
@@ -1145,14 +1146,14 @@ void tcg_optimize(TCGContext *s)
allocator where needed and possible. Also detect copies. */
switch (opc) {
CASE_OP_32_64_VEC(mov):
- tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
continue;
case INDEX_op_dup_vec:
if (arg_is_const(op->args[1])) {
tmp = arg_info(op->args[1])->val;
tmp = dup_const(TCGOP_VECE(op), tmp);
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
continue;
}
break;
@@ -1160,7 +1161,7 @@ void tcg_optimize(TCGContext *s)
case INDEX_op_dup2_vec:
assert(TCG_TARGET_REG_BITS == 32);
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
- tcg_opt_gen_movi(s, &ctx, op, op->args[0],
+ tcg_opt_gen_movi(&ctx, op, op->args[0],
deposit64(arg_info(op->args[1])->val, 32, 32,
arg_info(op->args[2])->val));
continue;
@@ -1186,7 +1187,7 @@ void tcg_optimize(TCGContext *s)
case INDEX_op_extrh_i64_i32:
if (arg_is_const(op->args[1])) {
tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0);
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
continue;
}
break;
@@ -1197,7 +1198,7 @@ void tcg_optimize(TCGContext *s)
if (arg_is_const(op->args[1])) {
tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
op->args[2]);
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
continue;
}
break;
@@ -1227,7 +1228,7 @@ void tcg_optimize(TCGContext *s)
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
arg_info(op->args[2])->val);
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
continue;
}
break;
@@ -1238,9 +1239,9 @@ void tcg_optimize(TCGContext *s)
TCGArg v = arg_info(op->args[1])->val;
if (v != 0) {
tmp = do_constant_folding(opc, v, 0);
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
} else {
- tcg_opt_gen_mov(s, op, op->args[0], op->args[2]);
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[2]);
}
continue;
}
@@ -1251,7 +1252,7 @@ void tcg_optimize(TCGContext *s)
tmp = deposit64(arg_info(op->args[1])->val,
op->args[3], op->args[4],
arg_info(op->args[2])->val);
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
continue;
}
break;
@@ -1260,7 +1261,7 @@ void tcg_optimize(TCGContext *s)
if (arg_is_const(op->args[1])) {
tmp = extract64(arg_info(op->args[1])->val,
op->args[2], op->args[3]);
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
continue;
}
break;
@@ -1269,7 +1270,7 @@ void tcg_optimize(TCGContext *s)
if (arg_is_const(op->args[1])) {
tmp = sextract64(arg_info(op->args[1])->val,
op->args[2], op->args[3]);
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
continue;
}
break;
@@ -1286,7 +1287,7 @@ void tcg_optimize(TCGContext *s)
tmp = (int32_t)(((uint32_t)v1 >> shr) |
((uint32_t)v2 << (32 - shr)));
}
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
continue;
}
break;
@@ -1295,7 +1296,7 @@ void tcg_optimize(TCGContext *s)
tmp = do_constant_folding_cond(opc, op->args[1],
op->args[2], op->args[3]);
if (tmp != 2) {
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
continue;
}
break;
@@ -1319,7 +1320,7 @@ void tcg_optimize(TCGContext *s)
tmp = do_constant_folding_cond(opc, op->args[1],
op->args[2], op->args[5]);
if (tmp != 2) {
- tcg_opt_gen_mov(s, op, op->args[0], op->args[4-tmp]);
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[4-tmp]);
continue;
}
if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
@@ -1361,8 +1362,8 @@ void tcg_optimize(TCGContext *s)
rl = op->args[0];
rh = op->args[1];
- tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)a);
- tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(a >> 32));
+ tcg_opt_gen_movi(&ctx, op, rl, (int32_t)a);
+ tcg_opt_gen_movi(&ctx, op2, rh, (int32_t)(a >> 32));
continue;
}
break;
@@ -1377,8 +1378,8 @@ void tcg_optimize(TCGContext *s)
rl = op->args[0];
rh = op->args[1];
- tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)r);
- tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(r >> 32));
+ tcg_opt_gen_movi(&ctx, op, rl, (int32_t)r);
+ tcg_opt_gen_movi(&ctx, op2, rh, (int32_t)(r >> 32));
continue;
}
break;
@@ -1466,7 +1467,7 @@ void tcg_optimize(TCGContext *s)
op->args[5]);
if (tmp != 2) {
do_setcond_const:
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
continue;
}
if ((op->args[5] == TCG_COND_LT || op->args[5] == TCG_COND_GE)
--
2.25.1