[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH for-8.0 24/29] tcg/i386: Replace is64 with type in qemu_ld/st routines
From: Richard Henderson
Subject: [PATCH for-8.0 24/29] tcg/i386: Replace is64 with type in qemu_ld/st routines
Date: Fri, 18 Nov 2022 01:47:49 -0800
Prepare for TCG_TYPE_I128 by not using a boolean.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/i386/tcg-target.c.inc | 54 ++++++++++++++++++++++++++-------------
1 file changed, 36 insertions(+), 18 deletions(-)
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index eb93807b5f..e38f08bd12 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -1772,7 +1772,7 @@ static void * const qemu_st_helpers[MO_SIZE + 1] = {
* Record the context of a call to the out of line helper code for the slow
path
* for a load or store, so that we can later generate the correct helper code
*/
-static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64,
+static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGType type,
MemOpIdx oi,
TCGReg datalo, TCGReg datahi,
TCGReg addrlo, TCGReg addrhi,
@@ -1783,7 +1783,7 @@ static void add_qemu_ldst_label(TCGContext *s, bool
is_ld, bool is_64,
label->is_ld = is_ld;
label->oi = oi;
- label->type = is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
+ label->type = type;
label->datalo_reg = datalo;
label->datahi_reg = datahi;
label->addrlo_reg = addrlo;
@@ -2124,10 +2124,10 @@ static inline int setup_guest_base_seg(void)
static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
TCGReg base, int index, intptr_t ofs,
- int seg, bool is64, MemOp memop)
+ int seg, TCGType type, MemOp memop)
{
bool use_movbe = false;
- int rexw = is64 * P_REXW;
+ int rexw = (type == TCG_TYPE_I32 ? 0 : P_REXW);
int movop = OPC_MOVL_GvEv;
/* Do big-endian loads with movbe. */
@@ -2220,7 +2220,7 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg
datalo, TCGReg datahi,
/* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and
EAX. It will be useful once fixed registers globals are less
common. */
-static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
+static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, TCGType type)
{
TCGReg datalo, datahi, addrlo;
TCGReg addrhi __attribute__((unused));
@@ -2232,7 +2232,16 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg
*args, bool is64)
#endif
datalo = *args++;
- datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
+ switch (type) {
+ case TCG_TYPE_I32:
+ datahi = 0;
+ break;
+ case TCG_TYPE_I64:
+ datahi = (TCG_TARGET_REG_BITS == 32 ? *args++ : 0);
+ break;
+ default:
+ g_assert_not_reached();
+ }
addrlo = *args++;
addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
oi = *args++;
@@ -2243,10 +2252,10 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg
*args, bool is64)
label_ptr, offsetof(CPUTLBEntry, addr_read));
/* TLB Hit. */
- tcg_out_qemu_ld_direct(s, datalo, datahi, TCG_REG_L1, -1, 0, 0, is64, opc);
+ tcg_out_qemu_ld_direct(s, datalo, datahi, TCG_REG_L1, -1, 0, 0, type, opc);
/* Record the current context of a load into ldst label */
- add_qemu_ldst_label(s, true, is64, oi, datalo, datahi,
+ add_qemu_ldst_label(s, true, type, oi, datalo, datahi,
TCG_REG_L1, addrhi, s->code_ptr, label_ptr);
#else
a_bits = get_alignment_bits(opc);
@@ -2255,9 +2264,9 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg
*args, bool is64)
}
tcg_out_qemu_ld_direct(s, datalo, datahi, addrlo, x86_guest_base_index,
x86_guest_base_offset, x86_guest_base_seg,
- is64, opc);
+ type, opc);
if (a_bits) {
- add_qemu_ldst_label(s, true, is64, oi, datalo, datahi,
+ add_qemu_ldst_label(s, true, type, oi, datalo, datahi,
addrlo, addrhi, s->code_ptr, label_ptr);
}
#endif
@@ -2315,7 +2324,7 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg
datalo, TCGReg datahi,
}
}
-static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
+static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, TCGType type)
{
TCGReg datalo, datahi, addrlo;
TCGReg addrhi __attribute__((unused));
@@ -2327,7 +2336,16 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg
*args, bool is64)
#endif
datalo = *args++;
- datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
+ switch (type) {
+ case TCG_TYPE_I32:
+ datahi = 0;
+ break;
+ case TCG_TYPE_I64:
+ datahi = (TCG_TARGET_REG_BITS == 32 ? *args++ : 0);
+ break;
+ default:
+ g_assert_not_reached();
+ }
addrlo = *args++;
addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
oi = *args++;
@@ -2341,7 +2359,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg
*args, bool is64)
tcg_out_qemu_st_direct(s, datalo, datahi, TCG_REG_L1, -1, 0, 0, opc);
/* Record the current context of a store into ldst label */
- add_qemu_ldst_label(s, false, is64, oi, datalo, datahi,
+ add_qemu_ldst_label(s, false, type, oi, datalo, datahi,
TCG_REG_L1, addrhi, s->code_ptr, label_ptr);
#else
a_bits = get_alignment_bits(opc);
@@ -2351,7 +2369,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg
*args, bool is64)
tcg_out_qemu_st_direct(s, datalo, datahi, addrlo, x86_guest_base_index,
x86_guest_base_offset, x86_guest_base_seg, opc);
if (a_bits) {
- add_qemu_ldst_label(s, false, is64, oi, datalo, datahi,
+ add_qemu_ldst_label(s, false, type, oi, datalo, datahi,
addrlo, addrhi, s->code_ptr, label_ptr);
}
#endif
@@ -2649,17 +2667,17 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode
opc,
break;
case INDEX_op_qemu_ld_i32:
- tcg_out_qemu_ld(s, args, 0);
+ tcg_out_qemu_ld(s, args, TCG_TYPE_I32);
break;
case INDEX_op_qemu_ld_i64:
- tcg_out_qemu_ld(s, args, 1);
+ tcg_out_qemu_ld(s, args, TCG_TYPE_I64);
break;
case INDEX_op_qemu_st_i32:
case INDEX_op_qemu_st8_i32:
- tcg_out_qemu_st(s, args, 0);
+ tcg_out_qemu_st(s, args, TCG_TYPE_I32);
break;
case INDEX_op_qemu_st_i64:
- tcg_out_qemu_st(s, args, 1);
+ tcg_out_qemu_st(s, args, TCG_TYPE_I64);
break;
OP_32_64(mulu2):
--
2.34.1
- [PATCH for-8.0 07/29] accel/tcg: Honor atomicity of loads, (continued)
- [PATCH for-8.0 07/29] accel/tcg: Honor atomicity of loads, Richard Henderson, 2022/11/18
- [PATCH for-8.0 09/29] tcg/tci: Use cpu_{ld,st}_mmu, Richard Henderson, 2022/11/18
- [PATCH for-8.0 08/29] accel/tcg: Honor atomicity of stores, Richard Henderson, 2022/11/18
- [PATCH for-8.0 11/29] accel/tcg: Implement helper_{ld, st}*_mmu for user-only, Richard Henderson, 2022/11/18
- [PATCH for-8.0 10/29] tcg: Unify helper_{be,le}_{ld,st}*, Richard Henderson, 2022/11/18
- [PATCH for-8.0 12/29] tcg: Add 128-bit guest memory primitives, Richard Henderson, 2022/11/18
- [PATCH for-8.0 21/29] tcg/i386: Introduce tcg_out_mov2, Richard Henderson, 2022/11/18
- [PATCH for-8.0 24/29] tcg/i386: Replace is64 with type in qemu_ld/st routines,
Richard Henderson <=
- [PATCH for-8.0 28/29] tcg/i386: Add vex_v argument to tcg_out_vex_modrm_pool, Richard Henderson, 2022/11/18
- [PATCH for-8.0 27/29] tcg/i386: Support 128-bit load/store with have_atomic16, Richard Henderson, 2022/11/18
- [PATCH for-8.0 17/29] tcg/aarch64: Add have_lse, have_lse2, Richard Henderson, 2022/11/18
- [PATCH for-8.0 18/29] accel/tcg: Add aarch64 specific support in ldst_atomicity, Richard Henderson, 2022/11/18
- [PATCH for-8.0 25/29] tcg/i386: Mark Win64 call-saved vector regs as reserved, Richard Henderson, 2022/11/18
- [PATCH for-8.0 29/29] tcg/i386: Honor 64-bit atomicity in 32-bit mode, Richard Henderson, 2022/11/18