[PATCH v3 30/57] tcg/s390x: Introduce HostAddress
From: Richard Henderson
Subject: [PATCH v3 30/57] tcg/s390x: Introduce HostAddress
Date: Mon, 24 Apr 2023 06:40:38 +0100
Collect the 3 potential parts of the host address into a struct.
Reorg tcg_out_qemu_{ld,st}_direct to use it.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
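[Editor's background note, not part of the commit: s390x RX/RXY-format
memory operands encode an address as base register + optional index
register + displacement, which is exactly what the new HostAddress
struct collects. A minimal standalone sketch of how the three fields
combine into an effective address; "Reg", REG_NONE, and reg_value()
are illustrative stand-ins, not QEMU identifiers:

    #include <stdint.h>

    typedef int Reg;
    enum { REG_NONE = -1 };          /* stands in for TCG_REG_NONE */

    typedef struct {
        Reg base;
        Reg index;
        int disp;
    } HostAddress;

    /* Toy register-file model so the sketch is self-contained. */
    static uint64_t reg_value(Reg r)
    {
        return (uint64_t)r * 0x1000;
    }

    /* base + (optional) index + displacement, as on s390x. */
    static uint64_t effective_address(HostAddress h)
    {
        uint64_t ea = reg_value(h.base) + h.disp;
        if (h.index != REG_NONE) {
            ea += reg_value(h.index);
        }
        return ea;
    }
]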
tcg/s390x/tcg-target.c.inc | 109 ++++++++++++++++++++-----------------
1 file changed, 60 insertions(+), 49 deletions(-)
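[Editor's note on the user-only path below: tcg_prepare_user_ldst
decides whether guest_base can live in the displacement field. RXY
displacements are signed 20-bit (-0x80000 .. 0x7ffff), so a guest_base
below 0x80000 fits directly and the index register stays free; larger
bases must sit in a dedicated register. A hedged standalone sketch of
just that decision; the types and GUEST_BASE_REG value are mock
stand-ins, not QEMU code:

    typedef int Reg;
    enum { REG_NONE = -1, GUEST_BASE_REG = 10 };  /* mock values */

    typedef struct {
        Reg base;
        Reg index;
        int disp;
    } HostAddress;

    /* Small bases fold into the 20-bit signed displacement; larger
     * ones are assumed preloaded into a dedicated register.
     */
    static HostAddress user_address(Reg addr_reg, unsigned long guest_base)
    {
        if (guest_base < 0x80000) {
            return (HostAddress){ .base = addr_reg,
                                  .index = REG_NONE,
                                  .disp = (int)guest_base };
        }
        return (HostAddress){ .base = addr_reg,
                              .index = GUEST_BASE_REG,
                              .disp = 0 };
    }
]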
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index e931f0cde4..da7ee5b085 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -1606,58 +1606,64 @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest,
tcg_out_call_int(s, dest);
}
+typedef struct {
+ TCGReg base;
+ TCGReg index;
+ int disp;
+} HostAddress;
+
static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg data,
- TCGReg base, TCGReg index, int disp)
+ HostAddress h)
{
switch (opc & (MO_SSIZE | MO_BSWAP)) {
case MO_UB:
- tcg_out_insn(s, RXY, LLGC, data, base, index, disp);
+ tcg_out_insn(s, RXY, LLGC, data, h.base, h.index, h.disp);
break;
case MO_SB:
- tcg_out_insn(s, RXY, LGB, data, base, index, disp);
+ tcg_out_insn(s, RXY, LGB, data, h.base, h.index, h.disp);
break;
case MO_UW | MO_BSWAP:
/* swapped unsigned halfword load with upper bits zeroed */
- tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
+ tcg_out_insn(s, RXY, LRVH, data, h.base, h.index, h.disp);
tcg_out_ext16u(s, data, data);
break;
case MO_UW:
- tcg_out_insn(s, RXY, LLGH, data, base, index, disp);
+ tcg_out_insn(s, RXY, LLGH, data, h.base, h.index, h.disp);
break;
case MO_SW | MO_BSWAP:
/* swapped sign-extended halfword load */
- tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
+ tcg_out_insn(s, RXY, LRVH, data, h.base, h.index, h.disp);
tcg_out_ext16s(s, TCG_TYPE_REG, data, data);
break;
case MO_SW:
- tcg_out_insn(s, RXY, LGH, data, base, index, disp);
+ tcg_out_insn(s, RXY, LGH, data, h.base, h.index, h.disp);
break;
case MO_UL | MO_BSWAP:
/* swapped unsigned int load with upper bits zeroed */
- tcg_out_insn(s, RXY, LRV, data, base, index, disp);
+ tcg_out_insn(s, RXY, LRV, data, h.base, h.index, h.disp);
tcg_out_ext32u(s, data, data);
break;
case MO_UL:
- tcg_out_insn(s, RXY, LLGF, data, base, index, disp);
+ tcg_out_insn(s, RXY, LLGF, data, h.base, h.index, h.disp);
break;
case MO_SL | MO_BSWAP:
/* swapped sign-extended int load */
- tcg_out_insn(s, RXY, LRV, data, base, index, disp);
+ tcg_out_insn(s, RXY, LRV, data, h.base, h.index, h.disp);
tcg_out_ext32s(s, data, data);
break;
case MO_SL:
- tcg_out_insn(s, RXY, LGF, data, base, index, disp);
+ tcg_out_insn(s, RXY, LGF, data, h.base, h.index, h.disp);
break;
case MO_UQ | MO_BSWAP:
- tcg_out_insn(s, RXY, LRVG, data, base, index, disp);
+ tcg_out_insn(s, RXY, LRVG, data, h.base, h.index, h.disp);
break;
case MO_UQ:
- tcg_out_insn(s, RXY, LG, data, base, index, disp);
+ tcg_out_insn(s, RXY, LG, data, h.base, h.index, h.disp);
break;
default:
@@ -1666,44 +1672,44 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg data,
}
static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg data,
- TCGReg base, TCGReg index, int disp)
+ HostAddress h)
{
switch (opc & (MO_SIZE | MO_BSWAP)) {
case MO_UB:
- if (disp >= 0 && disp < 0x1000) {
- tcg_out_insn(s, RX, STC, data, base, index, disp);
+ if (h.disp >= 0 && h.disp < 0x1000) {
+ tcg_out_insn(s, RX, STC, data, h.base, h.index, h.disp);
} else {
- tcg_out_insn(s, RXY, STCY, data, base, index, disp);
+ tcg_out_insn(s, RXY, STCY, data, h.base, h.index, h.disp);
}
break;
case MO_UW | MO_BSWAP:
- tcg_out_insn(s, RXY, STRVH, data, base, index, disp);
+ tcg_out_insn(s, RXY, STRVH, data, h.base, h.index, h.disp);
break;
case MO_UW:
- if (disp >= 0 && disp < 0x1000) {
- tcg_out_insn(s, RX, STH, data, base, index, disp);
+ if (h.disp >= 0 && h.disp < 0x1000) {
+ tcg_out_insn(s, RX, STH, data, h.base, h.index, h.disp);
} else {
- tcg_out_insn(s, RXY, STHY, data, base, index, disp);
+ tcg_out_insn(s, RXY, STHY, data, h.base, h.index, h.disp);
}
break;
case MO_UL | MO_BSWAP:
- tcg_out_insn(s, RXY, STRV, data, base, index, disp);
+ tcg_out_insn(s, RXY, STRV, data, h.base, h.index, h.disp);
break;
case MO_UL:
- if (disp >= 0 && disp < 0x1000) {
- tcg_out_insn(s, RX, ST, data, base, index, disp);
+ if (h.disp >= 0 && h.disp < 0x1000) {
+ tcg_out_insn(s, RX, ST, data, h.base, h.index, h.disp);
} else {
- tcg_out_insn(s, RXY, STY, data, base, index, disp);
+ tcg_out_insn(s, RXY, STY, data, h.base, h.index, h.disp);
}
break;
case MO_UQ | MO_BSWAP:
- tcg_out_insn(s, RXY, STRVG, data, base, index, disp);
+ tcg_out_insn(s, RXY, STRVG, data, h.base, h.index, h.disp);
break;
case MO_UQ:
- tcg_out_insn(s, RXY, STG, data, base, index, disp);
+ tcg_out_insn(s, RXY, STG, data, h.base, h.index, h.disp);
break;
default:
@@ -1883,20 +1889,23 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
return tcg_out_fail_alignment(s, l);
}
-static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
- TCGReg *index_reg, tcg_target_long *disp)
+static HostAddress tcg_prepare_user_ldst(TCGContext *s, TCGReg addr_reg)
{
+ TCGReg index;
+ int disp;
+
if (TARGET_LONG_BITS == 32) {
- tcg_out_ext32u(s, TCG_TMP0, *addr_reg);
- *addr_reg = TCG_TMP0;
+ tcg_out_ext32u(s, TCG_TMP0, addr_reg);
+ addr_reg = TCG_TMP0;
}
if (guest_base < 0x80000) {
- *index_reg = TCG_REG_NONE;
- *disp = guest_base;
+ index = TCG_REG_NONE;
+ disp = guest_base;
} else {
- *index_reg = TCG_GUEST_BASE_REG;
- *disp = 0;
+ index = TCG_GUEST_BASE_REG;
+ disp = 0;
}
+ return (HostAddress){ .base = addr_reg, .index = index, .disp = disp };
}
#endif /* CONFIG_SOFTMMU */
@@ -1904,31 +1913,32 @@ static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
MemOpIdx oi, TCGType data_type)
{
MemOp opc = get_memop(oi);
+ HostAddress h;
+
#ifdef CONFIG_SOFTMMU
unsigned mem_index = get_mmuidx(oi);
tcg_insn_unit *label_ptr;
- TCGReg base_reg;
- base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 1);
+ h.base = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 1);
+ h.index = TCG_REG_R2;
+ h.disp = 0;
tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
label_ptr = s->code_ptr;
s->code_ptr += 1;
- tcg_out_qemu_ld_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);
+ tcg_out_qemu_ld_direct(s, opc, data_reg, h);
add_qemu_ldst_label(s, true, oi, data_type, data_reg, addr_reg,
s->code_ptr, label_ptr);
#else
- TCGReg index_reg;
- tcg_target_long disp;
unsigned a_bits = get_alignment_bits(opc);
if (a_bits) {
tcg_out_test_alignment(s, true, addr_reg, a_bits);
}
- tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
- tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, index_reg, disp);
+ h = tcg_prepare_user_ldst(s, addr_reg);
+ tcg_out_qemu_ld_direct(s, opc, data_reg, h);
#endif
}
@@ -1936,31 +1946,32 @@ static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
MemOpIdx oi, TCGType data_type)
{
MemOp opc = get_memop(oi);
+ HostAddress h;
+
#ifdef CONFIG_SOFTMMU
unsigned mem_index = get_mmuidx(oi);
tcg_insn_unit *label_ptr;
- TCGReg base_reg;
- base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 0);
+ h.base = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 0);
+ h.index = TCG_REG_R2;
+ h.disp = 0;
tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
label_ptr = s->code_ptr;
s->code_ptr += 1;
- tcg_out_qemu_st_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);
+ tcg_out_qemu_st_direct(s, opc, data_reg, h);
add_qemu_ldst_label(s, false, oi, data_type, data_reg, addr_reg,
s->code_ptr, label_ptr);
#else
- TCGReg index_reg;
- tcg_target_long disp;
unsigned a_bits = get_alignment_bits(opc);
if (a_bits) {
tcg_out_test_alignment(s, false, addr_reg, a_bits);
}
- tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
- tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, index_reg, disp);
+ h = tcg_prepare_user_ldst(s, addr_reg);
+ tcg_out_qemu_st_direct(s, opc, data_reg, h);
#endif
}
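[Editor's note: one detail worth calling out in tcg_out_qemu_st_direct
above is that the non-byte-swapped stores choose between the short RX
format (unsigned 12-bit displacement) and the long RXY format (signed
20-bit displacement). The `h.disp >= 0 && h.disp < 0x1000` tests
implement exactly that predicate; as a standalone illustration:

    #include <stdbool.h>

    /* An RX-format displacement is unsigned 12-bit, so only
     * 0 <= disp < 0x1000 qualifies; anything outside that range
     * needs the RXY long-displacement encoding.
     */
    static bool fits_rx_disp(int disp)
    {
        return disp >= 0 && disp < 0x1000;
    }
]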
--
2.34.1