From: Richard Henderson
Subject: [Qemu-devel] [PATCH v4 08/33] tcg-aarch64: Introduce tcg_fmt_Rdnm and tcg_fmt_Rdnm_lsl
Date: Sat, 14 Sep 2013 14:54:25 -0700
Now that we've converted opcode fields to pre-shifted insns, we
can merge the implementation of arithmetic and shift insns.
Simplify the left/right shift parameter to just the left shift
needed by tcg_out_tlb_read.
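Illustration, not part of the change itself: the AArch64 "shifted register"
arithmetic group and the "2-source data processing" shift group share the
Rd[4:0], Rn[9:5], Rm[20:16] field layout, so once each INSN_* constant
carries its full pre-shifted opcode, the two cases differ only in that
constant.  The opcode values in this sketch are the architectural base
encodings, assumed here rather than quoted from this series:

    enum { INSN_ADD = 0x0b000000, INSN_LSLV = 0x1ac02000 };

    tcg_fmt_Rdnm(s, INSN_ADD,  1, rd, rn, rm);    /* add  xD, xN, xM */
    tcg_fmt_Rdnm(s, INSN_LSLV, 1, rd, rn, rm);    /* lslv xD, xN, xM */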
Signed-off-by: Richard Henderson <address@hidden>
---
tcg/aarch64/tcg-target.c | 78 +++++++++++++++++++++++-------------------------
1 file changed, 38 insertions(+), 40 deletions(-)
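As a sanity check on tcg_fmt_Rdnm_lsl, the word emitted for the
tcg_out_tlb_read call below can be reproduced by hand.  A standalone
sketch, assuming CPU_TLB_ENTRY_BITS == 5 (the actual value is
target-dependent):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* add x2, x2, x0, lsl #5, assembled exactly as tcg_fmt_Rdnm_lsl
           does it: insn | sf << 31 | imm6 << 10 | rm << 16 | rn << 5 | rd.
           LSL is shift type 0, so bits {23,22} stay clear.  */
        uint32_t insn = 0x0b000000;   /* ADD (shifted register) base opcode */
        uint32_t sf = 1, rd = 2, rn = 2, rm = 0, imm6 = 5;

        uint32_t w = insn | sf << 31 | imm6 << 10 | rm << 16 | rn << 5 | rd;
        printf("0x%08x\n", w);        /* 0x8b001442 */
        return 0;
    }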
diff --git a/tcg/aarch64/tcg-target.c b/tcg/aarch64/tcg-target.c
index cc56fe5..0e7b67b 100644
--- a/tcg/aarch64/tcg-target.c
+++ b/tcg/aarch64/tcg-target.c
@@ -302,6 +302,30 @@ static inline uint32_t tcg_in32(TCGContext *s)
     return v;
 }
 
+/*
+ * Encode various formats.
+ */
+
+/* This function can be used for both Arithmetic and Logical (shifted register)
+   type insns. Since we don't actually use the other available shifts, we only
+   support LSL here. */
+static inline void tcg_fmt_Rdnm_lsl(TCGContext *s, AArch64Insn insn,
+                                    TCGType sf, TCGReg rd, TCGReg rn,
+                                    TCGReg rm, int imm6)
+{
+    /* Note that LSL is bits {23,22} = 0. */
+    tcg_out32(s, insn | sf << 31 | imm6 << 10 | rm << 16 | rn << 5 | rd);
+}
+
+/* This function can be used for most insns with 2 input registers and one
+   output register. This includes Arithmetic (shifted register, sans shift),
+   Logical, Shift, Multiply, Divide, and Bit operation. */
+static inline void tcg_fmt_Rdnm(TCGContext *s, AArch64Insn insn, TCGType sf,
+                                TCGReg rd, TCGReg rn, TCGReg rm)
+{
+    tcg_out32(s, insn | sf << 31 | rm << 16 | rn << 5 | rd);
+}
+
 static inline void tcg_out_ldst_9(TCGContext *s,
                                   enum aarch64_ldst_op_data op_data,
                                   enum aarch64_ldst_op_type op_type,
@@ -445,23 +469,6 @@ static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                  arg, arg1, arg2);
 }
 
-static inline void tcg_out_arith(TCGContext *s, AArch64Insn insn,
-                                 TCGType ext, TCGReg rd, TCGReg rn, TCGReg rm,
-                                 int shift_imm)
-{
-    /* Using shifted register arithmetic operations */
-    /* if extended register operation (64bit) just OR with 0x80 << 24 */
-    unsigned int shift, base = insn | (ext ? 0x80000000 : 0);
-    if (shift_imm == 0) {
-        shift = 0;
-    } else if (shift_imm > 0) {
-        shift = shift_imm << 10 | 1 << 22;
-    } else /* (shift_imm < 0) */ {
-        shift = (-shift_imm) << 10;
-    }
-    tcg_out32(s, base | rm << 16 | shift | rn << 5 | rd);
-}
-
 static inline void tcg_out_mul(TCGContext *s, TCGType ext,
                                TCGReg rd, TCGReg rn, TCGReg rm)
 {
@@ -470,15 +477,6 @@ static inline void tcg_out_mul(TCGContext *s, TCGType ext,
     tcg_out32(s, base | rm << 16 | rn << 5 | rd);
 }
 
-static inline void tcg_out_shiftrot_reg(TCGContext *s,
-                                        AArch64Insn insn, TCGType ext,
-                                        TCGReg rd, TCGReg rn, TCGReg rm)
-{
-    /* using 2-source data processing instructions 0x1ac02000 */
-    unsigned int base = insn | (ext ? 0x80000000 : 0);
-    tcg_out32(s, base | rm << 16 | rn << 5 | rd);
-}
-
 static inline void tcg_out_ubfm(TCGContext *s, TCGType ext, TCGReg rd,
                                 TCGReg rn, unsigned int a, unsigned int b)
 {
@@ -546,7 +544,7 @@ static inline void tcg_out_cmp(TCGContext *s, TCGType ext, TCGReg rn,
                                TCGReg rm)
 {
     /* Using CMP alias SUBS wzr, Wn, Wm */
-    tcg_out_arith(s, INSN_SUBS, ext, TCG_REG_XZR, rn, rm, 0);
+    tcg_fmt_Rdnm(s, INSN_SUBS, ext, TCG_REG_XZR, rn, rm);
 }
 
 static inline void tcg_out_cset(TCGContext *s, TCGType ext,
@@ -906,8 +904,8 @@ static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg,
     tcg_out_addi(s, 1, TCG_REG_X2, base, tlb_offset & 0xfff000);
     /* Merge the tlb index contribution into X2.
        X2 = X2 + (X0 << CPU_TLB_ENTRY_BITS) */
-    tcg_out_arith(s, INSN_ADD, 1, TCG_REG_X2, TCG_REG_X2,
-                  TCG_REG_X0, -CPU_TLB_ENTRY_BITS);
+    tcg_fmt_Rdnm_lsl(s, INSN_ADD, 1, TCG_REG_X2, TCG_REG_X2,
+                     TCG_REG_X0, CPU_TLB_ENTRY_BITS);
     /* Merge "low bits" from tlb offset, load the tlb comparator into X0.
        X0 = load [X2 + (tlb_offset & 0x000fff)] */
     tcg_out_ldst(s, TARGET_LONG_BITS == 64 ? LDST_64 : LDST_32,
@@ -1183,27 +1181,27 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     case INDEX_op_add_i64:
     case INDEX_op_add_i32:
-        tcg_out_arith(s, INSN_ADD, ext, a0, a1, a2, 0);
+        tcg_fmt_Rdnm(s, INSN_ADD, ext, a0, a1, a2);
         break;
 
     case INDEX_op_sub_i64:
    case INDEX_op_sub_i32:
-        tcg_out_arith(s, INSN_SUB, ext, a0, a1, a2, 0);
+        tcg_fmt_Rdnm(s, INSN_SUB, ext, a0, a1, a2);
         break;
 
     case INDEX_op_and_i64:
     case INDEX_op_and_i32:
-        tcg_out_arith(s, INSN_AND, ext, a0, a1, a2, 0);
+        tcg_fmt_Rdnm(s, INSN_AND, ext, a0, a1, a2);
         break;
 
     case INDEX_op_or_i64:
     case INDEX_op_or_i32:
-        tcg_out_arith(s, INSN_ORR, ext, a0, a1, a2, 0);
+        tcg_fmt_Rdnm(s, INSN_ORR, ext, a0, a1, a2);
         break;
 
     case INDEX_op_xor_i64:
     case INDEX_op_xor_i32:
-        tcg_out_arith(s, INSN_EOR, ext, a0, a1, a2, 0);
+        tcg_fmt_Rdnm(s, INSN_EOR, ext, a0, a1, a2);
         break;
 
     case INDEX_op_mul_i64:
@@ -1216,7 +1214,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         if (c2) {
             tcg_out_shl(s, ext, a0, a1, a2);
         } else {
-            tcg_out_shiftrot_reg(s, INSN_LSLV, ext, a0, a1, a2);
+            tcg_fmt_Rdnm(s, INSN_LSLV, ext, a0, a1, a2);
         }
         break;
 
@@ -1225,7 +1223,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         if (c2) {
             tcg_out_shr(s, ext, a0, a1, a2);
         } else {
-            tcg_out_shiftrot_reg(s, INSN_LSRV, ext, a0, a1, a2);
+            tcg_fmt_Rdnm(s, INSN_LSRV, ext, a0, a1, a2);
         }
         break;
 
@@ -1234,7 +1232,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         if (c2) {
             tcg_out_sar(s, ext, a0, a1, a2);
         } else {
-            tcg_out_shiftrot_reg(s, INSN_ASRV, ext, a0, a1, a2);
+            tcg_fmt_Rdnm(s, INSN_ASRV, ext, a0, a1, a2);
         }
         break;
 
@@ -1243,7 +1241,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         if (c2) {
             tcg_out_rotr(s, ext, a0, a1, a2);
         } else {
-            tcg_out_shiftrot_reg(s, INSN_RORV, ext, a0, a1, a2);
+            tcg_fmt_Rdnm(s, INSN_RORV, ext, a0, a1, a2);
         }
         break;
 
@@ -1252,8 +1250,8 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         if (c2) {
             tcg_out_rotl(s, ext, a0, a1, a2);
         } else {
-            tcg_out_arith(s, INSN_SUB, 0, TCG_REG_TMP, TCG_REG_XZR, a2, 0);
-            tcg_out_shiftrot_reg(s, INSN_RORV, ext, a0, a1, TCG_REG_TMP);
+            tcg_fmt_Rdnm(s, INSN_SUB, 0, TCG_REG_TMP, TCG_REG_XZR, a2);
+            tcg_fmt_Rdnm(s, INSN_RORV, ext, a0, a1, TCG_REG_TMP);
         }
         break;
 
--
1.8.3.1
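A note on the rotl_i32/rotl_i64 arm above: rotate-left by a variable
amount is implemented as NEG (SUB from xzr) feeding RORV, relying on
rotl(x, n) == rotr(x, (-n) mod width) and on RORV consuming its amount
modulo the register width; that is also why the 32-bit NEG (sf == 0) is
enough even for the 64-bit case, since only the low six bits of the
amount are used.  A quick check of the identity, as a standalone sketch:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t rotl32(uint32_t x, unsigned n)
    {
        n &= 31;
        return n ? (x << n) | (x >> (32 - n)) : x;
    }

    static uint32_t rotr32(uint32_t x, unsigned n)
    {
        n &= 31;                          /* RORV masks the amount likewise */
        return n ? (x >> n) | (x << (32 - n)) : x;
    }

    int main(void)
    {
        uint32_t x = 0xdeadbeef;
        for (unsigned n = 0; n < 32; n++) {
            if (rotl32(x, n) != rotr32(-n & 31 ? -n & 31 : 0, 0) && 0) {
                /* unreachable; see real comparison below */
            }
            if (rotl32(x, n) != rotr32(x, -n & 31)) {
                printf("mismatch at n=%u\n", n);
                return 1;
            }
        }
        printf("ok\n");                   /* all 32 amounts agree */
        return 0;
    }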