From: Peter Maydell
Subject: [Qemu-devel] [PATCH v2 36/42] target/arm: Convert VFP round insns to decodetree
Date: Tue, 11 Jun 2019 11:53:45 +0100
Convert the VFP round-to-integer instructions VRINTR, VRINTZ and
VRINTX to decodetree.
These instructions were only introduced as part of the "VFP misc"
additions in v8A, so we gate them on the aa32_vrint isar feature.
The old decoder incorrectly made them available even on v7A CPUs.
Signed-off-by: Peter Maydell <address@hidden>
Reviewed-by: Richard Henderson <address@hidden>
---
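Each pattern line added to vfp.decode below is compiled by
scripts/decodetree.py into a decoder that extracts the named fields
and calls the corresponding trans_<NAME>() function in
translate-vfp.inc.c. Here is a minimal hand-written sketch of that
mapping for VRINTX_sp; it is illustrative only (decode_vrintx_sp is a
made-up name, and the real generated code is shaped differently):

    /*
     * Illustrative sketch of the glue decodetree generates for
     *   VRINTX_sp ---- 1110 1.11 0111 .... 1010 01.0 .... vd=%vd_sp vm=%vm_sp
     * The %vd_sp and %vm_sp fields assemble single-precision register
     * numbers as Vd:D and Vm:M respectively.
     */
    typedef struct {
        int vd;
        int vm;
    } arg_VRINTX_sp;

    static bool decode_vrintx_sp(DisasContext *s, uint32_t insn)
    {
        arg_VRINTX_sp a;

        a.vd = (extract32(insn, 12, 4) << 1) | extract32(insn, 22, 1);
        a.vm = (extract32(insn, 0, 4) << 1) | extract32(insn, 5, 1);
        return trans_VRINTX_sp(s, &a);
    }

Note also that the VRINTZ handlers rely on helper_set_rmode()
returning the previous softfloat rounding mode: passing tcg_rmode as
both destination and source saves the old mode on the first call and
restores it on the second, around the round-to-zero rints/rintd call.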
 target/arm/translate-vfp.inc.c | 163 +++++++++++++++++++++++++++++++++
 target/arm/translate.c         |  45 +--------
 target/arm/vfp.decode          |  15 +++
 3 files changed, 179 insertions(+), 44 deletions(-)
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index a19ede86719..e94a8f2f0c5 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -2157,3 +2157,166 @@ static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a)
     tcg_temp_free_i32(tmp);
     return true;
 }
+
+static bool trans_VRINTR_sp(DisasContext *s, arg_VRINTR_sp *a)
+{
+    TCGv_ptr fpst;
+    TCGv_i32 tmp;
+
+    if (!dc_isar_feature(aa32_vrint, s)) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    tmp = tcg_temp_new_i32();
+    neon_load_reg32(tmp, a->vm);
+    fpst = get_fpstatus_ptr(false);
+    gen_helper_rints(tmp, tmp, fpst);
+    neon_store_reg32(tmp, a->vd);
+    tcg_temp_free_ptr(fpst);
+    tcg_temp_free_i32(tmp);
+    return true;
+}
+
+static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_dp *a)
+{
+    TCGv_ptr fpst;
+    TCGv_i64 tmp;
+
+    if (!dc_isar_feature(aa32_vrint, s)) {
+        return false;
+    }
+
+    /* UNDEF accesses to D16-D31 if they don't exist. */
+    if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    tmp = tcg_temp_new_i64();
+    neon_load_reg64(tmp, a->vm);
+    fpst = get_fpstatus_ptr(false);
+    gen_helper_rintd(tmp, tmp, fpst);
+    neon_store_reg64(tmp, a->vd);
+    tcg_temp_free_ptr(fpst);
+    tcg_temp_free_i64(tmp);
+    return true;
+}
+
+static bool trans_VRINTZ_sp(DisasContext *s, arg_VRINTZ_sp *a)
+{
+    TCGv_ptr fpst;
+    TCGv_i32 tmp;
+    TCGv_i32 tcg_rmode;
+
+    if (!dc_isar_feature(aa32_vrint, s)) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    tmp = tcg_temp_new_i32();
+    neon_load_reg32(tmp, a->vm);
+    fpst = get_fpstatus_ptr(false);
+    tcg_rmode = tcg_const_i32(float_round_to_zero);
+    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+    gen_helper_rints(tmp, tmp, fpst);
+    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+    neon_store_reg32(tmp, a->vd);
+    tcg_temp_free_ptr(fpst);
+    tcg_temp_free_i32(tcg_rmode);
+    tcg_temp_free_i32(tmp);
+    return true;
+}
+
+static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_dp *a)
+{
+    TCGv_ptr fpst;
+    TCGv_i64 tmp;
+    TCGv_i32 tcg_rmode;
+
+    if (!dc_isar_feature(aa32_vrint, s)) {
+        return false;
+    }
+
+    /* UNDEF accesses to D16-D31 if they don't exist. */
+    if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    tmp = tcg_temp_new_i64();
+    neon_load_reg64(tmp, a->vm);
+    fpst = get_fpstatus_ptr(false);
+    tcg_rmode = tcg_const_i32(float_round_to_zero);
+    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+    gen_helper_rintd(tmp, tmp, fpst);
+    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+    neon_store_reg64(tmp, a->vd);
+    tcg_temp_free_ptr(fpst);
+    tcg_temp_free_i64(tmp);
+    tcg_temp_free_i32(tcg_rmode);
+    return true;
+}
+
+static bool trans_VRINTX_sp(DisasContext *s, arg_VRINTX_sp *a)
+{
+    TCGv_ptr fpst;
+    TCGv_i32 tmp;
+
+    if (!dc_isar_feature(aa32_vrint, s)) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    tmp = tcg_temp_new_i32();
+    neon_load_reg32(tmp, a->vm);
+    fpst = get_fpstatus_ptr(false);
+    gen_helper_rints_exact(tmp, tmp, fpst);
+    neon_store_reg32(tmp, a->vd);
+    tcg_temp_free_ptr(fpst);
+    tcg_temp_free_i32(tmp);
+    return true;
+}
+
+static bool trans_VRINTX_dp(DisasContext *s, arg_VRINTX_dp *a)
+{
+    TCGv_ptr fpst;
+    TCGv_i64 tmp;
+
+    if (!dc_isar_feature(aa32_vrint, s)) {
+        return false;
+    }
+
+    /* UNDEF accesses to D16-D31 if they don't exist. */
+    if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    tmp = tcg_temp_new_i64();
+    neon_load_reg64(tmp, a->vm);
+    fpst = get_fpstatus_ptr(false);
+    gen_helper_rintd_exact(tmp, tmp, fpst);
+    neon_store_reg64(tmp, a->vd);
+    tcg_temp_free_ptr(fpst);
+    tcg_temp_free_i64(tmp);
+    return true;
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index 143b250a996..412d8aaedb2 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -3050,7 +3050,7 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
                 return 1;
             case 15:
                 switch (rn) {
-                case 0 ... 11:
+                case 0 ... 14:
                     /* Already handled by decodetree */
                     return 1;
                 default:
@@ -3063,11 +3063,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
             if (op == 15) {
                 /* rn is opcode, encoded as per VFP_SREG_N. */
                 switch (rn) {
-                case 0x0c: /* vrintr */
-                case 0x0d: /* vrintz */
-                case 0x0e: /* vrintx */
-                    break;
-
                 case 0x0f: /* vcvt double<->single */
                     rd_is_dp = !dp;
                     break;
@@ -3190,44 +3185,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
                 switch (op) {
                 case 15: /* extension space */
                     switch (rn) {
-                    case 12: /* vrintr */
-                    {
-                        TCGv_ptr fpst = get_fpstatus_ptr(0);
-                        if (dp) {
-                            gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
-                        } else {
-                            gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
-                        }
-                        tcg_temp_free_ptr(fpst);
-                        break;
-                    }
-                    case 13: /* vrintz */
-                    {
-                        TCGv_ptr fpst = get_fpstatus_ptr(0);
-                        TCGv_i32 tcg_rmode;
-                        tcg_rmode = tcg_const_i32(float_round_to_zero);
-                        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
-                        if (dp) {
-                            gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
-                        } else {
-                            gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
-                        }
-                        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
-                        tcg_temp_free_i32(tcg_rmode);
-                        tcg_temp_free_ptr(fpst);
-                        break;
-                    }
-                    case 14: /* vrintx */
-                    {
-                        TCGv_ptr fpst = get_fpstatus_ptr(0);
-                        if (dp) {
-                            gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
-                        } else {
-                            gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
-                        }
-                        tcg_temp_free_ptr(fpst);
-                        break;
-                    }
                     case 15: /* single<->double conversion */
                         if (dp) {
                             gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
index b88d1d06f02..9942d2ae7ad 100644
--- a/target/arm/vfp.decode
+++ b/target/arm/vfp.decode
@@ -193,3 +193,18 @@ VCVT_f16_f32 ---- 1110 1.11 0011 .... 1010 t:1 1.0 .... \
              vd=%vd_sp vm=%vm_sp
 VCVT_f16_f64 ---- 1110 1.11 0011 .... 1011 t:1 1.0 .... \
              vd=%vd_sp vm=%vm_dp
+
+VRINTR_sp    ---- 1110 1.11 0110 .... 1010 01.0 .... \
+             vd=%vd_sp vm=%vm_sp
+VRINTR_dp    ---- 1110 1.11 0110 .... 1011 01.0 .... \
+             vd=%vd_dp vm=%vm_dp
+
+VRINTZ_sp    ---- 1110 1.11 0110 .... 1010 11.0 .... \
+             vd=%vd_sp vm=%vm_sp
+VRINTZ_dp    ---- 1110 1.11 0110 .... 1011 11.0 .... \
+             vd=%vd_dp vm=%vm_dp
+
+VRINTX_sp    ---- 1110 1.11 0111 .... 1010 01.0 .... \
+             vd=%vd_sp vm=%vm_sp
+VRINTX_dp    ---- 1110 1.11 0111 .... 1011 01.0 .... \
+             vd=%vd_dp vm=%vm_dp
--
2.20.1