[PATCH 40/55] target/arm: Implement MVE VQDMULL scalar
From: Peter Maydell
Subject: [PATCH 40/55] target/arm: Implement MVE VQDMULL scalar
Date: Mon, 7 Jun 2021 17:58:06 +0100
Implement the MVE VQDMULL scalar insn. This multiplies the top or
bottom half of each element by the scalar, doubles the product, and
saturates it to a double-width result.
Note that this encoding overlaps with VQADD and VQSUB; it uses
what in VQADD and VQSUB would be the 'size=0b11' encoding.
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/helper-mve.h | 5 +++
target/arm/mve.decode | 23 +++++++++++---
target/arm/mve_helper.c | 65 ++++++++++++++++++++++++++++++++++++++
target/arm/translate-mve.c | 30 ++++++++++++++++++
4 files changed, 119 insertions(+), 4 deletions(-)
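
As a reader's aid before the diff: the standalone C sketch below (not
part of the patch; the function name is made up) shows the 16x16->32
element operation the new helpers implement -- multiply the chosen
16-bit half by the scalar, double the product, and saturate to 32 bits.
For 16-bit inputs the only pair that can saturate is
INT16_MIN * INT16_MIN, whose doubled product is 2^31, one above
INT32_MAX.

#include <stdint.h>
#include <stdbool.h>

/* Standalone sketch of one 16x16->32 VQDMULL element operation. */
static int32_t qdmull16_sketch(int16_t n, int16_t m, bool *sat)
{
    /* Widen before multiplying so the doubled product cannot wrap in C. */
    int64_t r = (int64_t)n * m * 2;
    if (r > INT32_MAX) {
        /* Only reachable for INT16_MIN * INT16_MIN: 2 * 2^30 == 2^31. */
        *sat = true;
        return INT32_MAX;
    }
    return (int32_t)r;
}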
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index 9bab04305a7..55c4e41deff 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -203,6 +203,11 @@ DEF_HELPER_FLAGS_4(mve_vbrsrb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vbrsrh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vbrsrw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqdmullb_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqdmullb_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqdmullt_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqdmullt_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
DEF_HELPER_FLAGS_4(mve_vmlaldavsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
DEF_HELPER_FLAGS_4(mve_vmlaldavsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
DEF_HELPER_FLAGS_4(mve_vmlaldavxsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index 47ce6ebb83b..a71ad7252bf 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -23,6 +23,9 @@
%qm 5:1 1:3
%qn 7:1 17:3
+# VQDMULL has size in bit 28: 0 for 16 bit, 1 for 32 bit
+%size_28 28:1 !function=plus_1
+
&vldr_vstr rn qd imm p a w size l u
&1op qd qm size
&2op qd qm qn size
@@ -38,6 +41,7 @@
@2op_nosz .... .... .... .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn size=0
@2scalar .... .... .. size:2 .... .... .... .... rm:4 &2scalar qd=%qd qn=%qn
+@2scalar_nosz .... .... .... .... .... .... .... rm:4 &2scalar qd=%qd qn=%qn
# Vector loads and stores
@@ -168,15 +172,26 @@ VHADD_U_scalar 1111 1110 0 . .. ... 0 ... 0 1111 . 100 .... @2scalar
VHSUB_S_scalar 1110 1110 0 . .. ... 0 ... 1 1111 . 100 .... @2scalar
VHSUB_U_scalar 1111 1110 0 . .. ... 0 ... 1 1111 . 100 .... @2scalar
-VQADD_S_scalar 1110 1110 0 . .. ... 0 ... 0 1111 . 110 .... @2scalar
-VQADD_U_scalar 1111 1110 0 . .. ... 0 ... 0 1111 . 110 .... @2scalar
-VQSUB_S_scalar 1110 1110 0 . .. ... 0 ... 1 1111 . 110 .... @2scalar
-VQSUB_U_scalar 1111 1110 0 . .. ... 0 ... 1 1111 . 110 .... @2scalar
+{
+ VQADD_S_scalar 1110 1110 0 . .. ... 0 ... 0 1111 . 110 .... @2scalar
+ VQADD_U_scalar 1111 1110 0 . .. ... 0 ... 0 1111 . 110 .... @2scalar
+ VQDMULLB_scalar 111 . 1110 0 . 11 ... 0 ... 0 1111 . 110 .... @2scalar_nosz \
+ size=%size_28
+}
+
+{
+ VQSUB_S_scalar 1110 1110 0 . .. ... 0 ... 1 1111 . 110 .... @2scalar
+ VQSUB_U_scalar 1111 1110 0 . .. ... 0 ... 1 1111 . 110 .... @2scalar
+ VQDMULLT_scalar 111 . 1110 0 . 11 ... 0 ... 1 1111 . 110 .... @2scalar_nosz \
+ size=%size_28
+}
+
VBRSR 1111 1110 0 . .. ... 1 ... 1 1110 . 110 .... @2scalar
VQDMULH_scalar 1110 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar
VQRDMULH_scalar 1111 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar
+
# Predicate operations
%mask_22_13 22:1 13:3
VPST 1111 1110 0 . 11 000 1 ... 0 1111 0100 1101 mask=%mask_22_13
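
Two decode details here are worth spelling out. The new { } braces form
a decodetree overlap group: the patterns inside are tried in order, and
a trans_ function returning false sends the decoder on to the next
pattern. VQADD/VQSUB keep their wildcard size:2 field, but their helper
tables (see translate-mve.c below) have no entry for size=0b11, so for
that size they return false and decode falls through to VQDMULL, whose
size bits are fixed at 11. Separately, %size_28 passes the raw one-bit
field through a !function, so bit 28 == 0 decodes as size 1 (MO_16, the
16x16->32 form) and bit 28 == 1 as size 2 (MO_32, the 32x32->64 form).
The function involved is the trivial helper already present in
target/arm/translate.h:

static inline int plus_1(DisasContext *s, int x)
{
    return x + 1;
}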
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index 6e2da6ac8bc..97529531ed0 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -600,6 +600,71 @@ DO_2OP_SAT_SCALAR(vqrdmulh_scalarb, 1, int8_t, H1, DO_QRDMULH_B)
DO_2OP_SAT_SCALAR(vqrdmulh_scalarh, 2, int16_t, H2, DO_QRDMULH_H)
DO_2OP_SAT_SCALAR(vqrdmulh_scalarw, 4, int32_t, H4, DO_QRDMULH_W)
+/*
+ * Long saturating scalar ops. As with DO_2OP_L, TYPE and H are for the
+ * input (smaller) type and LESIZE, LTYPE, LH for the output (long) type.
+ * SATMASK specifies which bits of the predicate mask matter for determining
+ * whether to propagate a saturation indication into FPSCR.QC -- for
+ * the 16x16->32 case we must check only the bit corresponding to the T or B
+ * half that we used, but for the 32x32->64 case we propagate if the mask
+ * bit is set for either half.
+ */
+#define DO_2OP_SAT_SCALAR_L(OP, TOP, TYPE, H, LESIZE, LTYPE, LH, FN, SATMASK) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
+ uint32_t rm) \
+ { \
+ LTYPE *d = vd; \
+ TYPE *n = vn; \
+ TYPE m = rm; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned le; \
+ for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
+ bool sat = false; \
+ LTYPE r = FN((LTYPE)n[H(le * 2 + TOP)], m, &sat); \
+ uint64_t bytemask = mask_to_bytemask##LESIZE(mask); \
+ d[LH(le)] &= ~bytemask; \
+ d[LH(le)] |= (r & bytemask); \
+ if (sat && (mask & SATMASK)) { \
+ env->vfp.qc[0] = 1; \
+ } \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+static inline int32_t do_qdmullh(int16_t n, int16_t m, bool *sat)
+{
+ int64_t r = ((int64_t)n * m) * 2;
+ return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat);
+}
+
+static inline int64_t do_qdmullw(int32_t n, int32_t m, bool *sat)
+{
+ /* The multiply can't overflow, but the doubling might */
+ int64_t r = (int64_t)n * m;
+ if (r > INT64_MAX / 2) {
+ *sat = true;
+ return INT64_MAX;
+ } else if (r < INT64_MIN / 2) {
+ *sat = true;
+ return INT64_MIN;
+ } else {
+ return r * 2;
+ }
+}
+
+#define SATMASK16B 1
+#define SATMASK16T (1 << 2)
+#define SATMASK32 ((1 << 4) | 1)
+
+DO_2OP_SAT_SCALAR_L(vqdmullb_scalarh, 0, int16_t, H2, 4, int32_t, H4, \
+ do_qdmullh, SATMASK16B)
+DO_2OP_SAT_SCALAR_L(vqdmullb_scalarw, 0, int32_t, H4, 8, int64_t, , \
+ do_qdmullw, SATMASK32)
+DO_2OP_SAT_SCALAR_L(vqdmullt_scalarh, 1, int16_t, H2, 4, int32_t, H4, \
+ do_qdmullh, SATMASK16T)
+DO_2OP_SAT_SCALAR_L(vqdmullt_scalarw, 1, int32_t, H4, 8, int64_t, , \
+ do_qdmullw, SATMASK32)
+
static inline uint32_t do_vbrsrb(uint32_t n, uint32_t m)
{
m &= 0xff;
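
A quick sanity check on the "multiply can't overflow, but the doubling
might" comment in do_qdmullw above: the extreme products of two int32_t
values are INT32_MIN * INT32_MIN == 2^62 and INT32_MIN * INT32_MAX
(whose magnitude is just under 2^62), both of which fit in an int64_t,
so only the subsequent doubling can leave the 64-bit range. A standalone
check (not part of the patch) pinning down the boundary case:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    /* The largest possible 32x32 product: (-2^31) * (-2^31) == 2^62. */
    int64_t p = (int64_t)INT32_MIN * INT32_MIN;
    assert(p == INT64_C(1) << 62);
    /* It fits in int64_t, but doubling would overflow: 2^63 > INT64_MAX. */
    assert(p > INT64_MAX / 2);
    return 0;
}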
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index 4d08067c1e2..2bb7482e6af 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -477,6 +477,36 @@ DO_2OP_SCALAR(VQDMULH_scalar, vqdmulh_scalar)
DO_2OP_SCALAR(VQRDMULH_scalar, vqrdmulh_scalar)
DO_2OP_SCALAR(VBRSR, vbrsr)
+static bool trans_VQDMULLB_scalar(DisasContext *s, arg_2scalar *a)
+{
+ MVEGenTwoOpScalarFn *fns[] = {
+ NULL,
+ gen_helper_mve_vqdmullb_scalarh,
+ gen_helper_mve_vqdmullb_scalarw,
+ NULL,
+ };
+ if (a->qd == a->qn && a->size == MO_32) {
+ /* UNPREDICTABLE; we choose to undef */
+ return false;
+ }
+ return do_2op_scalar(s, a, fns[a->size]);
+}
+
+static bool trans_VQDMULLT_scalar(DisasContext *s, arg_2scalar *a)
+{
+ MVEGenTwoOpScalarFn *fns[] = {
+ NULL,
+ gen_helper_mve_vqdmullt_scalarh,
+ gen_helper_mve_vqdmullt_scalarw,
+ NULL,
+ };
+ if (a->qd == a->qn && a->size == MO_32) {
+ /* UNPREDICTABLE; we choose to undef */
+ return false;
+ }
+ return do_2op_scalar(s, a, fns[a->size]);
+}
+
static bool do_long_dual_acc(DisasContext *s, arg_vmlaldav *a,
MVEGenDualAccOpFn *fn)
{
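
One pattern worth noting in the translate functions above: the fns[]
tables are indexed by the decoded size, which after %size_28's plus_1
adjustment can only be 1 (MO_16) or 2 (MO_32). Entries 0 and 3 stay
NULL, and do_2op_scalar (added earlier in the series) rejects a NULL
function pointer, so any other size decodes as UNDEF. A minimal
self-contained sketch of that dispatch shape (hypothetical names, not
QEMU code):

#include <stdbool.h>
#include <stddef.h>

typedef bool HelperFn(void);

/* Reject unsupported element sizes via NULL table entries. */
static bool dispatch_by_size(int size, HelperFn *const fns[4])
{
    if (fns[size] == NULL) {
        return false; /* caller treats this as UNDEF */
    }
    return fns[size]();
}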
--
2.20.1