[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[qemu-s390x] [PATCH v1 20/41] s390x/tcg: Implement VECTOR MULTIPLY *
From: |
David Hildenbrand |
Subject: |
[qemu-s390x] [PATCH v1 20/41] s390x/tcg: Implement VECTOR MULTIPLY * |
Date: |
Thu, 11 Apr 2019 12:08:15 +0200 |
Yet another set of variants. Implement them similarly to VECTOR MULTIPLY AND
ADD *. At least for one variant we have a gvec helper we can reuse.
Signed-off-by: David Hildenbrand <address@hidden>
---
target/s390x/helper.h | 16 +++++
target/s390x/insn-data.def | 14 +++++
target/s390x/translate_vx.inc.c | 100 ++++++++++++++++++++++++++++++++
target/s390x/vec_int_helper.c | 100 ++++++++++++++++++++++++++++++++
4 files changed, 230 insertions(+)
diff --git a/target/s390x/helper.h b/target/s390x/helper.h
index b73a35107e..a44cc462ae 100644
--- a/target/s390x/helper.h
+++ b/target/s390x/helper.h
@@ -184,6 +184,22 @@ DEF_HELPER_FLAGS_5(gvec_vmao32, TCG_CALL_NO_RWG, void,
ptr, cptr, cptr, cptr, i3
DEF_HELPER_FLAGS_5(gvec_vmalo8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr,
i32)
DEF_HELPER_FLAGS_5(gvec_vmalo16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr,
i32)
DEF_HELPER_FLAGS_5(gvec_vmalo32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr,
i32)
+DEF_HELPER_FLAGS_4(gvec_vmh8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vmh16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vmlh8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vmlh16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vme8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vme16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vme32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vmle8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vmle16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vmle32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vmo8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vmo16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vmo32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vmlo8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vmlo16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vmlo32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
#ifndef CONFIG_USER_ONLY
DEF_HELPER_3(servc, i32, env, i64, i64)
diff --git a/target/s390x/insn-data.def b/target/s390x/insn-data.def
index 7ccec0544f..2c794a2744 100644
--- a/target/s390x/insn-data.def
+++ b/target/s390x/insn-data.def
@@ -1120,6 +1120,20 @@
F(0xe7af, VMAO, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC)
/* VECTOR MULTIPLY AND ADD LOGICAL ODD */
F(0xe7ad, VMALO, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC)
+/* VECTOR MULTIPLY HIGH */
+ F(0xe7a3, VMH, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC)
+/* VECTOR MULTIPLY LOGICAL HIGH */
+ F(0xe7a1, VMLH, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC)
+/* VECTOR MULTIPLY LOW */
+ F(0xe7a2, VML, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC)
+/* VECTOR MULTIPLY EVEN */
+ F(0xe7a6, VME, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC)
+/* VECTOR MULTIPLY LOGICAL EVEN */
+ F(0xe7a4, VMLE, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC)
+/* VECTOR MULTIPLY ODD */
+ F(0xe7a7, VMO, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC)
+/* VECTOR MULTIPLY LOGICAL ODD */
+ F(0xe7a5, VMLO, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC)
#ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE */
diff --git a/target/s390x/translate_vx.inc.c b/target/s390x/translate_vx.inc.c
index 4967af6a07..53bbb4a2ce 100644
--- a/target/s390x/translate_vx.inc.c
+++ b/target/s390x/translate_vx.inc.c
@@ -1689,3 +1689,103 @@ static DisasJumpType op_vma(DisasContext *s, DisasOps
*o)
get_field(s->fields, v3), get_field(s->fields, v4), fn);
return DISAS_NEXT;
}
+
+static void gen_mh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+
+ tcg_gen_ext_i32_i64(t0, a);
+ tcg_gen_ext_i32_i64(t1, b);
+ tcg_gen_mul_i64(t0, t0, t1);
+ tcg_gen_extrh_i64_i32(d, t0);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+
+static void gen_mlh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+
+ tcg_gen_extu_i32_i64(t0, a);
+ tcg_gen_extu_i32_i64(t1, b);
+ tcg_gen_mul_i64(t0, t0, t1);
+ tcg_gen_extrh_i64_i32(d, t0);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+
+/*
+ * Common handler for the VECTOR MULTIPLY * instructions (VML, VMH, VMLH,
+ * VME, VMLE, VMO, VMLO), dispatching on the second opcode byte.
+ */
+static DisasJumpType op_vm(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = get_field(s->fields, m4);
+ /* 8/16-bit variants use out-of-line helpers; 32-bit has an inline fni4 */
+ static const GVecGen3 g_vmh[3] = {
+ { .fno = gen_helper_gvec_vmh8, },
+ { .fno = gen_helper_gvec_vmh16, },
+ { .fni4 = gen_mh_i32, },
+ };
+ static const GVecGen3 g_vmlh[3] = {
+ { .fno = gen_helper_gvec_vmlh8, },
+ { .fno = gen_helper_gvec_vmlh16, },
+ { .fni4 = gen_mlh_i32, },
+ };
+ /* the even/odd variants widen the result, so all sizes need helpers */
+ static const GVecGen3 g_vme[3] = {
+ { .fno = gen_helper_gvec_vme8, },
+ { .fno = gen_helper_gvec_vme16, },
+ { .fno = gen_helper_gvec_vme32, },
+ };
+ static const GVecGen3 g_vmle[3] = {
+ { .fno = gen_helper_gvec_vmle8, },
+ { .fno = gen_helper_gvec_vmle16, },
+ { .fno = gen_helper_gvec_vmle32, },
+ };
+ static const GVecGen3 g_vmo[3] = {
+ { .fno = gen_helper_gvec_vmo8, },
+ { .fno = gen_helper_gvec_vmo16, },
+ { .fno = gen_helper_gvec_vmo32, },
+ };
+ static const GVecGen3 g_vmlo[3] = {
+ { .fno = gen_helper_gvec_vmlo8, },
+ { .fno = gen_helper_gvec_vmlo16, },
+ { .fno = gen_helper_gvec_vmlo32, },
+ };
+ const GVecGen3 *fn;
+
+ /* only byte, halfword and word element sizes are valid */
+ if (es > ES_32) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ switch (s->fields->op2) {
+ case 0xa2:
+ /* VML: low half of the product -- maps to the generic gvec "mul" */
+ gen_gvec_fn_3(mul, es, get_field(s->fields, v1),
+ get_field(s->fields, v2), get_field(s->fields, v3));
+ return DISAS_NEXT;
+ case 0xa3: /* VMH */
+ fn = &g_vmh[es];
+ break;
+ case 0xa1: /* VMLH */
+ fn = &g_vmlh[es];
+ break;
+ case 0xa6: /* VME */
+ fn = &g_vme[es];
+ break;
+ case 0xa4: /* VMLE */
+ fn = &g_vmle[es];
+ break;
+ case 0xa7: /* VMO */
+ fn = &g_vmo[es];
+ break;
+ case 0xa5: /* VMLO */
+ fn = &g_vmlo[es];
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ gen_gvec_3(get_field(s->fields, v1), get_field(s->fields, v2),
+ get_field(s->fields, v3), fn);
+ return DISAS_NEXT;
+}
diff --git a/target/s390x/vec_int_helper.c b/target/s390x/vec_int_helper.c
index 424f248325..b818c513a9 100644
--- a/target/s390x/vec_int_helper.c
+++ b/target/s390x/vec_int_helper.c
@@ -426,3 +426,103 @@ void HELPER(gvec_vmalo##BITS)(void *v1, const void *v2,
const void *v3, \
DEF_VMALO(8, 16)
DEF_VMALO(16, 32)
DEF_VMALO(32, 64)
+
+#define DEF_VMH(BITS)
\
+void HELPER(gvec_vmh##BITS)(void *v1, const void *v2, const void *v3,
\
+ uint32_t desc)
\
+{
\
+ int i;
\
+
\
+ for (i = 0; i < (128 / BITS); i++) {
\
+ const int32_t a = (int##BITS##_t)s390_vec_read_element##BITS(v2, i);
\
+ const int32_t b = (int##BITS##_t)s390_vec_read_element##BITS(v3, i);
\
+
\
+ s390_vec_write_element##BITS(v1, i, (a * b) >> BITS);
\
+ }
\
+}
+DEF_VMH(8)
+DEF_VMH(16)
+
+#define DEF_VMLH(BITS)
\
+void HELPER(gvec_vmlh##BITS)(void *v1, const void *v2, const void *v3,
\
+ uint32_t desc)
\
+{
\
+ int i;
\
+
\
+ for (i = 0; i < (128 / BITS); i++) {
\
+ const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i);
\
+ const uint##BITS##_t b = s390_vec_read_element##BITS(v3, i);
\
+
\
+ s390_vec_write_element##BITS(v1, i, (a * b) >> BITS);
\
+ }
\
+}
+DEF_VMLH(8)
+DEF_VMLH(16)
+
+#define DEF_VME(BITS, TBITS)
\
+void HELPER(gvec_vme##BITS)(void *v1, const void *v2, const void *v3,
\
+ uint32_t desc)
\
+{
\
+ int i, j;
\
+
\
+ for (i = 0, j = 0; i < (128 / TBITS); i++, j += 2) {
\
+ int##TBITS##_t a = (int##BITS##_t)s390_vec_read_element##BITS(v2, j);
\
+ int##TBITS##_t b = (int##BITS##_t)s390_vec_read_element##BITS(v3, j);
\
+
\
+ s390_vec_write_element##TBITS(v1, i, a * b);
\
+ }
\
+}
+DEF_VME(8, 16)
+DEF_VME(16, 32)
+DEF_VME(32, 64)
+
+#define DEF_VMLE(BITS, TBITS)
\
+void HELPER(gvec_vmle##BITS)(void *v1, const void *v2, const void *v3,
\
+ uint32_t desc)
\
+{
\
+ int i, j;
\
+
\
+ for (i = 0, j = 0; i < (128 / TBITS); i++, j += 2) {
\
+ const uint##TBITS##_t a = s390_vec_read_element##BITS(v2, j);
\
+ const uint##TBITS##_t b = s390_vec_read_element##BITS(v3, j);
\
+
\
+ s390_vec_write_element##TBITS(v1, i, a * b);
\
+ }
\
+}
+DEF_VMLE(8, 16)
+DEF_VMLE(16, 32)
+DEF_VMLE(32, 64)
+
+#define DEF_VMO(BITS, TBITS)
\
+void HELPER(gvec_vmo##BITS)(void *v1, const void *v2, const void *v3,
\
+ uint32_t desc)
\
+{
\
+ int i, j;
\
+
\
+ for (i = 0, j = 1; i < (128 / TBITS); i++, j += 2) {
\
+ int##TBITS##_t a = (int##BITS##_t)s390_vec_read_element##BITS(v2, j);
\
+ int##TBITS##_t b = (int##BITS##_t)s390_vec_read_element##BITS(v3, j);
\
+
\
+ s390_vec_write_element##TBITS(v1, i, a * b);
\
+ }
\
+}
+DEF_VMO(8, 16)
+DEF_VMO(16, 32)
+DEF_VMO(32, 64)
+
+#define DEF_VMLO(BITS, TBITS)
\
+void HELPER(gvec_vmlo##BITS)(void *v1, const void *v2, const void *v3,
\
+ uint32_t desc)
\
+{
\
+ int i, j;
\
+
\
+ for (i = 0, j = 0; i < (128 / TBITS); i++, j += 2) {
\
+ const uint##TBITS##_t a = s390_vec_read_element##BITS(v2, j);
\
+ const uint##TBITS##_t b = s390_vec_read_element##BITS(v3, j);
\
+
\
+ s390_vec_write_element##TBITS(v1, i, a * b);
\
+ }
\
+}
+DEF_VMLO(8, 16)
+DEF_VMLO(16, 32)
+DEF_VMLO(32, 64)
--
2.20.1
- Re: [qemu-s390x] [Qemu-devel] [PATCH v1 17/41] s390x/tcg: Implement VECTOR LOAD POSITIVE, (continued)
- [qemu-s390x] [PATCH v1 21/41] s390x/tcg: Implement VECTOR NAND, David Hildenbrand, 2019/04/11
- [qemu-s390x] [PATCH v1 18/41] s390x/tcg: Implement VECTOR (MAXIMUM|MINIMUM) (LOGICAL), David Hildenbrand, 2019/04/11
- [qemu-s390x] [PATCH v1 16/41] s390x/tcg: Implement VECTOR LOAD COMPLEMENT, David Hildenbrand, 2019/04/11
- [qemu-s390x] [PATCH v1 22/41] s390x/tcg: Implement VECTOR NOR, David Hildenbrand, 2019/04/11
- [qemu-s390x] [PATCH v1 20/41] s390x/tcg: Implement VECTOR MULTIPLY *,
David Hildenbrand <=
- [qemu-s390x] [PATCH v1 19/41] s390x/tcg: Implement VECTOR MULTIPLY AND ADD *, David Hildenbrand, 2019/04/11
- [qemu-s390x] [PATCH v1 14/41] s390x/tcg: Implement VECTOR EXCLUSIVE OR, David Hildenbrand, 2019/04/11
- [qemu-s390x] [PATCH v1 06/41] s390x/tcg: Implement VECTOR AND (WITH COMPLEMENT), David Hildenbrand, 2019/04/11
- [qemu-s390x] [PATCH v1 15/41] s390x/tcg: Implement VECTOR GALOIS FIELD MULTIPLY SUM (AND ACCUMULATE), David Hildenbrand, 2019/04/11