[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[qemu-s390x] [PATCH v2 20/41] s390x/tcg: Implement VECTOR MULTIPLY *
From: |
David Hildenbrand |
Subject: |
[qemu-s390x] [PATCH v2 20/41] s390x/tcg: Implement VECTOR MULTIPLY * |
Date: |
Tue, 16 Apr 2019 20:52:40 +0200 |
Yet another set of variants. Implement them similarly to VECTOR MULTIPLY AND
ADD *. At least for one variant we have a gvec helper we can reuse.
Reviewed-by: Richard Henderson <address@hidden>
Signed-off-by: David Hildenbrand <address@hidden>
---
target/s390x/helper.h | 16 +++++
target/s390x/insn-data.def | 14 +++++
target/s390x/translate_vx.inc.c | 88 ++++++++++++++++++++++++++++
target/s390x/vec_int_helper.c | 100 ++++++++++++++++++++++++++++++++
4 files changed, 218 insertions(+)
diff --git a/target/s390x/helper.h b/target/s390x/helper.h
index 924f97c59d..01b2009271 100644
--- a/target/s390x/helper.h
+++ b/target/s390x/helper.h
@@ -182,6 +182,22 @@ DEF_HELPER_FLAGS_5(gvec_vmao32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32)
DEF_HELPER_FLAGS_5(gvec_vmalo8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr,
i32)
DEF_HELPER_FLAGS_5(gvec_vmalo16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr,
i32)
DEF_HELPER_FLAGS_5(gvec_vmalo32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr,
i32)
+DEF_HELPER_FLAGS_4(gvec_vmh8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vmh16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vmlh8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vmlh16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vme8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vme16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vme32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vmle8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vmle16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vmle32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vmo8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vmo16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vmo32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vmlo8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vmlo16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vmlo32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
#ifndef CONFIG_USER_ONLY
DEF_HELPER_3(servc, i32, env, i64, i64)
diff --git a/target/s390x/insn-data.def b/target/s390x/insn-data.def
index 7ccec0544f..2c794a2744 100644
--- a/target/s390x/insn-data.def
+++ b/target/s390x/insn-data.def
@@ -1120,6 +1120,20 @@
F(0xe7af, VMAO, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC)
/* VECTOR MULTIPLY AND ADD LOGICAL ODD */
F(0xe7ad, VMALO, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC)
+/* VECTOR MULTIPLY HIGH */
+ F(0xe7a3, VMH, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC)
+/* VECTOR MULTIPLY LOGICAL HIGH */
+ F(0xe7a1, VMLH, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC)
+/* VECTOR MULTIPLY LOW */
+ F(0xe7a2, VML, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC)
+/* VECTOR MULTIPLY EVEN */
+ F(0xe7a6, VME, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC)
+/* VECTOR MULTIPLY LOGICAL EVEN */
+ F(0xe7a4, VMLE, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC)
+/* VECTOR MULTIPLY ODD */
+ F(0xe7a7, VMO, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC)
+/* VECTOR MULTIPLY LOGICAL ODD */
+ F(0xe7a5, VMLO, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC)
#ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE */
diff --git a/target/s390x/translate_vx.inc.c b/target/s390x/translate_vx.inc.c
index 48abb28f91..fddab1f47e 100644
--- a/target/s390x/translate_vx.inc.c
+++ b/target/s390x/translate_vx.inc.c
@@ -1727,3 +1727,91 @@ static DisasJumpType op_vma(DisasContext *s, DisasOps *o)
get_field(s->fields, v3), get_field(s->fields, v4), fn);
return DISAS_NEXT;
}
+
+static void gen_mh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t = tcg_temp_new_i32();
+
+ tcg_gen_muls2_i32(t, d, a, b);
+ tcg_temp_free_i32(t);
+}
+
+static void gen_mlh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t = tcg_temp_new_i32();
+
+ tcg_gen_mulu2_i32(t, d, a, b);
+ tcg_temp_free_i32(t);
+}
+
+static DisasJumpType op_vm(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = get_field(s->fields, m4);
+ static const GVecGen3 g_vmh[3] = {
+ { .fno = gen_helper_gvec_vmh8, },
+ { .fno = gen_helper_gvec_vmh16, },
+ { .fni4 = gen_mh_i32, },
+ };
+ static const GVecGen3 g_vmlh[3] = {
+ { .fno = gen_helper_gvec_vmlh8, },
+ { .fno = gen_helper_gvec_vmlh16, },
+ { .fni4 = gen_mlh_i32, },
+ };
+ static const GVecGen3 g_vme[3] = {
+ { .fno = gen_helper_gvec_vme8, },
+ { .fno = gen_helper_gvec_vme16, },
+ { .fno = gen_helper_gvec_vme32, },
+ };
+ static const GVecGen3 g_vmle[3] = {
+ { .fno = gen_helper_gvec_vmle8, },
+ { .fno = gen_helper_gvec_vmle16, },
+ { .fno = gen_helper_gvec_vmle32, },
+ };
+ static const GVecGen3 g_vmo[3] = {
+ { .fno = gen_helper_gvec_vmo8, },
+ { .fno = gen_helper_gvec_vmo16, },
+ { .fno = gen_helper_gvec_vmo32, },
+ };
+ static const GVecGen3 g_vmlo[3] = {
+ { .fno = gen_helper_gvec_vmlo8, },
+ { .fno = gen_helper_gvec_vmlo16, },
+ { .fno = gen_helper_gvec_vmlo32, },
+ };
+ const GVecGen3 *fn;
+
+ if (es > ES_32) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ switch (s->fields->op2) {
+ case 0xa2:
+ gen_gvec_fn_3(mul, es, get_field(s->fields, v1),
+ get_field(s->fields, v2), get_field(s->fields, v3));
+ return DISAS_NEXT;
+ case 0xa3:
+ fn = &g_vmh[es];
+ break;
+ case 0xa1:
+ fn = &g_vmlh[es];
+ break;
+ case 0xa6:
+ fn = &g_vme[es];
+ break;
+ case 0xa4:
+ fn = &g_vmle[es];
+ break;
+ case 0xa7:
+ fn = &g_vmo[es];
+ break;
+ case 0xa5:
+ fn = &g_vmlo[es];
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ gen_gvec_3(get_field(s->fields, v1), get_field(s->fields, v2),
+ get_field(s->fields, v3), fn);
+ return DISAS_NEXT;
+}
diff --git a/target/s390x/vec_int_helper.c b/target/s390x/vec_int_helper.c
index fb4c1422f3..9bb607af05 100644
--- a/target/s390x/vec_int_helper.c
+++ b/target/s390x/vec_int_helper.c
@@ -378,3 +378,103 @@ void HELPER(gvec_vmalo##BITS)(void *v1, const void *v2,
const void *v3, \
DEF_VMALO(8, 16)
DEF_VMALO(16, 32)
DEF_VMALO(32, 64)
+
+#define DEF_VMH(BITS)
\
+void HELPER(gvec_vmh##BITS)(void *v1, const void *v2, const void *v3,
\
+ uint32_t desc)
\
+{
\
+ int i;
\
+
\
+ for (i = 0; i < (128 / BITS); i++) {
\
+ const int32_t a = (int##BITS##_t)s390_vec_read_element##BITS(v2, i);
\
+ const int32_t b = (int##BITS##_t)s390_vec_read_element##BITS(v3, i);
\
+
\
+ s390_vec_write_element##BITS(v1, i, (a * b) >> BITS);
\
+ }
\
+}
+DEF_VMH(8)
+DEF_VMH(16)
+
+#define DEF_VMLH(BITS)
\
+void HELPER(gvec_vmlh##BITS)(void *v1, const void *v2, const void *v3,
\
+ uint32_t desc)
\
+{
\
+ int i;
\
+
\
+ for (i = 0; i < (128 / BITS); i++) {
\
+ const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i);
\
+ const uint##BITS##_t b = s390_vec_read_element##BITS(v3, i);
\
+
\
+ s390_vec_write_element##BITS(v1, i, (a * b) >> BITS);
\
+ }
\
+}
+DEF_VMLH(8)
+DEF_VMLH(16)
+
+#define DEF_VME(BITS, TBITS)
\
+void HELPER(gvec_vme##BITS)(void *v1, const void *v2, const void *v3,
\
+ uint32_t desc)
\
+{
\
+ int i, j;
\
+
\
+ for (i = 0, j = 0; i < (128 / TBITS); i++, j += 2) {
\
+ int##TBITS##_t a = (int##BITS##_t)s390_vec_read_element##BITS(v2, j);
\
+ int##TBITS##_t b = (int##BITS##_t)s390_vec_read_element##BITS(v3, j);
\
+
\
+ s390_vec_write_element##TBITS(v1, i, a * b);
\
+ }
\
+}
+DEF_VME(8, 16)
+DEF_VME(16, 32)
+DEF_VME(32, 64)
+
+#define DEF_VMLE(BITS, TBITS)
\
+void HELPER(gvec_vmle##BITS)(void *v1, const void *v2, const void *v3,
\
+ uint32_t desc)
\
+{
\
+ int i, j;
\
+
\
+ for (i = 0, j = 0; i < (128 / TBITS); i++, j += 2) {
\
+ const uint##TBITS##_t a = s390_vec_read_element##BITS(v2, j);
\
+ const uint##TBITS##_t b = s390_vec_read_element##BITS(v3, j);
\
+
\
+ s390_vec_write_element##TBITS(v1, i, a * b);
\
+ }
\
+}
+DEF_VMLE(8, 16)
+DEF_VMLE(16, 32)
+DEF_VMLE(32, 64)
+
+#define DEF_VMO(BITS, TBITS)
\
+void HELPER(gvec_vmo##BITS)(void *v1, const void *v2, const void *v3,
\
+ uint32_t desc)
\
+{
\
+ int i, j;
\
+
\
+ for (i = 0, j = 1; i < (128 / TBITS); i++, j += 2) {
\
+ int##TBITS##_t a = (int##BITS##_t)s390_vec_read_element##BITS(v2, j);
\
+ int##TBITS##_t b = (int##BITS##_t)s390_vec_read_element##BITS(v3, j);
\
+
\
+ s390_vec_write_element##TBITS(v1, i, a * b);
\
+ }
\
+}
+DEF_VMO(8, 16)
+DEF_VMO(16, 32)
+DEF_VMO(32, 64)
+
+#define DEF_VMLO(BITS, TBITS)
\
+void HELPER(gvec_vmlo##BITS)(void *v1, const void *v2, const void *v3,
\
+ uint32_t desc)
\
+{
\
+ int i, j;
\
+
\
+ for (i = 0, j = 0; i < (128 / TBITS); i++, j += 2) {
\
+ const uint##TBITS##_t a = s390_vec_read_element##BITS(v2, j);
\
+ const uint##TBITS##_t b = s390_vec_read_element##BITS(v3, j);
\
+
\
+ s390_vec_write_element##TBITS(v1, i, a * b);
\
+ }
\
+}
+DEF_VMLO(8, 16)
+DEF_VMLO(16, 32)
+DEF_VMLO(32, 64)
--
2.20.1
- [qemu-s390x] [PATCH v2 12/41] s390x/tcg: Implement VECTOR COUNT LEADING ZEROS, (continued)
- [qemu-s390x] [PATCH v2 12/41] s390x/tcg: Implement VECTOR COUNT LEADING ZEROS, David Hildenbrand, 2019/04/16
- [qemu-s390x] [PATCH v2 14/41] s390x/tcg: Implement VECTOR EXCLUSIVE OR, David Hildenbrand, 2019/04/16
- [qemu-s390x] [PATCH v2 16/41] s390x/tcg: Implement VECTOR LOAD COMPLEMENT, David Hildenbrand, 2019/04/16
- [qemu-s390x] [PATCH v2 15/41] s390x/tcg: Implement VECTOR GALOIS FIELD MULTIPLY SUM (AND ACCUMULATE), David Hildenbrand, 2019/04/16
- [qemu-s390x] [PATCH v2 17/41] s390x/tcg: Implement VECTOR LOAD POSITIVE, David Hildenbrand, 2019/04/16
- [qemu-s390x] [PATCH v2 23/41] s390x/tcg: Implement VECTOR NOT EXCLUSIVE OR, David Hildenbrand, 2019/04/16
- [qemu-s390x] [PATCH v2 18/41] s390x/tcg: Implement VECTOR (MAXIMUM|MINIMUM) (LOGICAL), David Hildenbrand, 2019/04/16
- [qemu-s390x] [PATCH v2 21/41] s390x/tcg: Implement VECTOR NAND, David Hildenbrand, 2019/04/16
- [qemu-s390x] [PATCH v2 19/41] s390x/tcg: Implement VECTOR MULTIPLY AND ADD *, David Hildenbrand, 2019/04/16
- [qemu-s390x] [PATCH v2 25/41] s390x/tcg: Implement VECTOR OR WITH COMPLEMENT, David Hildenbrand, 2019/04/16
- [qemu-s390x] [PATCH v2 20/41] s390x/tcg: Implement VECTOR MULTIPLY *,
David Hildenbrand <=
- [qemu-s390x] [PATCH v2 24/41] s390x/tcg: Implement VECTOR OR, David Hildenbrand, 2019/04/16
- [qemu-s390x] [PATCH v2 22/41] s390x/tcg: Implement VECTOR NOR, David Hildenbrand, 2019/04/16
- [qemu-s390x] [PATCH v2 26/41] s390x/tcg: Implement VECTOR POPULATION COUNT, David Hildenbrand, 2019/04/16
- [qemu-s390x] [PATCH v2 28/41] s390x/tcg: Implement VECTOR ELEMENT ROTATE AND INSERT UNDER MASK, David Hildenbrand, 2019/04/16
- [qemu-s390x] [PATCH v2 27/41] s390x/tcg: Implement VECTOR ELEMENT ROTATE LEFT LOGICAL, David Hildenbrand, 2019/04/16
- [qemu-s390x] [PATCH v2 29/41] s390x/tcg: Implement VECTOR ELEMENT SHIFT, David Hildenbrand, 2019/04/16
- [qemu-s390x] [PATCH v2 30/41] s390x/tcg: Implement VECTOR SHIFT LEFT (BY BYTE), David Hildenbrand, 2019/04/16
- [qemu-s390x] [PATCH v2 31/41] s390x/tcg: Implement VECTOR SHIFT LEFT DOUBLE BY BYTE, David Hildenbrand, 2019/04/16