From: David Hildenbrand
Subject: Re: [Qemu-devel] [PATCH 12/38] tcg: Add gvec expanders for variable shift
Date: Tue, 23 Apr 2019 21:04:05 +0200
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Thunderbird/60.6.1
In order to use this on s390x for VECTOR ELEMENT SHIFT, I would add something like:
+static DisasJumpType op_vesv(DisasContext *s, DisasOps *o)
+{
+    const uint8_t es = get_field(s->fields, m4);
+    const uint8_t v1 = get_field(s->fields, v1);
+    const uint8_t v2 = get_field(s->fields, v2);
+    const uint8_t v3 = get_field(s->fields, v3);
+
+    if (es > ES_64) {
+        gen_program_exception(s, PGM_SPECIFICATION);
+        return DISAS_NORETURN;
+    }
+
+    switch (s->fields->op2) {
+    case 0x70:
+        gen_gvec_fn_3(shlv, es, v1, v2, v3);
+        break;
+    case 0x7a:
+        gen_gvec_fn_3(sarv, es, v1, v2, v3);
+        break;
+    case 0x78:
+        gen_gvec_fn_3(shrv, es, v1, v2, v3);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+
+    return DISAS_NEXT;
+}
We need to mask off invalid bits from the shift count. Can that be added?
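For illustration, a minimal sketch of the kind of masking I have in mind (assuming the expander's scalar fallback is a reasonable place for it; the function name is made up for the example):

/*
 * Example only: clamp the per-element shift count to the element width
 * before shifting, so out-of-range counts cannot invoke undefined
 * behaviour.  Shown for the 32-bit scalar fallback; the vector path and
 * the out-of-line helpers would need the same treatment.
 */
static void gen_shlv_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_andi_i32(t, b, 31);     /* keep only the low 5 bits */
    tcg_gen_shl_i32(d, a, t);
    tcg_temp_free_i32(t);
}

The 8/16/64-bit variants would mask with 7, 15 and 63 respectively, and the C helpers in tcg-runtime-gvec.c could apply the same mask to the count read from b.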
On 20.04.19 09:34, Richard Henderson wrote:
> Signed-off-by: Richard Henderson <address@hidden>
> ---
> accel/tcg/tcg-runtime.h | 15 ++++
> tcg/tcg-op-gvec.h | 7 ++
> tcg/tcg-op.h | 4 ++
> accel/tcg/tcg-runtime-gvec.c | 132 +++++++++++++++++++++++++++++++++++
> tcg/tcg-op-gvec.c | 87 +++++++++++++++++++++++
> tcg/tcg-op-vec.c | 15 ++++
> 6 files changed, 260 insertions(+)
>
> diff --git a/accel/tcg/tcg-runtime.h b/accel/tcg/tcg-runtime.h
> index dfe325625c..ed3ce5fd91 100644
> --- a/accel/tcg/tcg-runtime.h
> +++ b/accel/tcg/tcg-runtime.h
> @@ -254,6 +254,21 @@ DEF_HELPER_FLAGS_3(gvec_sar16i, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
> DEF_HELPER_FLAGS_3(gvec_sar32i, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
> DEF_HELPER_FLAGS_3(gvec_sar64i, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
>
> +DEF_HELPER_FLAGS_4(gvec_shl8v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
> +DEF_HELPER_FLAGS_4(gvec_shl16v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
> +DEF_HELPER_FLAGS_4(gvec_shl32v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
> +DEF_HELPER_FLAGS_4(gvec_shl64v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
> +
> +DEF_HELPER_FLAGS_4(gvec_shr8v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
> +DEF_HELPER_FLAGS_4(gvec_shr16v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
> +DEF_HELPER_FLAGS_4(gvec_shr32v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
> +DEF_HELPER_FLAGS_4(gvec_shr64v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
> +
> +DEF_HELPER_FLAGS_4(gvec_sar8v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
> +DEF_HELPER_FLAGS_4(gvec_sar16v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
> +DEF_HELPER_FLAGS_4(gvec_sar32v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
> +DEF_HELPER_FLAGS_4(gvec_sar64v, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
> +
> DEF_HELPER_FLAGS_4(gvec_eq8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
> DEF_HELPER_FLAGS_4(gvec_eq16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
> DEF_HELPER_FLAGS_4(gvec_eq32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
> diff --git a/tcg/tcg-op-gvec.h b/tcg/tcg-op-gvec.h
> index 850da32ded..1cd18a959a 100644
> --- a/tcg/tcg-op-gvec.h
> +++ b/tcg/tcg-op-gvec.h
> @@ -294,6 +294,13 @@ void tcg_gen_gvec_shri(unsigned vece, uint32_t dofs, uint32_t aofs,
> void tcg_gen_gvec_sari(unsigned vece, uint32_t dofs, uint32_t aofs,
> int64_t shift, uint32_t oprsz, uint32_t maxsz);
>
> +void tcg_gen_gvec_shlv(unsigned vece, uint32_t dofs, uint32_t aofs,
> + uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
> +void tcg_gen_gvec_shrv(unsigned vece, uint32_t dofs, uint32_t aofs,
> + uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
> +void tcg_gen_gvec_sarv(unsigned vece, uint32_t dofs, uint32_t aofs,
> + uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
> +
> void tcg_gen_gvec_cmp(TCGCond cond, unsigned vece, uint32_t dofs,
> uint32_t aofs, uint32_t bofs,
> uint32_t oprsz, uint32_t maxsz);
> diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h
> index 9fff9864f6..833c6330b5 100644
> --- a/tcg/tcg-op.h
> +++ b/tcg/tcg-op.h
> @@ -986,6 +986,10 @@ void tcg_gen_shli_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i);
> void tcg_gen_shri_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i);
> void tcg_gen_sari_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i);
>
> +void tcg_gen_shlv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
> +void tcg_gen_shrv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
> +void tcg_gen_sarv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
> +
> void tcg_gen_cmp_vec(TCGCond cond, unsigned vece, TCGv_vec r,
> TCGv_vec a, TCGv_vec b);
>
> diff --git a/accel/tcg/tcg-runtime-gvec.c b/accel/tcg/tcg-runtime-gvec.c
> index e2c6f24262..7b88f5590c 100644
> --- a/accel/tcg/tcg-runtime-gvec.c
> +++ b/accel/tcg/tcg-runtime-gvec.c
> @@ -725,6 +725,138 @@ void HELPER(gvec_sar64i)(void *d, void *a, uint32_t desc)
> clear_high(d, oprsz, desc);
> }
>
> +void HELPER(gvec_shl8v)(void *d, void *a, void *b, uint32_t desc)
> +{
> + intptr_t oprsz = simd_oprsz(desc);
> + intptr_t i;
> +
> + for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
> + *(uint8_t *)(d + i) = *(uint8_t *)(a + i) << *(uint8_t *)(b + i);
> + }
> + clear_high(d, oprsz, desc);
> +}
> +
> +void HELPER(gvec_shl16v)(void *d, void *a, void *b, uint32_t desc)
> +{
> + intptr_t oprsz = simd_oprsz(desc);
> + intptr_t i;
> +
> + for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
> + *(uint16_t *)(d + i) = *(uint16_t *)(a + i) << *(uint16_t *)(b + i);
> + }
> + clear_high(d, oprsz, desc);
> +}
> +
> +void HELPER(gvec_shl32v)(void *d, void *a, void *b, uint32_t desc)
> +{
> + intptr_t oprsz = simd_oprsz(desc);
> + intptr_t i;
> +
> + for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
> + *(uint32_t *)(d + i) = *(uint32_t *)(a + i) << *(uint32_t *)(b + i);
> + }
> + clear_high(d, oprsz, desc);
> +}
> +
> +void HELPER(gvec_shl64v)(void *d, void *a, void *b, uint32_t desc)
> +{
> + intptr_t oprsz = simd_oprsz(desc);
> + intptr_t i;
> +
> + for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
> + *(uint64_t *)(d + i) = *(uint64_t *)(a + i) << *(uint64_t *)(b + i);
> + }
> + clear_high(d, oprsz, desc);
> +}
> +
> +void HELPER(gvec_shr8v)(void *d, void *a, void *b, uint32_t desc)
> +{
> + intptr_t oprsz = simd_oprsz(desc);
> + intptr_t i;
> +
> + for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
> + *(uint8_t *)(d + i) = *(uint8_t *)(a + i) >> *(uint8_t *)(b + i);
> + }
> + clear_high(d, oprsz, desc);
> +}
> +
> +void HELPER(gvec_shr16v)(void *d, void *a, void *b, uint32_t desc)
> +{
> + intptr_t oprsz = simd_oprsz(desc);
> + intptr_t i;
> +
> + for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
> + *(uint16_t *)(d + i) = *(uint16_t *)(a + i) >> *(uint16_t *)(b + i);
> + }
> + clear_high(d, oprsz, desc);
> +}
> +
> +void HELPER(gvec_shr32v)(void *d, void *a, void *b, uint32_t desc)
> +{
> + intptr_t oprsz = simd_oprsz(desc);
> + intptr_t i;
> +
> + for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
> + *(uint32_t *)(d + i) = *(uint32_t *)(a + i) >> *(uint32_t *)(b + i);
> + }
> + clear_high(d, oprsz, desc);
> +}
> +
> +void HELPER(gvec_shr64v)(void *d, void *a, void *b, uint32_t desc)
> +{
> + intptr_t oprsz = simd_oprsz(desc);
> + intptr_t i;
> +
> + for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
> + *(uint64_t *)(d + i) = *(uint64_t *)(a + i) >> *(uint64_t *)(b + i);
> + }
> + clear_high(d, oprsz, desc);
> +}
> +
> +void HELPER(gvec_sar8v)(void *d, void *a, void *b, uint32_t desc)
> +{
> + intptr_t oprsz = simd_oprsz(desc);
> + intptr_t i;
> +
> + for (i = 0; i < oprsz; i += sizeof(int8_t)) {
> + *(int8_t *)(d + i) = *(int8_t *)(a + i) >> *(int8_t *)(b + i);
> + }
> + clear_high(d, oprsz, desc);
> +}
> +
> +void HELPER(gvec_sar16v)(void *d, void *a, void *b, uint32_t desc)
> +{
> + intptr_t oprsz = simd_oprsz(desc);
> + intptr_t i;
> +
> + for (i = 0; i < oprsz; i += sizeof(int16_t)) {
> + *(int16_t *)(d + i) = *(int16_t *)(a + i) >> *(int16_t *)(b + i);
> + }
> + clear_high(d, oprsz, desc);
> +}
> +
> +void HELPER(gvec_sar32v)(void *d, void *a, void *b, uint32_t desc)
> +{
> + intptr_t oprsz = simd_oprsz(desc);
> + intptr_t i;
> +
> + for (i = 0; i < oprsz; i += sizeof(int32_t)) {
> + *(int32_t *)(d + i) = *(int32_t *)(a + i) >> *(int32_t *)(b + i);
> + }
> + clear_high(d, oprsz, desc);
> +}
> +
> +void HELPER(gvec_sar64v)(void *d, void *a, void *b, uint32_t desc)
> +{
> + intptr_t oprsz = simd_oprsz(desc);
> + intptr_t i;
> +
> + for (i = 0; i < oprsz; i += sizeof(int64_t)) {
> + *(int64_t *)(d + i) = *(int64_t *)(a + i) >> *(int64_t *)(b + i);
> + }
> + clear_high(d, oprsz, desc);
> +}
> +
> /* If vectors are enabled, the compiler fills in -1 for true.
> Otherwise, we must take care of this by hand. */
> #ifdef CONFIG_VECTOR16
> diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
> index f056018713..5d28184045 100644
> --- a/tcg/tcg-op-gvec.c
> +++ b/tcg/tcg-op-gvec.c
> @@ -2382,6 +2382,93 @@ void tcg_gen_gvec_sari(unsigned vece, uint32_t dofs, uint32_t aofs,
> }
> }
>
> +void tcg_gen_gvec_shlv(unsigned vece, uint32_t dofs, uint32_t aofs,
> + uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
> +{
> + static const GVecGen3 g[4] = {
> + { .fniv = tcg_gen_shlv_vec,
> + .fno = gen_helper_gvec_shl8v,
> + .opc = INDEX_op_shlv_vec,
> + .vece = MO_8 },
> + { .fniv = tcg_gen_shlv_vec,
> + .fno = gen_helper_gvec_shl16v,
> + .opc = INDEX_op_shlv_vec,
> + .vece = MO_16 },
> + { .fni4 = tcg_gen_shl_i32,
> + .fniv = tcg_gen_shlv_vec,
> + .fno = gen_helper_gvec_shl32v,
> + .opc = INDEX_op_shlv_vec,
> + .vece = MO_32 },
> + { .fni8 = tcg_gen_shl_i64,
> + .fniv = tcg_gen_shlv_vec,
> + .fno = gen_helper_gvec_shl64v,
> + .opc = INDEX_op_shlv_vec,
> + .prefer_i64 = TCG_TARGET_REG_BITS == 64,
> + .vece = MO_64 },
> + };
> +
> + tcg_debug_assert(vece <= MO_64);
> + tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
> +}
> +
> +void tcg_gen_gvec_shrv(unsigned vece, uint32_t dofs, uint32_t aofs,
> + uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
> +{
> + static const GVecGen3 g[4] = {
> + { .fniv = tcg_gen_shrv_vec,
> + .fno = gen_helper_gvec_shr8v,
> + .opc = INDEX_op_shrv_vec,
> + .vece = MO_8 },
> + { .fniv = tcg_gen_shrv_vec,
> + .fno = gen_helper_gvec_shr16v,
> + .opc = INDEX_op_shrv_vec,
> + .vece = MO_16 },
> + { .fni4 = tcg_gen_shr_i32,
> + .fniv = tcg_gen_shrv_vec,
> + .fno = gen_helper_gvec_shr32v,
> + .opc = INDEX_op_shrv_vec,
> + .vece = MO_32 },
> + { .fni8 = tcg_gen_shr_i64,
> + .fniv = tcg_gen_shrv_vec,
> + .fno = gen_helper_gvec_shr64v,
> + .opc = INDEX_op_shrv_vec,
> + .prefer_i64 = TCG_TARGET_REG_BITS == 64,
> + .vece = MO_64 },
> + };
> +
> + tcg_debug_assert(vece <= MO_64);
> + tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
> +}
> +
> +void tcg_gen_gvec_sarv(unsigned vece, uint32_t dofs, uint32_t aofs,
> + uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
> +{
> + static const GVecGen3 g[4] = {
> + { .fniv = tcg_gen_sarv_vec,
> + .fno = gen_helper_gvec_sar8v,
> + .opc = INDEX_op_sarv_vec,
> + .vece = MO_8 },
> + { .fniv = tcg_gen_sarv_vec,
> + .fno = gen_helper_gvec_sar16v,
> + .opc = INDEX_op_sarv_vec,
> + .vece = MO_16 },
> + { .fni4 = tcg_gen_sar_i32,
> + .fniv = tcg_gen_sarv_vec,
> + .fno = gen_helper_gvec_sar32v,
> + .opc = INDEX_op_sarv_vec,
> + .vece = MO_32 },
> + { .fni8 = tcg_gen_sar_i64,
> + .fniv = tcg_gen_sarv_vec,
> + .fno = gen_helper_gvec_sar64v,
> + .opc = INDEX_op_sarv_vec,
> + .prefer_i64 = TCG_TARGET_REG_BITS == 64,
> + .vece = MO_64 },
> + };
> +
> + tcg_debug_assert(vece <= MO_64);
> + tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
> +}
> +
> /* Expand OPSZ bytes worth of three-operand operations using i32 elements.  */
> static void expand_cmp_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
> uint32_t oprsz, TCGCond cond)
> diff --git a/tcg/tcg-op-vec.c b/tcg/tcg-op-vec.c
> index ce7987b858..6601cb8a8f 100644
> --- a/tcg/tcg-op-vec.c
> +++ b/tcg/tcg-op-vec.c
> @@ -481,3 +481,18 @@ void tcg_gen_umax_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
> {
> do_op3(vece, r, a, b, INDEX_op_umax_vec);
> }
> +
> +void tcg_gen_shlv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
> +{
> + do_op3(vece, r, a, b, INDEX_op_shlv_vec);
> +}
> +
> +void tcg_gen_shrv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
> +{
> + do_op3(vece, r, a, b, INDEX_op_shrv_vec);
> +}
> +
> +void tcg_gen_sarv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
> +{
> + do_op3(vece, r, a, b, INDEX_op_sarv_vec);
> +}
>
--
Thanks,
David / dhildenb