[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH qemu v4 14/14] target/riscv: rvv: Add tail agnostic for vector permutation instructions
From: |
~eopxd |
Subject: |
[PATCH qemu v4 14/14] target/riscv: rvv: Add tail agnostic for vector permutation instructions |
Date: |
Thu, 24 Mar 2022 19:03:53 -0000 |
From: eopXD <eop.chen@sifive.com>
Signed-off-by: eop Chen <eop.chen@sifive.com>
Reviewed-by: Frank Chang <frank.chang@sifive.com>
---
target/riscv/insn_trans/trans_rvv.c.inc | 20 ++++++++
target/riscv/vector_helper.c | 68 ++++++++++++++++++++++---
2 files changed, 82 insertions(+), 6 deletions(-)
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
index 8b24570e22..f037d1875c 100644
--- a/target/riscv/insn_trans/trans_rvv.c.inc
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
@@ -3724,6 +3724,15 @@ static bool trans_vrgather_vx(DisasContext *s, arg_rmrr *a)
}
if (a->vm && s->vl_eq_vlmax) {
+ if (s->vta && s->lmul < 0) {
+ /* tail elements may pass vlmax when lmul < 0
+ * set tail elements to 1s
+ */
+ uint32_t vlenb = s->cfg_ptr->vlen >> 3;
+ tcg_gen_gvec_ori(s->sew, vreg_ofs(s, a->rd),
+ vreg_ofs(s, a->rd), -1,
+ vlenb, vlenb);
+ }
int scale = s->lmul - (s->sew + 3);
int vlmax = scale < 0 ?
s->cfg_ptr->vlen >> -scale : s->cfg_ptr->vlen << scale;
@@ -3757,6 +3766,15 @@ static bool trans_vrgather_vi(DisasContext *s, arg_rmrr *a)
}
if (a->vm && s->vl_eq_vlmax) {
+ if (s->vta && s->lmul < 0) {
+ /* tail elements may pass vlmax when lmul < 0
+ * set tail elements to 1s
+ */
+ uint32_t vlenb = s->cfg_ptr->vlen >> 3;
+ tcg_gen_gvec_ori(s->sew, vreg_ofs(s, a->rd),
+ vreg_ofs(s, a->rd), -1,
+ vlenb, vlenb);
+ }
int scale = s->lmul - (s->sew + 3);
int vlmax = scale < 0 ?
s->cfg_ptr->vlen >> -scale : s->cfg_ptr->vlen << scale;
@@ -3809,6 +3827,7 @@ static bool trans_vcompress_vm(DisasContext *s, arg_r *a)
tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+ data = FIELD_DP32(data, VDATA, VTA, s->vta);
tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2),
cpu_env, s->cfg_ptr->vlen / 8,
@@ -3914,6 +3933,7 @@ static bool int_ext_op(DisasContext *s, arg_rmr *a, uint8_t seq)
}
data = FIELD_DP32(data, VDATA, VM, a->vm);
+ data = FIELD_DP32(data, VDATA, VTA, s->vta);
tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
vreg_ofs(s, a->rs2), cpu_env,
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index 0670489679..6e13d6bdcf 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -4944,6 +4944,10 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
{ \
uint32_t vm = vext_vm(desc); \
uint32_t vl = env->vl; \
+ uint32_t esz = sizeof(ETYPE); \
+ uint32_t total_elems = \
+ vext_get_total_elems(env_archcpu(env), env->vtype); \
+ uint32_t vta = vext_vta(desc); \
target_ulong offset = s1, i_min, i; \
\
i_min = MAX(env->vstart, offset); \
@@ -4953,6 +4957,9 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
} \
*((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - offset)); \
} \
+ /* set tail elements to 1s */ \
+ vext_set_elems_1s_fns[ctzl(esz)](vd, vta, vl, vl * esz, \
+ total_elems * esz); \
}
/* vslideup.vx vd, vs2, rs1, vm # vd[i+rs1] = vs2[i] */
@@ -4965,12 +4972,16 @@ GEN_VEXT_VSLIDEUP_VX(vslideup_vx_d, uint64_t, H8)
void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
CPURISCVState *env, uint32_t desc) \
{ \
- uint32_t vlmax = vext_max_elems(desc, ctzl(sizeof(ETYPE))); \
+ uint32_t max_elem = vext_max_elems(desc, ctzl(sizeof(ETYPE))); \
uint32_t vm = vext_vm(desc); \
uint32_t vl = env->vl; \
+ uint32_t esz = sizeof(ETYPE); \
+ uint32_t total_elems = \
+ vext_get_total_elems(env_archcpu(env), env->vtype); \
+ uint32_t vta = vext_vta(desc); \
target_ulong i_max, i; \
\
- i_max = MAX(MIN(s1 < vlmax ? vlmax - s1 : 0, vl), env->vstart); \
+ i_max = MAX(MIN(s1 < max_elem ? max_elem - s1 : 0, vl), env->vstart); \
for (i = env->vstart; i < i_max; ++i) { \
if (vm || vext_elem_mask(v0, i)) { \
*((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i + s1)); \
@@ -4984,6 +4995,9 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
} \
\
env->vstart = 0; \
+ /* set tail elements to 1s */ \
+ vext_set_elems_1s_fns[ctzl(esz)](vd, vta, vl, vl * esz, \
+ total_elems * esz); \
}
/* vslidedown.vx vd, vs2, rs1, vm # vd[i] = vs2[i+rs1] */
@@ -4999,6 +5013,10 @@ static void vslide1up_##BITWIDTH(void *vd, void *v0, target_ulong s1, \
typedef uint##BITWIDTH##_t ETYPE; \
uint32_t vm = vext_vm(desc); \
uint32_t vl = env->vl; \
+ uint32_t esz = sizeof(ETYPE); \
+ uint32_t total_elems = \
+ vext_get_total_elems(env_archcpu(env), env->vtype); \
+ uint32_t vta = vext_vta(desc); \
uint32_t i; \
\
for (i = env->vstart; i < vl; i++) { \
@@ -5012,6 +5030,9 @@ static void vslide1up_##BITWIDTH(void *vd, void *v0, target_ulong s1, \
} \
} \
env->vstart = 0; \
+ /* set tail elements to 1s */ \
+ vext_set_elems_1s_fns[ctzl(esz)](vd, vta, vl, vl * esz, \
+ total_elems * esz); \
}
GEN_VEXT_VSLIE1UP(8, H1)
@@ -5039,6 +5060,10 @@ static void vslide1down_##BITWIDTH(void *vd, void *v0, target_ulong s1, \
typedef uint##BITWIDTH##_t ETYPE; \
uint32_t vm = vext_vm(desc); \
uint32_t vl = env->vl; \
+ uint32_t esz = sizeof(ETYPE); \
+ uint32_t total_elems = \
+ vext_get_total_elems(env_archcpu(env), env->vtype); \
+ uint32_t vta = vext_vta(desc); \
uint32_t i; \
\
for (i = env->vstart; i < vl; i++) { \
@@ -5052,6 +5077,9 @@ static void vslide1down_##BITWIDTH(void *vd, void *v0, target_ulong s1, \
} \
} \
env->vstart = 0; \
+ /* set tail elements to 1s */ \
+ vext_set_elems_1s_fns[ctzl(esz)](vd, vta, vl, vl * esz, \
+ total_elems * esz); \
}
GEN_VEXT_VSLIDE1DOWN(8, H1)
@@ -5102,9 +5130,13 @@ GEN_VEXT_VFSLIDE1DOWN_VF(vfslide1down_vf_d, 64)
void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
CPURISCVState *env, uint32_t desc) \
{ \
- uint32_t vlmax = vext_max_elems(desc, ctzl(sizeof(TS2))); \
+ uint32_t max_elem = vext_max_elems(desc, ctzl(sizeof(TS2))); \
uint32_t vm = vext_vm(desc); \
uint32_t vl = env->vl; \
+ uint32_t esz = sizeof(TS2); \
+ uint32_t total_elems = \
+ vext_get_total_elems(env_archcpu(env), env->vtype); \
+ uint32_t vta = vext_vta(desc); \
uint64_t index; \
uint32_t i; \
\
@@ -5113,13 +5145,16 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
continue; \
} \
index = *((TS1 *)vs1 + HS1(i)); \
- if (index >= vlmax) { \
+ if (index >= max_elem) { \
*((TS2 *)vd + HS2(i)) = 0; \
} else { \
*((TS2 *)vd + HS2(i)) = *((TS2 *)vs2 + HS2(index)); \
} \
} \
env->vstart = 0; \
+ /* set tail elements to 1s */ \
+ vext_set_elems_1s_fns[ctzl(esz)](vd, vta, vl, vl * esz, \
+ total_elems * esz); \
}
/* vd[i] = (vs1[i] >= VLMAX) ? 0 : vs2[vs1[i]]; */
@@ -5137,9 +5172,13 @@ GEN_VEXT_VRGATHER_VV(vrgatherei16_vv_d, uint16_t, uint64_t, H2, H8)
void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
CPURISCVState *env, uint32_t desc) \
{ \
- uint32_t vlmax = vext_max_elems(desc, ctzl(sizeof(ETYPE))); \
+ uint32_t max_elem = vext_max_elems(desc, ctzl(sizeof(ETYPE))); \
uint32_t vm = vext_vm(desc); \
uint32_t vl = env->vl; \
+ uint32_t esz = sizeof(ETYPE); \
+ uint32_t total_elems = \
+ vext_get_total_elems(env_archcpu(env), env->vtype); \
+ uint32_t vta = vext_vta(desc); \
uint64_t index = s1; \
uint32_t i; \
\
@@ -5147,13 +5186,16 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
if (!vm && !vext_elem_mask(v0, i)) { \
continue; \
} \
- if (index >= vlmax) { \
+ if (index >= max_elem) { \
*((ETYPE *)vd + H(i)) = 0; \
} else { \
*((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(index)); \
} \
} \
env->vstart = 0; \
+ /* set tail elements to 1s */ \
+ vext_set_elems_1s_fns[ctzl(esz)](vd, vta, vl, vl * esz, \
+ total_elems * esz); \
}
/* vd[i] = (x[rs1] >= VLMAX) ? 0 : vs2[rs1] */
@@ -5168,6 +5210,10 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
CPURISCVState *env, uint32_t desc) \
{ \
uint32_t vl = env->vl; \
+ uint32_t esz = sizeof(ETYPE); \
+ uint32_t total_elems = \
+ vext_get_total_elems(env_archcpu(env), env->vtype); \
+ uint32_t vta = vext_vta(desc); \
uint32_t num = 0, i; \
\
for (i = env->vstart; i < vl; i++) { \
@@ -5178,6 +5224,9 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
num++; \
} \
env->vstart = 0; \
+ /* set tail elements to 1s */ \
+ vext_set_elems_1s_fns[ctzl(esz)](vd, vta, vl, vl * esz, \
+ total_elems * esz); \
}
/* Compress into vd elements of vs2 where vs1 is enabled */
@@ -5214,6 +5263,10 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
{ \
uint32_t vl = env->vl; \
uint32_t vm = vext_vm(desc); \
+ uint32_t esz = sizeof(ETYPE); \
+ uint32_t total_elems = \
+ vext_get_total_elems(env_archcpu(env), env->vtype); \
+ uint32_t vta = vext_vta(desc); \
uint32_t i; \
\
for (i = env->vstart; i < vl; i++) { \
@@ -5223,6 +5276,9 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
*((ETYPE *)vd + HD(i)) = *((DTYPE *)vs2 + HS1(i)); \
} \
env->vstart = 0; \
+ /* set tail elements to 1s */ \
+ vext_set_elems_1s_fns[ctzl(esz)](vd, vta, vl, vl * esz, \
+ total_elems * esz); \
}
GEN_VEXT_INT_EXT(vzext_vf2_h, uint16_t, uint8_t, H2, H1)
--
2.34.1
- [PATCH qemu v4 07/14] target/riscv: rvv: Add tail agnostic for vector integer shift instructions, (continued)
- [PATCH qemu v4 07/14] target/riscv: rvv: Add tail agnostic for vector integer shift instructions, ~eopxd, 2022/03/24
- [PATCH qemu v4 01/14] target/riscv: rvv: Prune redundant ESZ, DSZ parameter passed, ~eopxd, 2022/03/24
- [PATCH qemu v4 05/14] target/riscv: rvv: Add tail agnostic for vector load / store instructions, ~eopxd, 2022/03/24
- [PATCH qemu v4 06/14] target/riscv: rvv: Add tail agnostic for vx, vvm, vxm instructions, ~eopxd, 2022/03/24
- [PATCH qemu v4 10/14] target/riscv: rvv: Add tail agnostic for vector fix-point arithmetic instructions, ~eopxd, 2022/03/24
- [PATCH qemu v4 09/14] target/riscv: rvv: Add tail agnostic for vector integer merge and move instructions, ~eopxd, 2022/03/24
- [PATCH qemu v4 12/14] target/riscv: rvv: Add tail agnostic for vector reduction instructions, ~eopxd, 2022/03/24
- [PATCH qemu v4 14/14] target/riscv: rvv: Add tail agnostic for vector permutation instructions, ~eopxd <=
- [PATCH qemu v4 13/14] target/riscv: rvv: Add tail agnostic for vector mask instructions, ~eopxd, 2022/03/24
- [PATCH qemu v4 11/14] target/riscv: rvv: Add tail agnostic for vector floating-point instructions, ~eopxd, 2022/03/24