[PATCH v2 05/37] target/riscv: SIMD 16-bit Shift Instructions
From: LIU Zhiwei
Subject: [PATCH v2 05/37] target/riscv: SIMD 16-bit Shift Instructions
Date: Thu, 10 Jun 2021 15:58:36 +0800
Instructions include right arithmetic shift, right logical shift,
and left shift.
The shift amount can be an immediate or a register scalar. The
right shifts have a rounding variant, and the left shift has a
saturating variant.
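
For reference (not part of the patch), a minimal standalone C sketch of the
per-halfword semantics described above, assuming round-to-nearest-up rounding
for the ".u" right shifts and signed 16-bit saturation for KSLL16; the real
helpers below go through vssra16/vssrl16 and the vxsat state instead, and the
function names here are only illustrative:

#include <stdint.h>
#include <stdio.h>

/* Rounded arithmetic right shift of one 16-bit element (SRA16.u-style). */
static int16_t sra16_round(int16_t a, unsigned shift)
{
    shift &= 0xf;
    if (shift == 0) {
        return a;
    }
    /* Add half of the discarded part before shifting to round to nearest. */
    return (int16_t)(((int32_t)a + (1 << (shift - 1))) >> shift);
}

/* Saturating left shift of one 16-bit element (KSLL16-style). */
static int16_t sll16_sat(int16_t a, unsigned shift, int *sat)
{
    int32_t r = (int32_t)a << (shift & 0xf);

    if (r > INT16_MAX) {
        *sat = 1;
        return INT16_MAX;
    }
    if (r < INT16_MIN) {
        *sat = 1;
        return INT16_MIN;
    }
    return (int16_t)r;
}

int main(void)
{
    int sat = 0;

    /* 0x7fff >> 4 rounds up from 0x07ff to 0x0800. */
    printf("sra16_round(0x7fff, 4) = 0x%04x\n",
           (uint16_t)sra16_round(0x7fff, 4));
    /* 0x4000 << 2 overflows and saturates to 0x7fff. */
    printf("sll16_sat(0x4000, 2) = 0x%04x, sat = %d\n",
           (uint16_t)sll16_sat(0x4000, 2, &sat), sat);
    return 0;
}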
Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
---
include/tcg/tcg-op-gvec.h | 9 ++
target/riscv/helper.h | 9 ++
target/riscv/insn32.decode | 17 ++++
target/riscv/insn_trans/trans_rvp.c.inc | 59 ++++++++++++++
target/riscv/packed_helper.c | 104 ++++++++++++++++++++++++
tcg/tcg-op-gvec.c | 28 +++++++
6 files changed, 226 insertions(+)
diff --git a/include/tcg/tcg-op-gvec.h b/include/tcg/tcg-op-gvec.h
index 392c0f95a4..72cf697646 100644
--- a/include/tcg/tcg-op-gvec.h
+++ b/include/tcg/tcg-op-gvec.h
@@ -398,10 +398,13 @@ void tcg_gen_vec_sub32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
void tcg_gen_vec_shl8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t);
void tcg_gen_vec_shl16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t);
+void tcg_gen_vec_shl16i_i32(TCGv_i32 d, TCGv_i32 a, int32_t);
void tcg_gen_vec_shr8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t);
void tcg_gen_vec_shr16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t);
+void tcg_gen_vec_shr16i_i32(TCGv_i32 d, TCGv_i32 a, int32_t);
void tcg_gen_vec_sar8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t);
void tcg_gen_vec_sar16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t);
+void tcg_gen_vec_sar16i_i32(TCGv_i32 d, TCGv_i32 a, int32_t);
void tcg_gen_vec_rotl8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c);
void tcg_gen_vec_rotl16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c);
@@ -410,11 +413,17 @@ void tcg_gen_vec_rotl16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c);
#define tcg_gen_vec_sub16_tl tcg_gen_vec_sub16_i64
#define tcg_gen_vec_add8_tl tcg_gen_vec_add8_i64
#define tcg_gen_vec_sub8_tl tcg_gen_vec_sub8_i64
+#define tcg_gen_vec_shl16i_tl tcg_gen_vec_shl16i_i64
+#define tcg_gen_vec_shr16i_tl tcg_gen_vec_shr16i_i64
+#define tcg_gen_vec_sar16i_tl tcg_gen_vec_sar16i_i64
#else
#define tcg_gen_vec_add16_tl tcg_gen_vec_add16_i32
#define tcg_gen_vec_sub16_tl tcg_gen_vec_sub16_i32
#define tcg_gen_vec_add8_tl tcg_gen_vec_add8_i32
#define tcg_gen_vec_sub8_tl tcg_gen_vec_sub8_i32
+#define tcg_gen_vec_shl16i_tl tcg_gen_vec_shl16i_i32
+#define tcg_gen_vec_shr16i_tl tcg_gen_vec_shr16i_i32
+#define tcg_gen_vec_sar16i_tl tcg_gen_vec_sar16i_i32
#endif
#endif
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
index 629ff13402..de7b4fc17d 100644
--- a/target/riscv/helper.h
+++ b/target/riscv/helper.h
@@ -1188,3 +1188,12 @@ DEF_HELPER_3(rsub8, tl, env, tl, tl)
DEF_HELPER_3(ursub8, tl, env, tl, tl)
DEF_HELPER_3(ksub8, tl, env, tl, tl)
DEF_HELPER_3(uksub8, tl, env, tl, tl)
+
+DEF_HELPER_3(sra16, tl, env, tl, tl)
+DEF_HELPER_3(sra16_u, tl, env, tl, tl)
+DEF_HELPER_3(srl16, tl, env, tl, tl)
+DEF_HELPER_3(srl16_u, tl, env, tl, tl)
+DEF_HELPER_3(sll16, tl, env, tl, tl)
+DEF_HELPER_3(ksll16, tl, env, tl, tl)
+DEF_HELPER_3(kslra16, tl, env, tl, tl)
+DEF_HELPER_3(kslra16_u, tl, env, tl, tl)
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
index 13e1222296..44c497f28a 100644
--- a/target/riscv/insn32.decode
+++ b/target/riscv/insn32.decode
@@ -24,6 +24,7 @@
%sh5 20:5
%sh7 20:7
+%sh4 20:4
%csr 20:12
%rm 12:3
%nf 29:3 !function=ex_plus_1
@@ -61,6 +62,7 @@
@j .................... ..... ....... &j imm=%imm_j %rd
@sh ...... ...... ..... ... ..... ....... &shift shamt=%sh7 %rs1 %rd
+@sh4 ...... ...... ..... ... ..... ....... &shift shamt=%sh4 %rs1 %rd
@csr ............ ..... ... ..... ....... %csr %rs1 %rd
@atom_ld ..... aq:1 rl:1 ..... ........ ..... ....... &atomic rs2=0 %rs1 %rd
@@ -775,3 +777,18 @@ rsub8 0000101 ..... ..... 000 ..... 1110111 @r
ursub8 0010101 ..... ..... 000 ..... 1110111 @r
ksub8 0001101 ..... ..... 000 ..... 1110111 @r
uksub8 0011101 ..... ..... 000 ..... 1110111 @r
+
+sra16 0101000 ..... ..... 000 ..... 1110111 @r
+sra16_u 0110000 ..... ..... 000 ..... 1110111 @r
+srai16 0111000 0.... ..... 000 ..... 1110111 @sh4
+srai16_u 0111000 1.... ..... 000 ..... 1110111 @sh4
+srl16 0101001 ..... ..... 000 ..... 1110111 @r
+srl16_u 0110001 ..... ..... 000 ..... 1110111 @r
+srli16 0111001 0.... ..... 000 ..... 1110111 @sh4
+srli16_u 0111001 1.... ..... 000 ..... 1110111 @sh4
+sll16 0101010 ..... ..... 000 ..... 1110111 @r
+slli16 0111010 0.... ..... 000 ..... 1110111 @sh4
+ksll16 0110010 ..... ..... 000 ..... 1110111 @r
+kslli16 0111010 1.... ..... 000 ..... 1110111 @sh4
+kslra16 0101011 ..... ..... 000 ..... 1110111 @r
+kslra16_u 0110011 ..... ..... 000 ..... 1110111 @r
diff --git a/target/riscv/insn_trans/trans_rvp.c.inc b/target/riscv/insn_trans/trans_rvp.c.inc
index 80bec35ac9..afafa49824 100644
--- a/target/riscv/insn_trans/trans_rvp.c.inc
+++ b/target/riscv/insn_trans/trans_rvp.c.inc
@@ -128,3 +128,62 @@ GEN_RVP_R_OOL(rsub8);
GEN_RVP_R_OOL(ursub8);
GEN_RVP_R_OOL(ksub8);
GEN_RVP_R_OOL(uksub8);
+
+/* 16-bit Shift Instructions */
+GEN_RVP_R_OOL(sra16);
+GEN_RVP_R_OOL(srl16);
+GEN_RVP_R_OOL(sll16);
+GEN_RVP_R_OOL(sra16_u);
+GEN_RVP_R_OOL(srl16_u);
+GEN_RVP_R_OOL(ksll16);
+GEN_RVP_R_OOL(kslra16);
+GEN_RVP_R_OOL(kslra16_u);
+
+static bool
+rvp_shifti_ool(DisasContext *ctx, arg_shift *a,
+ void (* fn)(TCGv, TCGv_ptr, TCGv, TCGv))
+{
+ TCGv src1, dst, shift;
+
+ src1 = tcg_temp_new();
+ dst = tcg_temp_new();
+
+ gen_get_gpr(src1, a->rs1);
+ shift = tcg_const_tl(a->shamt);
+ fn(dst, cpu_env, src1, shift);
+ gen_set_gpr(a->rd, dst);
+
+ tcg_temp_free(src1);
+ tcg_temp_free(dst);
+ tcg_temp_free(shift);
+ return true;
+}
+
+static inline bool
+rvp_shifti(DisasContext *ctx, arg_shift *a,
+ void (* vecop)(TCGv, TCGv, target_long),
+ void (* op)(TCGv, TCGv_ptr, TCGv, TCGv))
+{
+ if (!has_ext(ctx, RVP)) {
+ return false;
+ }
+
+ if (a->rd && a->rs1 && vecop) {
+ vecop(cpu_gpr[a->rd], cpu_gpr[a->rs1], a->shamt);
+ return true;
+ }
+ return rvp_shifti_ool(ctx, a, op);
+}
+
+#define GEN_RVP_SHIFTI(NAME, VECOP, OP) \
+static bool trans_##NAME(DisasContext *s, arg_shift *a) \
+{ \
+ return rvp_shifti(s, a, VECOP, OP); \
+}
+
+GEN_RVP_SHIFTI(srai16, tcg_gen_vec_sar16i_tl, gen_helper_sra16);
+GEN_RVP_SHIFTI(srli16, tcg_gen_vec_shr16i_tl, gen_helper_srl16);
+GEN_RVP_SHIFTI(slli16, tcg_gen_vec_shl16i_tl, gen_helper_sll16);
+GEN_RVP_SHIFTI(srai16_u, NULL, gen_helper_sra16_u);
+GEN_RVP_SHIFTI(srli16_u, NULL, gen_helper_srl16_u);
+GEN_RVP_SHIFTI(kslli16, NULL, gen_helper_ksll16);
diff --git a/target/riscv/packed_helper.c b/target/riscv/packed_helper.c
index 62db072204..7e31c2fe46 100644
--- a/target/riscv/packed_helper.c
+++ b/target/riscv/packed_helper.c
@@ -425,3 +425,107 @@ static inline void do_uksub8(CPURISCVState *env, void *vd, void *va,
}
RVPR(uksub8, 1, 1);
+
+/* 16-bit Shift Instructions */
+static inline void do_sra16(CPURISCVState *env, void *vd, void *va,
+ void *vb, uint8_t i)
+{
+ int16_t *d = vd, *a = va;
+ uint8_t shift = *(uint8_t *)vb & 0xf;
+ d[i] = a[i] >> shift;
+}
+
+RVPR(sra16, 1, 2);
+
+static inline void do_srl16(CPURISCVState *env, void *vd, void *va,
+ void *vb, uint8_t i)
+{
+ uint16_t *d = vd, *a = va;
+ uint8_t shift = *(uint8_t *)vb & 0xf;
+ d[i] = a[i] >> shift;
+}
+
+RVPR(srl16, 1, 2);
+
+static inline void do_sll16(CPURISCVState *env, void *vd, void *va,
+ void *vb, uint8_t i)
+{
+ uint16_t *d = vd, *a = va;
+ uint8_t shift = *(uint8_t *)vb & 0xf;
+ d[i] = a[i] << shift;
+}
+
+RVPR(sll16, 1, 2);
+
+static inline void do_sra16_u(CPURISCVState *env, void *vd, void *va,
+ void *vb, uint8_t i)
+{
+ int16_t *d = vd, *a = va;
+ uint8_t shift = *(uint8_t *)vb & 0xf;
+
+ d[i] = vssra16(env, 0, a[i], shift);
+}
+
+RVPR(sra16_u, 1, 2);
+
+static inline void do_srl16_u(CPURISCVState *env, void *vd, void *va,
+ void *vb, uint8_t i)
+{
+ uint16_t *d = vd, *a = va;
+ uint8_t shift = *(uint8_t *)vb & 0xf;
+
+ d[i] = vssrl16(env, 0, a[i], shift);
+}
+
+RVPR(srl16_u, 1, 2);
+
+static inline void do_ksll16(CPURISCVState *env, void *vd, void *va,
+ void *vb, uint8_t i)
+{
+ int16_t *d = vd, *a = va, result;
+ uint8_t shift = *(uint8_t *)vb & 0xf;
+
+ result = a[i] << shift;
+ if (shift > (clrsb32(a[i]) - 16)) {
+ env->vxsat = 0x1;
+ d[i] = (a[i] & INT16_MIN) ? INT16_MIN : INT16_MAX;
+ } else {
+ d[i] = result;
+ }
+}
+
+RVPR(ksll16, 1, 2);
+
+static inline void do_kslra16(CPURISCVState *env, void *vd, void *va,
+ void *vb, uint8_t i)
+{
+ int16_t *d = vd, *a = va;
+ int32_t shift = sextract32((*(target_ulong *)vb), 0, 5);
+
+ if (shift >= 0) {
+ do_ksll16(env, vd, va, vb, i);
+ } else {
+ shift = -shift;
+ shift = (shift == 16) ? 15 : shift;
+ d[i] = a[i] >> shift;
+ }
+}
+
+RVPR(kslra16, 1, 2);
+
+static inline void do_kslra16_u(CPURISCVState *env, void *vd, void *va,
+ void *vb, uint8_t i)
+{
+ int16_t *d = vd, *a = va;
+ int32_t shift = sextract32((*(uint32_t *)vb), 0, 5);
+
+ if (shift >= 0) {
+ do_ksll16(env, vd, va, vb, i);
+ } else {
+ shift = -shift;
+ shift = (shift == 16) ? 15 : shift;
+ d[i] = vssra16(env, 0, a[i], shift);
+ }
+}
+
+RVPR(kslra16_u, 1, 2);
diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
index 484ced3054..cf1357cee1 100644
--- a/tcg/tcg-op-gvec.c
+++ b/tcg/tcg-op-gvec.c
@@ -2687,6 +2687,13 @@ void tcg_gen_vec_shl16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
tcg_gen_andi_i64(d, d, mask);
}
+void tcg_gen_vec_shl16i_i32(TCGv_i32 d, TCGv_i32 a, int32_t c)
+{
+ uint32_t mask = dup_const(MO_16, 0xffff << c);
+ tcg_gen_shli_i32(d, a, c);
+ tcg_gen_andi_i32(d, d, mask);
+}
+
void tcg_gen_gvec_shli(unsigned vece, uint32_t dofs, uint32_t aofs,
int64_t shift, uint32_t oprsz, uint32_t maxsz)
{
@@ -2738,6 +2745,13 @@ void tcg_gen_vec_shr16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
tcg_gen_andi_i64(d, d, mask);
}
+void tcg_gen_vec_shr16i_i32(TCGv_i32 d, TCGv_i32 a, int32_t c)
+{
+ uint32_t mask = dup_const(MO_16, 0xffff >> c);
+ tcg_gen_shri_i32(d, a, c);
+ tcg_gen_andi_i32(d, d, mask);
+}
+
void tcg_gen_gvec_shri(unsigned vece, uint32_t dofs, uint32_t aofs,
int64_t shift, uint32_t oprsz, uint32_t maxsz)
{
@@ -2803,6 +2817,20 @@ void tcg_gen_vec_sar16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
tcg_temp_free_i64(s);
}
+void tcg_gen_vec_sar16i_i32(TCGv_i32 d, TCGv_i32 a, int32_t c)
+{
+ uint32_t s_mask = dup_const(MO_16, 0x8000 >> c);
+ uint32_t c_mask = dup_const(MO_16, 0xffff >> c);
+ TCGv_i32 s = tcg_temp_new_i32();
+
+ tcg_gen_shri_i32(d, a, c);
+ tcg_gen_andi_i32(s, d, s_mask); /* isolate (shifted) sign bit */
+ tcg_gen_andi_i32(d, d, c_mask); /* clear out bits above sign */
+ tcg_gen_muli_i32(s, s, (2 << c) - 2); /* replicate isolated signs */
+ tcg_gen_or_i32(d, d, s); /* include sign extension */
+ tcg_temp_free_i32(s);
+}
+
void tcg_gen_gvec_sari(unsigned vece, uint32_t dofs, uint32_t aofs,
int64_t shift, uint32_t oprsz, uint32_t maxsz)
{
--
2.25.1