qemu-devel
From: Philipp Tomsich
Subject: Re: [PATCH 09/39] target/riscv: Add vandn.[vv,vx,vi] decoding, translation and execution support
Date: Thu, 2 Feb 2023 15:29:06 +0100

On Thu, 2 Feb 2023 at 13:42, Lawrence Hunter
<lawrence.hunter@codethink.co.uk> wrote:
>
> From: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
>
> Signed-off-by: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
> ---
>  target/riscv/helper.h                      |  9 +++++++++
>  target/riscv/insn32.decode                 |  3 +++
>  target/riscv/insn_trans/trans_rvzvkb.c.inc |  5 +++++
>  target/riscv/vcrypto_helper.c              | 19 +++++++++++++++++++
>  4 files changed, 36 insertions(+)
>
> diff --git a/target/riscv/helper.h b/target/riscv/helper.h
> index c980d52828..5de615ea78 100644
> --- a/target/riscv/helper.h
> +++ b/target/riscv/helper.h
> @@ -1171,3 +1171,12 @@ DEF_HELPER_5(vbrev8_v_b, void, ptr, ptr, ptr, env, i32)
>  DEF_HELPER_5(vbrev8_v_h, void, ptr, ptr, ptr, env, i32)
>  DEF_HELPER_5(vbrev8_v_w, void, ptr, ptr, ptr, env, i32)
>  DEF_HELPER_5(vbrev8_v_d, void, ptr, ptr, ptr, env, i32)
> +
> +DEF_HELPER_6(vandn_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vandn_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vandn_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vandn_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
> +DEF_HELPER_6(vandn_vx_b, void, ptr, ptr, tl, ptr, env, i32)
> +DEF_HELPER_6(vandn_vx_h, void, ptr, ptr, tl, ptr, env, i32)
> +DEF_HELPER_6(vandn_vx_w, void, ptr, ptr, tl, ptr, env, i32)
> +DEF_HELPER_6(vandn_vx_d, void, ptr, ptr, tl, ptr, env, i32)
> diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
> index 342199abc0..d6f5e4d198 100644
> --- a/target/riscv/insn32.decode
> +++ b/target/riscv/insn32.decode
> @@ -904,3 +904,6 @@ vror_vi         010100 . ..... ..... 011 ..... 1010111 @r_vm
>  vror_vi2        010101 . ..... ..... 011 ..... 1010111 @r_vm
>  vbrev8_v        010010 . ..... 01000 010 ..... 1010111 @r2_vm
>  vrev8_v         010010 . ..... 01001 010 ..... 1010111 @r2_vm
> +vandn_vi        000001 . ..... ..... 011 ..... 1010111 @r_vm
> +vandn_vv        000001 . ..... ..... 000 ..... 1010111 @r_vm
> +vandn_vx        000001 . ..... ..... 100 ..... 1010111 @r_vm
> diff --git a/target/riscv/insn_trans/trans_rvzvkb.c.inc b/target/riscv/insn_trans/trans_rvzvkb.c.inc
> index 18b362db92..a973b27bdd 100644
> --- a/target/riscv/insn_trans/trans_rvzvkb.c.inc
> +++ b/target/riscv/insn_trans/trans_rvzvkb.c.inc
> @@ -147,6 +147,11 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
>      return false;                                                      \
>  }
>
> +
> +GEN_OPIVV_TRANS(vandn_vv, zvkb_vv_check)
> +GEN_OPIVX_TRANS(vandn_vx, zvkb_vx_check)
> +GEN_OPIVI_TRANS(vandn_vi, IMM_SX, vandn_vx, zvkb_vx_check)

I don't see any reason why this shouldn't have gvec support (after
all, it is an andc with the arguments swapped) with something like
this:

static void gen_andn_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    /* vandn is andc with the operands swapped: ret = arg2 & ~arg1,
       matching DO_ANDN(a, b) = (b) & ~(a) in the patch */
    tcg_gen_andc_i64(ret, arg2, arg1);
}

static void gen_andn_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_andc_vec(vece, r, b, a);
}

static void tcg_gen_gvec_andn(unsigned vece, uint32_t dofs, uint32_t aofs,
                              uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g = {
        .fni8 = gen_andn_i64,
        .fniv = gen_andn_vec,
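        /* gen_helper_vec_andn (and gen_helper_vec_andns below) are
           assumed names; the out-of-line helpers would still need to
           be provided */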
        .fno = gen_helper_vec_andn,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (aofs == bofs) {
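        /* same source register: x & ~x is all-zeroes */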
        tcg_gen_gvec_dup_imm(MO_64, dofs, oprsz, maxsz, 0);
    } else {
        tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
    }
}

static void tcg_gen_gvec_andns(unsigned vece, uint32_t dofs, uint32_t aofs,
                               TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen2s g = {
        .fni8 = gen_andn_i64,
        .fniv = gen_andn_vec,
        .fno = gen_helper_vec_andns,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, c, &g);
}

static void tcg_gen_gvec_andni(unsigned vece, uint32_t dofs, uint32_t aofs,
                               int64_t c, uint32_t oprsz, uint32_t maxsz)
{
    TCGv_i64 tmp = tcg_constant_i64(c);
    tcg_gen_gvec_andns(vece, dofs, aofs, tmp, oprsz, maxsz);
}

/* vandn.v[vxi] */
GEN_OPIVV_GVEC_TRANS_CHECK(vandn_vv, andn, zvkb_check_vv)
GEN_OPIVX_GVEC_TRANS_CHECK(vandn_vx, andns, zvkb_check_vx)
GEN_OPIVI_GVEC_TRANS_CHECK(vandn_vi, IMM_SX, vandn_vx, andni, zvkb_check_vi)
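
To spell out the identity the sketch above relies on (the model_*
names below are illustrative only, not QEMU APIs): vandn produces
vs1 & ~vs2, which is exactly andc with the operands swapped. A
minimal standalone check:

#include <assert.h>
#include <stdint.h>

/* scalar model of the patch's DO_ANDN(a, b) = (b) & ~(a) */
static uint64_t model_andn(uint64_t vs2, uint64_t vs1)
{
    return vs1 & ~vs2;
}

/* scalar model of tcg andc semantics: a & ~b */
static uint64_t model_andc(uint64_t a, uint64_t b)
{
    return a & ~b;
}

int main(void)
{
    uint64_t vs2 = 0xF0F0F0F0F0F0F0F0ull;
    uint64_t vs1 = 0xFF00FF00FF00FF00ull;
    /* andn(vs2, vs1) == andc(vs1, vs2) */
    assert(model_andn(vs2, vs1) == model_andc(vs1, vs2));
    return 0;
}

This is also why the fniv variant can simply call tcg_gen_andc_vec
with b and a reversed, and why two identical source operands fold to
an all-zeroes destination.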

> +
>  static bool vxrev8_check(DisasContext *s, arg_rmr *a)
>  {
>      return s->cfg_ptr->ext_zvkb == true && vext_check_isa_ill(s) &&
> diff --git a/target/riscv/vcrypto_helper.c b/target/riscv/vcrypto_helper.c
> index b09fe5fa2a..900e68dfb0 100644
> --- a/target/riscv/vcrypto_helper.c
> +++ b/target/riscv/vcrypto_helper.c
> @@ -135,3 +135,22 @@ GEN_VEXT_V(vrev8_v_b, 1)
>  GEN_VEXT_V(vrev8_v_h, 2)
>  GEN_VEXT_V(vrev8_v_w, 4)
>  GEN_VEXT_V(vrev8_v_d, 8)
> +
> +#define DO_ANDN(a, b) ((b) & ~(a))
> +RVVCALL(OPIVV2, vandn_vv_b, OP_UUU_B, H1, H1, H1, DO_ANDN)
> +RVVCALL(OPIVV2, vandn_vv_h, OP_UUU_H, H2, H2, H2, DO_ANDN)
> +RVVCALL(OPIVV2, vandn_vv_w, OP_UUU_W, H4, H4, H4, DO_ANDN)
> +RVVCALL(OPIVV2, vandn_vv_d, OP_UUU_D, H8, H8, H8, DO_ANDN)
> +GEN_VEXT_VV(vandn_vv_b, 1)
> +GEN_VEXT_VV(vandn_vv_h, 2)
> +GEN_VEXT_VV(vandn_vv_w, 4)
> +GEN_VEXT_VV(vandn_vv_d, 8)
> +
> +RVVCALL(OPIVX2, vandn_vx_b, OP_UUU_B, H1, H1, DO_ANDN)
> +RVVCALL(OPIVX2, vandn_vx_h, OP_UUU_H, H2, H2, DO_ANDN)
> +RVVCALL(OPIVX2, vandn_vx_w, OP_UUU_W, H4, H4, DO_ANDN)
> +RVVCALL(OPIVX2, vandn_vx_d, OP_UUU_D, H8, H8, DO_ANDN)
> +GEN_VEXT_VX(vandn_vx_b, 1)
> +GEN_VEXT_VX(vandn_vx_h, 2)
> +GEN_VEXT_VX(vandn_vx_w, 4)
> +GEN_VEXT_VX(vandn_vx_d, 8)
> --
> 2.39.1
>


