From: David Hildenbrand
Subject: Re: [PATCH v2 4/5] target/s390x: Fix shifting 32-bit values for more than 31 bits
Date: Wed, 12 Jan 2022 09:43:47 +0100
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:91.0) Gecko/20100101 Thunderbird/91.4.0
>
> +static uint32_t cc_calc_sla_32(uint32_t src, int shift)
> +{
> + return cc_calc_sla_64(((uint64_t)src) << 32, shift);
> +}
> +
Nice trick. What about doing the shift in op_sla when s->insn->data == 31
and unifying everything to a single CC_OP_SLA?
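
Something like the following is what I have in mind. Untested, just to
illustrate the idea: CC_OP_SLA is only a placeholder name for the unified
op (i.e. the current CC_OP_SLA_64 logic), and the body of op_sla is
reproduced from memory rather than copied from the tree, so take it with
a grain of salt:

static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;

    /* Untested sketch: for the 32-bit variants, move the value into the
     * upper half first, so the unified (64-bit) CC computation sees the
     * bits that matter. */
    if (s->insn->data == 31) {
        TCGv_i64 t = tcg_temp_new_i64();

        tcg_gen_shli_i64(t, o->in1, 32);
        gen_op_update2_cc_i64(s, CC_OP_SLA, t, o->in2);
        tcg_temp_free_i64(t);
    } else {
        gen_op_update2_cc_i64(s, CC_OP_SLA, o->in1, o->in2);
    }

    /* The arithmetic left shift does not change the sign bit; copy it
     * over from the source unchanged. */
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return DISAS_NEXT;
}

With that, cc_calc_sla_32() and CC_OP_SLA_32 could go away entirely.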
> static uint32_t cc_calc_flogr(uint64_t dst)
> {
> return dst ? 2 : 0;
> diff --git a/target/s390x/tcg/insn-data.def b/target/s390x/tcg/insn-data.def
> index 90c753068c..1c3e115712 100644
> --- a/target/s390x/tcg/insn-data.def
> +++ b/target/s390x/tcg/insn-data.def
> @@ -747,8 +747,8 @@
> C(0xb9e1, POPCNT, RRE, PC, 0, r2_o, r1, 0, popcnt, nz64)
>
> /* ROTATE LEFT SINGLE LOGICAL */
> - C(0xeb1d, RLL, RSY_a, Z, r3_o, sh32, new, r1_32, rll32, 0)
> - C(0xeb1c, RLLG, RSY_a, Z, r3_o, sh64, r1, 0, rll64, 0)
> + C(0xeb1d, RLL, RSY_a, Z, r3_o, sh, new, r1_32, rll32, 0)
> + C(0xeb1c, RLLG, RSY_a, Z, r3_o, sh, r1, 0, rll64, 0)
>
> /* ROTATE THEN INSERT SELECTED BITS */
> C(0xec55, RISBG, RIE_f, GIE, 0, r2, r1, 0, risbg, s64)
> @@ -784,29 +784,29 @@
> C(0x0400, SPM, RR_a, Z, r1, 0, 0, 0, spm, 0)
>
> /* SHIFT LEFT SINGLE */
> - D(0x8b00, SLA, RS_a, Z, r1, sh32, new, r1_32, sla, 0, 31)
> - D(0xebdd, SLAK, RSY_a, DO, r3, sh32, new, r1_32, sla, 0, 31)
> - D(0xeb0b, SLAG, RSY_a, Z, r3, sh64, r1, 0, sla, 0, 63)
> + D(0x8b00, SLA, RS_a, Z, r1, sh, new, r1_32, sla, 0, 31)
> + D(0xebdd, SLAK, RSY_a, DO, r3, sh, new, r1_32, sla, 0, 31)
> + D(0xeb0b, SLAG, RSY_a, Z, r3, sh, r1, 0, sla, 0, 63)
> /* SHIFT LEFT SINGLE LOGICAL */
> - C(0x8900, SLL, RS_a, Z, r1_o, sh32, new, r1_32, sll, 0)
> - C(0xebdf, SLLK, RSY_a, DO, r3_o, sh32, new, r1_32, sll, 0)
> - C(0xeb0d, SLLG, RSY_a, Z, r3_o, sh64, r1, 0, sll, 0)
> + C(0x8900, SLL, RS_a, Z, r1_o, sh, new, r1_32, sll, 0)
> + C(0xebdf, SLLK, RSY_a, DO, r3_o, sh, new, r1_32, sll, 0)
> + C(0xeb0d, SLLG, RSY_a, Z, r3_o, sh, r1, 0, sll, 0)
> /* SHIFT RIGHT SINGLE */
> - C(0x8a00, SRA, RS_a, Z, r1_32s, sh32, new, r1_32, sra, s32)
> - C(0xebdc, SRAK, RSY_a, DO, r3_32s, sh32, new, r1_32, sra, s32)
> - C(0xeb0a, SRAG, RSY_a, Z, r3_o, sh64, r1, 0, sra, s64)
> + C(0x8a00, SRA, RS_a, Z, r1_32s, sh, new, r1_32, sra, s32)
> + C(0xebdc, SRAK, RSY_a, DO, r3_32s, sh, new, r1_32, sra, s32)
> + C(0xeb0a, SRAG, RSY_a, Z, r3_o, sh, r1, 0, sra, s64)
> /* SHIFT RIGHT SINGLE LOGICAL */
> - C(0x8800, SRL, RS_a, Z, r1_32u, sh32, new, r1_32, srl, 0)
> - C(0xebde, SRLK, RSY_a, DO, r3_32u, sh32, new, r1_32, srl, 0)
> - C(0xeb0c, SRLG, RSY_a, Z, r3_o, sh64, r1, 0, srl, 0)
> + C(0x8800, SRL, RS_a, Z, r1_32u, sh, new, r1_32, srl, 0)
> + C(0xebde, SRLK, RSY_a, DO, r3_32u, sh, new, r1_32, srl, 0)
> + C(0xeb0c, SRLG, RSY_a, Z, r3_o, sh, r1, 0, srl, 0)
> /* SHIFT LEFT DOUBLE */
> - D(0x8f00, SLDA, RS_a, Z, r1_D32, sh64, new, r1_D32, sla, 0, 63)
> + D(0x8f00, SLDA, RS_a, Z, r1_D32, sh, new, r1_D32, sla, 0, 63)
> /* SHIFT LEFT DOUBLE LOGICAL */
> - C(0x8d00, SLDL, RS_a, Z, r1_D32, sh64, new, r1_D32, sll, 0)
> + C(0x8d00, SLDL, RS_a, Z, r1_D32, sh, new, r1_D32, sll, 0)
> /* SHIFT RIGHT DOUBLE */
> - C(0x8e00, SRDA, RS_a, Z, r1_D32, sh64, new, r1_D32, sra, s64)
> + C(0x8e00, SRDA, RS_a, Z, r1_D32, sh, new, r1_D32, sra, s64)
> /* SHIFT RIGHT DOUBLE LOGICAL */
> - C(0x8c00, SRDL, RS_a, Z, r1_D32, sh64, new, r1_D32, srl, 0)
> + C(0x8c00, SRDL, RS_a, Z, r1_D32, sh, new, r1_D32, srl, 0)
>
> /* SQUARE ROOT */
> F(0xb314, SQEBR, RRE, Z, 0, e2, new, e1, sqeb, 0, IF_BFP)
> diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
> index 68ca7e476a..5a2b609d0f 100644
> --- a/target/s390x/tcg/translate.c
> +++ b/target/s390x/tcg/translate.c
> @@ -1178,19 +1178,6 @@ struct DisasInsn {
> /* ====================================================================== */
> /* Miscellaneous helpers, used by several operations. */
>
> -static void help_l2_shift(DisasContext *s, DisasOps *o, int mask)
> -{
> - int b2 = get_field(s, b2);
> - int d2 = get_field(s, d2);
> -
> - if (b2 == 0) {
> - o->in2 = tcg_const_i64(d2 & mask);
> - } else {
> - o->in2 = get_address(s, 0, b2, d2);
> - tcg_gen_andi_i64(o->in2, o->in2, mask);
> - }
> -}
> -
> static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
> {
> if (dest == s->pc_tmp) {
> @@ -5923,17 +5910,19 @@ static void in2_ri2(DisasContext *s, DisasOps *o)
> }
> #define SPEC_in2_ri2 0
>
> -static void in2_sh32(DisasContext *s, DisasOps *o)
> +static void in2_sh(DisasContext *s, DisasOps *o)
> {
> - help_l2_shift(s, o, 31);
> -}
> -#define SPEC_in2_sh32 0
> + int b2 = get_field(s, b2);
> + int d2 = get_field(s, d2);
>
> -static void in2_sh64(DisasContext *s, DisasOps *o)
> -{
> - help_l2_shift(s, o, 63);
> + if (b2 == 0) {
> + o->in2 = tcg_const_i64(d2 & 0x3f);
> + } else {
> + o->in2 = get_address(s, 0, b2, d2);
> + tcg_gen_andi_i64(o->in2, o->in2, 0x3f);
> + }
> }
> -#define SPEC_in2_sh64 0
> +#define SPEC_in2_sh 0
>
> static void in2_m2_8u(DisasContext *s, DisasOps *o)
> {
LGTM, thanks
Reviewed-by: David Hildenbrand <david@redhat.com>
--
Thanks,
David / dhildenb