From: LIU Zhiwei
Subject: Re: [PATCH v4 1/5] target/riscv: add vector unit stride load and store instructions
Date: Fri, 28 Feb 2020 09:50:52 +0800
User-agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:68.0) Gecko/20100101 Thunderbird/68.4.2



On 2020/2/28 3:17, Richard Henderson wrote:
On 2/25/20 2:35 AM, LIU Zhiwei wrote:
+static bool vext_check_reg(DisasContext *s, uint32_t reg, bool widen)
+{
+    int legal = widen ? 2 << s->lmul : 1 << s->lmul;
+
+    return !((s->lmul == 0x3 && widen) || (reg % legal));
+}
+
+static bool vext_check_overlap_mask(DisasContext *s, uint32_t vd, bool vm)
+{
+    return !(s->lmul > 1 && vm == 0 && vd == 0);
+}
+
+static bool vext_check_nf(DisasContext *s, uint32_t nf)
+{
+    return s->lmul * (nf + 1) <= 8;
+}
Some commentary would be good here, quoting the rule being applied.  E.g. "The
destination vector register group for a masked vector instruction can only
overlap the source mask register (v0) when LMUL=1. (Section 5.3)"
Good idea.

+static bool ld_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
+{
+    uint8_t nf = a->nf + 1;
Perhaps NF should have the +1 done during decode, so that it cannot be
forgotten here or elsewhere. 

Perhaps not. It will not be used elsewhere, and it would need one more bit in FIELD().
 E.g.

%nf      31:3  !function=ex_plus_1
@r2_nfvm ... ... vm:1 ..... ..... ... ..... ....... \
         &r2nfvm %nf %rs1 %rd

Where ex_plus_1 is the obvious modification of ex_shift_1().
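For reference, such a helper could be as simple as (a sketch, not part of the posted patch):

/* Decode helper: return the raw nf field plus one. */
static int ex_plus_1(DisasContext *ctx, int nf)
{
    return nf + 1;
}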

+static inline uint32_t vext_nf(uint32_t desc)
+{
+    return (simd_data(desc) >> 11) & 0xf;
+}
+
+static inline uint32_t vext_mlen(uint32_t desc)
+{
+    return simd_data(desc) & 0xff;
+}
+
+static inline uint32_t vext_vm(uint32_t desc)
+{
+    return (simd_data(desc) >> 8) & 0x1;
+}
+
+static inline uint32_t vext_lmul(uint32_t desc)
+{
+    return (simd_data(desc) >> 9) & 0x3;
+}
You should use FIELD() to define the fields, and then use FIELD_EX32 and
FIELD_DP32 to reference them.
Nice, I will find some place to define the fields.
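For instance, with FIELD()/FIELD_EX32 from hw/registerfields.h it could look like this (a sketch; the VDATA name and where the definitions end up are assumptions, keeping the same bit layout as the shifts and masks above):

FIELD(VDATA, MLEN, 0, 8)
FIELD(VDATA, VM, 8, 1)
FIELD(VDATA, LMUL, 9, 2)
FIELD(VDATA, NF, 11, 4)

static inline uint32_t vext_nf(uint32_t desc)
{
    return FIELD_EX32(simd_data(desc), VDATA, NF);
}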

+/*
+ * This function checks watchpoint before real load operation.
+ *
+ * In softmmu mode, the TLB API probe_access is enough for watchpoint check.
+ * In user mode, there is no watchpoint support now.
+ *
+ * It will triggle an exception if there is no mapping in TLB
trigger.
Yes.

+ * and page table walk can't fill the TLB entry. Then the guest
+ * software can return here after process the exception or never return.
+ */
+static void probe_read_access(CPURISCVState *env, target_ulong addr,
+        target_ulong len, uintptr_t ra)
+{
+    while (len) {
+        const target_ulong pagelen = -(addr | TARGET_PAGE_MASK);
+        const target_ulong curlen = MIN(pagelen, len);
+
+        probe_read(env, addr, curlen, cpu_mmu_index(env, false), ra);
+        addr += curlen;
+        len -= curlen;
+    }
+}
+
+static void probe_write_access(CPURISCVState *env, target_ulong addr,
+        target_ulong len, uintptr_t ra)
+{
+    while (len) {
+        const target_ulong pagelen = -(addr | TARGET_PAGE_MASK);
+        const target_ulong curlen = MIN(pagelen, len);
+
+        probe_write(env, addr, curlen, cpu_mmu_index(env, false), ra);
+        addr += curlen;
+        len -= curlen;
+    }
+}
A loop is overkill -- the access cannot span 3 pages.
Yes, I will just do as you suggest!

In the unit-stride load without mask, the maximum access length is checked; it is 512 bytes.
And the current target page size is 4096 bytes.

#define TARGET_PAGE_BITS 12 /* 4 KiB Pages */

These two functions
can be merged using probe_access and MMU_DATA_{LOAD,STORE}.
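A merged helper might look like this (a sketch; the probe_pages name is an assumption, and it relies on the access spanning at most two pages, as discussed above):

static void probe_pages(CPURISCVState *env, target_ulong addr,
        target_ulong len, uintptr_t ra, MMUAccessType access_type)
{
    target_ulong pagelen = -(addr | TARGET_PAGE_MASK);
    target_ulong curlen = MIN(pagelen, len);

    /* probe the part of the access that lies in the first page */
    probe_access(env, addr, curlen, access_type,
                 cpu_mmu_index(env, false), ra);
    if (len > curlen) {
        /* the remainder lies in the next page */
        addr += curlen;
        curlen = len - curlen;
        probe_access(env, addr, curlen, access_type,
                     cpu_mmu_index(env, false), ra);
    }
}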

+
+#ifdef HOST_WORDS_BIGENDIAN
+static void vext_clear(void *tail, uint32_t cnt, uint32_t tot)
+{
+    /*
+     * Split the remaining range to two parts.
+     * The first part is in the last uint64_t unit.
+     * The second part start from the next uint64_t unit.
+     */
+    int part1 = 0, part2 = tot - cnt;
+    if (cnt % 64) {
+        part1 = 64 - (cnt % 64);
+        part2 = tot - cnt - part1;
+        memset(tail & ~(63ULL), 0, part1);
+        memset((tail + 64) & ~(63ULL), 0, part2);
You're confusing bit and byte offsets -- cnt and tot are both byte offsets.
Yes, I will fix it.
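A byte-based big-endian variant might read like this (a sketch, assuming tail points at vd plus the byte offset cnt; QEMU_ALIGN_PTR_DOWN/UP are from osdep.h):

static void vext_clear(void *tail, uint32_t cnt, uint32_t tot)
{
    /*
     * Split the remaining range into two parts:
     * the bytes left in the current uint64_t unit,
     * and the whole units that follow it.
     */
    int part1 = 0, part2 = tot - cnt;

    if (cnt % 8) {
        part1 = 8 - (cnt % 8);
        part2 = tot - cnt - part1;
        /* on a big-endian host the tail bytes of this unit sit at
           the low addresses of the containing uint64_t */
        memset(QEMU_ALIGN_PTR_DOWN(tail, 8), 0, part1);
        memset(QEMU_ALIGN_PTR_UP(tail, 8), 0, part2);
    } else {
        memset(tail, 0, part2);
    }
}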

+static inline int vext_elem_mask(void *v0, int mlen, int index)
+{
+
+    int idx = (index * mlen) / 8;
+    int pos = (index * mlen) % 8;
+
+    switch (mlen) {
+    case 8:
+        return *((uint8_t *)v0 + H1(index)) & 0x1;
+    case 16:
+        return *((uint16_t *)v0 + H2(index)) & 0x1;
+    case 32:
+        return *((uint32_t *)v0 + H4(index)) & 0x1;
+    case 64:
+        return *((uint64_t *)v0 + index) & 0x1;
+    default:
+        return (*((uint8_t *)v0 + H1(idx)) >> pos) & 0x1;
+    }
This is not what I had in mind, and looks wrong as well.

    int idx = (index * mlen) / 64;
    int pos = (index * mlen) % 64;
    return (((uint64_t *)v0)[idx] >> pos) & 1;

You also might consider passing log2(mlen), so the multiplication could be
strength-reduced to a shift.
I don't think so. For example, when mlen is 8 bits and index is 0, it reduces to

    return (((uint64_t *)v0)[0]) & 1;

and that's not right.

The correct bit is the first bit in vector register 0. On a big-endian host,
it will be the first bit of the seventh byte.

+#define GEN_VEXT_LD_ELEM(NAME, MTYPE, ETYPE, H, LDSUF)              \
+static void vext_##NAME##_ld_elem(CPURISCVState *env, abi_ptr addr, \
+        uint32_t idx, void *vd, uintptr_t retaddr)                  \
+{                                                                   \
+    int mmu_idx = cpu_mmu_index(env, false);                        \
+    MTYPE data;                                                     \
+    ETYPE *cur = ((ETYPE *)vd + H(idx));                            \
+    data = cpu_##LDSUF##_mmuidx_ra(env, addr, mmu_idx, retaddr);   \
+    *cur = data;                                                    \
+}                                                                   \
If you're going to use cpu_mmu_index, you might as well use cpu_SUFF_data_ra(),
which does not require the mmu_idx parameter.
Good.
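For example, the load macro with that change (a sketch):

#define GEN_VEXT_LD_ELEM(NAME, MTYPE, ETYPE, H, LDSUF)               \
static void vext_##NAME##_ld_elem(CPURISCVState *env, abi_ptr addr, \
        uint32_t idx, void *vd, uintptr_t retaddr)                  \
{                                                                    \
    MTYPE data = cpu_##LDSUF##_data_ra(env, addr, retaddr);         \
    *((ETYPE *)vd + H(idx)) = data;                                  \
}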

+#define GEN_VEXT_ST_ELEM(NAME, ETYPE, H, STSUF)                       \
+static void vext_##NAME##_st_elem(CPURISCVState *env, abi_ptr addr,   \
+        uint32_t idx, void *vd, uintptr_t retaddr)                    \
+{                                                                     \
+    int mmu_idx = cpu_mmu_index(env, false);                          \
+    ETYPE data = *((ETYPE *)vd + H(idx));                            \
+    cpu_##STSUF##_mmuidx_ra(env, addr, data, mmu_idx, retaddr);       \
+}
Likewise.

+/*
+ *** unit-stride: load vector element from continuous guest memory
+ */
+static inline void vext_ld_us_mask(void *vd, void *v0, target_ulong base,
+        CPURISCVState *env, uint32_t desc,
+        vext_ld_elem_fn ld_elem,
+        vext_ld_clear_elem clear_elem,
+        uint32_t esz, uint32_t msz, uintptr_t ra)
+{
+    uint32_t i, k;
+    uint32_t mlen = vext_mlen(desc);
You don't need to pass mlen, since it's
Yes.

+/* unit-stride: store vector element to guest memory */
+static void vext_st_us_mask(void *vd, void *v0, target_ulong base,
+        CPURISCVState *env, uint32_t desc,
+        vext_st_elem_fn st_elem,
+        uint32_t esz, uint32_t msz, uintptr_t ra)
+{
+    uint32_t i, k;
+    uint32_t nf = vext_nf(desc);
+    uint32_t mlen = vext_mlen(desc);
+    uint32_t vlmax = vext_maxsz(desc) / esz;
+
+    /* probe every access*/
+    for (i = 0; i < env->vl; i++) {
+        if (!vext_elem_mask(v0, mlen, i)) {
+            continue;
+        }
+        probe_write_access(env, base + nf * i * msz, nf * msz, ra);
+    }
+    /* store bytes to guest memory */
+    for (i = 0; i < env->vl; i++) {
+        k = 0;
+        if (!vext_elem_mask(v0, mlen, i)) {
+            continue;
+        }
+        while (k < nf) {
+            target_ulong addr = base + (i * nf + k) * msz;
+            st_elem(env, addr, i + k * vlmax, vd, ra);
+            k++;
+        }
+    }
+}
I'll note that vext_ld_us_mask and vext_st_us_mask are identical, except for:

1) probe_read/write_access (which I already suggested merging, using
MMUAccessType),

2) the name of the ld_elem/st_elem variable (the function types are already
identical), and

3) the clear loop at the end of the load (which could be conditional on
clear_elem != NULL; after inlining, this should be optimized away).
Good idea. Thanks.
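Something like this, reusing the typedefs from the patch (a sketch; the merged name, the probe_pages helper above, and the exact clear_elem arguments are assumptions):

static void vext_ldst_us_mask(void *vd, void *v0, target_ulong base,
        CPURISCVState *env, uint32_t desc,
        vext_ld_elem_fn ldst_elem,
        vext_ld_clear_elem clear_elem,
        uint32_t esz, uint32_t msz, uintptr_t ra,
        MMUAccessType access_type)
{
    uint32_t i, k;
    uint32_t nf = vext_nf(desc);
    uint32_t mlen = vext_mlen(desc);
    uint32_t vlmax = vext_maxsz(desc) / esz;

    /* probe every access */
    for (i = 0; i < env->vl; i++) {
        if (!vext_elem_mask(v0, mlen, i)) {
            continue;
        }
        probe_pages(env, base + nf * i * msz, nf * msz, ra, access_type);
    }
    /* load or store bytes */
    for (i = 0; i < env->vl; i++) {
        if (!vext_elem_mask(v0, mlen, i)) {
            continue;
        }
        for (k = 0; k < nf; k++) {
            target_ulong addr = base + (i * nf + k) * msz;
            ldst_elem(env, addr, i + k * vlmax, vd, ra);
        }
    }
    /* clear the tail only for loads; clear_elem is NULL for stores,
       so after inlining this loop folds away */
    if (clear_elem) {
        for (k = 0; k < nf; k++) {
            clear_elem(vd, env->vl + k * vlmax, env->vl * esz, vlmax * esz);
        }
    }
}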

Zhiwei

+static void vext_st_us(void *vd, target_ulong base,
+        CPURISCVState *env, uint32_t desc,
+        vext_st_elem_fn st_elem,
+        uint32_t esz, uint32_t msz, uintptr_t ra)
Similarly.


r~

