Re: [Qemu-devel] [PATCH v2 27/27] tcg: Use tlb_fill probe from tlb_vaddr_to_host


From: Philippe Mathieu-Daudé
Subject: Re: [Qemu-devel] [PATCH v2 27/27] tcg: Use tlb_fill probe from tlb_vaddr_to_host
Date: Thu, 9 May 2019 11:56:50 +0200
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Thunderbird/60.6.1

On 5/9/19 8:02 AM, Richard Henderson wrote:
> Most of the existing users would continue around a loop which
> would fault the tlb entry in via a normal load/store.  But for
> SVE we have a true non-faulting case which requires the new
> probing form of tlb_fill.
> 
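(For context, the "continue around a loop" pattern mentioned above is
roughly the following -- a minimal sketch, not code from this series;
buf, len and ra are placeholder names:

    for (i = 0; i < len; i++) {
        void *host = tlb_vaddr_to_host(env, addr + i, MMU_DATA_LOAD, mmu_idx);
        if (host) {
            buf[i] = ldub_p(host);            /* fast path: direct RAM */
        } else {
            /* Slow path: a normal load, which calls tlb_fill and may
             * fault the entry in before the next iteration. */
            buf[i] = cpu_ldub_data_ra(env, addr + i, ra);
        }
    }

An SVE non-faulting load cannot take the slow path above, since the
normal load would raise a guest exception; hence the probing form of
tlb_fill.)
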
> Reviewed-by: Peter Maydell <address@hidden>
> Signed-off-by: Richard Henderson <address@hidden>
> ---
> v2: Update function docs comment.
> ---
>  include/exec/cpu_ldst.h | 50 ++++++-----------------------
>  accel/tcg/cputlb.c      | 69 ++++++++++++++++++++++++++++++++++++-----
>  target/arm/sve_helper.c |  6 +---
>  3 files changed, 72 insertions(+), 53 deletions(-)
> 
> diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
> index d78041d7a0..7b28a839d2 100644
> --- a/include/exec/cpu_ldst.h
> +++ b/include/exec/cpu_ldst.h
> @@ -433,50 +433,20 @@ static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx,
>   * @mmu_idx: MMU index to use for lookup
>   *
>   * Look up the specified guest virtual index in the TCG softmmu TLB.
> - * If the TLB contains a host virtual address suitable for direct RAM
> - * access, then return it. Otherwise (TLB miss, TLB entry is for an
> - * I/O access, etc) return NULL.
> - *
> - * This is the equivalent of the initial fast-path code used by
> - * TCG backends for guest load and store accesses.
> + * If we can translate a host virtual address suitable for direct RAM
> + * access, without causing a guest exception, then return it.
> + * Otherwise (TLB entry is for an I/O access, guest software
> + * TLB fill required, etc) return NULL.
>   */
> +#ifdef CONFIG_USER_ONLY
>  static inline void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
> -                                      int access_type, int mmu_idx)
> +                                      MMUAccessType access_type, int mmu_idx)
>  {
> -#if defined(CONFIG_USER_ONLY)
>      return g2h(addr);
> -#else
> -    CPUTLBEntry *tlbentry = tlb_entry(env, mmu_idx, addr);
> -    abi_ptr tlb_addr;
> -    uintptr_t haddr;
> -
> -    switch (access_type) {
> -    case 0:
> -        tlb_addr = tlbentry->addr_read;
> -        break;
> -    case 1:
> -        tlb_addr = tlb_addr_write(tlbentry);
> -        break;
> -    case 2:
> -        tlb_addr = tlbentry->addr_code;
> -        break;
> -    default:
> -        g_assert_not_reached();
> -    }
> -
> -    if (!tlb_hit(tlb_addr, addr)) {
> -        /* TLB entry is for a different page */
> -        return NULL;
> -    }
> -
> -    if (tlb_addr & ~TARGET_PAGE_MASK) {
> -        /* IO access */
> -        return NULL;
> -    }
> -
> -    haddr = addr + tlbentry->addend;
> -    return (void *)haddr;
> -#endif /* defined(CONFIG_USER_ONLY) */
>  }
> +#else
> +void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
> +                        MMUAccessType access_type, int mmu_idx);
> +#endif
>  
>  #endif /* CPU_LDST_H */
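
The new contract lets a non-faulting user treat a NULL result as "stop,
no exception" rather than retrying. A minimal sketch of such a consumer
(illustrative only; mark_no_fault() is a hypothetical bookkeeping
helper, not part of this series):

    for (i = 0; i < len; i++) {
        void *host = tlb_vaddr_to_host(env, addr + i, MMU_DATA_LOAD, mmu_idx);
        if (host == NULL) {
            /* Page would need a faulting fill, or is I/O: give up on
             * this element without raising a guest exception. */
            mark_no_fault(env, i);
            break;
        }
        buf[i] = ldub_p(host);    /* guaranteed-safe direct RAM access */
    }
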
> diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
> index dfcd9ae168..45a5c4e123 100644
> --- a/accel/tcg/cputlb.c
> +++ b/accel/tcg/cputlb.c
> @@ -1007,6 +1007,16 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
>      }
>  }
>  
> +static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
> +{
> +#if TCG_OVERSIZED_GUEST
> +    return *(target_ulong *)((uintptr_t)entry + ofs);
> +#else
> +    /* ofs might correspond to .addr_write, so use atomic_read */
> +    return atomic_read((target_ulong *)((uintptr_t)entry + ofs));
> +#endif
> +}
> +
>  /* Return true if ADDR is present in the victim tlb, and has been copied
>     back to the main tlb.  */
>  static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
> @@ -1017,14 +1027,7 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
>      assert_cpu_is_self(ENV_GET_CPU(env));
>      for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
>          CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
> -        target_ulong cmp;
> -
> -        /* elt_ofs might correspond to .addr_write, so use atomic_read */
> -#if TCG_OVERSIZED_GUEST
> -        cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
> -#else
> -        cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
> -#endif
> +        target_ulong cmp = tlb_read_ofs(vtlb, elt_ofs);
>  
>          if (cmp == page) {
>              /* Found entry in victim tlb, swap tlb and iotlb.  */
> @@ -1108,6 +1111,56 @@ void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
>      }
>  }
>  
> +void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
> +                        MMUAccessType access_type, int mmu_idx)
> +{
> +    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
> +    uintptr_t tlb_addr, page;
> +    size_t elt_ofs;
> +
> +    switch (access_type) {
> +    case MMU_DATA_LOAD:
> +        elt_ofs = offsetof(CPUTLBEntry, addr_read);
> +        break;
> +    case MMU_DATA_STORE:
> +        elt_ofs = offsetof(CPUTLBEntry, addr_write);
> +        break;
> +    case MMU_INST_FETCH:
> +        elt_ofs = offsetof(CPUTLBEntry, addr_code);
> +        break;
> +    default:
> +        g_assert_not_reached();
> +    }
> +
> +    page = addr & TARGET_PAGE_MASK;
> +    tlb_addr = tlb_read_ofs(entry, elt_ofs);
> +
> +    if (!tlb_hit_page(tlb_addr, page)) {
> +        uintptr_t index = tlb_index(env, mmu_idx, addr);
> +
> +        if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page)) {
> +            CPUState *cs = ENV_GET_CPU(env);
> +            CPUClass *cc = CPU_GET_CLASS(cs);
> +
> +            if (!cc->tlb_fill(cs, addr, 0, access_type, mmu_idx, true, 0)) {
> +                /* Non-faulting page table read failed.  */
> +                return NULL;
> +            }
> +
> +            /* TLB resize via tlb_fill may have moved the entry.  */
> +            entry = tlb_entry(env, mmu_idx, addr);
> +        }
> +        tlb_addr = tlb_read_ofs(entry, elt_ofs);
> +    }
> +
> +    if (tlb_addr & ~TARGET_PAGE_MASK) {
> +        /* IO access */
> +        return NULL;
> +    }
> +
> +    return (void *)(addr + entry->addend);
> +}
> +
>  /* Probe for a read-modify-write atomic operation.  Do not allow unaligned
>   * operations, or io operations to proceed.  Return the host address.  */
>  static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
> diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
> index bc847250dd..fd434c66ea 100644
> --- a/target/arm/sve_helper.c
> +++ b/target/arm/sve_helper.c
> @@ -4598,11 +4598,7 @@ static void sve_ldnf1_r(CPUARMState *env, void *vg, const target_ulong addr,
>       * in the real world, obviously.)
>       *
>       * Then there are the annoying special cases with watchpoints...
> -     *
> -     * TODO: Add a form of tlb_fill that does not raise an exception,
> -     * with a form of tlb_vaddr_to_host and a set of loads to match.
> -     * The non_fault_vaddr_to_host would handle everything, usually,
> -     * and the loads would handle the iomem path for watchpoints.
> +     * TODO: Add a form of non-faulting loads using cc->tlb_fill(probe=true).
>       */
>      host = tlb_vaddr_to_host(env, addr + mem_off, MMU_DATA_LOAD, mmu_idx);
>      split = max_for_page(addr, mem_off, mem_max);
> 

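For target maintainers wiring this up: the cc->tlb_fill(..., probe=true)
call above relies on the hook honoring the probe flag. A minimal sketch
of the expected shape, under assumed names (mytarget_walk_page_table()
and mytarget_raise_mmu_fault() are placeholders for a target's page
table walker and fault injection, not real QEMU functions):

    /* Hypothetical target helpers, for illustration only. */
    static bool mytarget_walk_page_table(CPUState *cs, vaddr addr,
                                         MMUAccessType access_type,
                                         int mmu_idx, hwaddr *phys, int *prot);
    static void QEMU_NORETURN mytarget_raise_mmu_fault(CPUState *cs, vaddr addr,
                                                       MMUAccessType access_type,
                                                       uintptr_t retaddr);

    static bool mytarget_tlb_fill(CPUState *cs, vaddr addr, int size,
                                  MMUAccessType access_type, int mmu_idx,
                                  bool probe, uintptr_t retaddr)
    {
        hwaddr phys;
        int prot;

        if (!mytarget_walk_page_table(cs, addr, access_type, mmu_idx,
                                      &phys, &prot)) {
            if (probe) {
                /* Probing form: report failure, raise no exception. */
                return false;
            }
            /* Normal form: raise the guest fault; does not return. */
            mytarget_raise_mmu_fault(cs, addr, access_type, retaddr);
        }
        tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                     prot, mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }
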
Reviewed-by: Philippe Mathieu-Daudé <address@hidden>


