[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [PATCH v4 15/16] cputlb: Pass retaddr to tb_invalidate_phys_page_fast
From: Alex Bennée
Subject: Re: [PATCH v4 15/16] cputlb: Pass retaddr to tb_invalidate_phys_page_fast
Date: Wed, 25 Sep 2019 17:28:27 +0100
User-agent: mu4e 1.3.4; emacs 27.0.50
Richard Henderson <address@hidden> writes:
> Rather than rely on cpu->mem_io_pc, pass retaddr down directly.
>
> Within tb_invalidate_phys_page_range__locked, the is_cpu_write_access
> parameter is non-zero exactly when retaddr would be non-zero, so that
> is a simple replacement.
>
> Recognize that current_tb_not_found is true only when mem_io_pc
> (and now retaddr) are also non-zero, so remove a redundant test.
>
> Reviewed-by: David Hildenbrand <address@hidden>
> Signed-off-by: Richard Henderson <address@hidden>
Reviewed-by: Alex Bennée <address@hidden>
> ---
> accel/tcg/translate-all.h | 3 ++-
> accel/tcg/cputlb.c | 6 +-----
> accel/tcg/translate-all.c | 39 +++++++++++++++++++--------------------
> 3 files changed, 22 insertions(+), 26 deletions(-)
>
> diff --git a/accel/tcg/translate-all.h b/accel/tcg/translate-all.h
> index 31f2117188..135c1ea96a 100644
> --- a/accel/tcg/translate-all.h
> +++ b/accel/tcg/translate-all.h
> @@ -27,7 +27,8 @@ struct page_collection *page_collection_lock(tb_page_addr_t
> start,
> tb_page_addr_t end);
> void page_collection_unlock(struct page_collection *set);
> void tb_invalidate_phys_page_fast(struct page_collection *pages,
> - tb_page_addr_t start, int len);
> + tb_page_addr_t start, int len,
> + uintptr_t retaddr);
> void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end);
> void tb_check_watchpoint(CPUState *cpu);
>
> diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
> index 0ca6ee60b3..ea5d12c59d 100644
> --- a/accel/tcg/cputlb.c
> +++ b/accel/tcg/cputlb.c
> @@ -1093,11 +1093,7 @@ static void notdirty_write(CPUState *cpu, vaddr
> mem_vaddr, unsigned size,
> if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
> struct page_collection *pages
> = page_collection_lock(ram_addr, ram_addr + size);
> -
> - /* We require mem_io_pc in tb_invalidate_phys_page_range. */
> - cpu->mem_io_pc = retaddr;
> -
> - tb_invalidate_phys_page_fast(pages, ram_addr, size);
> + tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr);
> page_collection_unlock(pages);
> }
>
> diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
> index de4b697163..db77fb221b 100644
> --- a/accel/tcg/translate-all.c
> +++ b/accel/tcg/translate-all.c
> @@ -1889,7 +1889,7 @@ static void
> tb_invalidate_phys_page_range__locked(struct page_collection *pages,
> PageDesc *p, tb_page_addr_t start,
> tb_page_addr_t end,
> - int is_cpu_write_access)
> + uintptr_t retaddr)
> {
> TranslationBlock *tb;
> tb_page_addr_t tb_start, tb_end;
> @@ -1897,9 +1897,9 @@ tb_invalidate_phys_page_range__locked(struct
> page_collection *pages,
> #ifdef TARGET_HAS_PRECISE_SMC
> CPUState *cpu = current_cpu;
> CPUArchState *env = NULL;
> - int current_tb_not_found = is_cpu_write_access;
> + bool current_tb_not_found = retaddr != 0;
> + bool current_tb_modified = false;
> TranslationBlock *current_tb = NULL;
> - int current_tb_modified = 0;
> target_ulong current_pc = 0;
> target_ulong current_cs_base = 0;
> uint32_t current_flags = 0;
> @@ -1931,24 +1931,21 @@ tb_invalidate_phys_page_range__locked(struct
> page_collection *pages,
> if (!(tb_end <= start || tb_start >= end)) {
> #ifdef TARGET_HAS_PRECISE_SMC
> if (current_tb_not_found) {
> - current_tb_not_found = 0;
> - current_tb = NULL;
> - if (cpu->mem_io_pc) {
> - /* now we have a real cpu fault */
> - current_tb = tcg_tb_lookup(cpu->mem_io_pc);
> - }
> + current_tb_not_found = false;
> + /* now we have a real cpu fault */
> + current_tb = tcg_tb_lookup(retaddr);
> }
> if (current_tb == tb &&
> (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
> - /* If we are modifying the current TB, we must stop
> - its execution. We could be more precise by checking
> - that the modification is after the current PC, but it
> - would require a specialized function to partially
> - restore the CPU state */
> -
> - current_tb_modified = 1;
> - cpu_restore_state_from_tb(cpu, current_tb,
> - cpu->mem_io_pc, true);
> + /*
> + * If we are modifying the current TB, we must stop
> + * its execution. We could be more precise by checking
> + * that the modification is after the current PC, but it
> + * would require a specialized function to partially
> + * restore the CPU state.
> + */
> + current_tb_modified = true;
> + cpu_restore_state_from_tb(cpu, current_tb, retaddr, true);
> + cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
> + &current_flags);
> }
> @@ -2042,7 +2039,8 @@ void tb_invalidate_phys_range(target_ulong start,
> target_ulong end)
> * Call with all @pages in the range [@start, @start + len[ locked.
> */
> void tb_invalidate_phys_page_fast(struct page_collection *pages,
> - tb_page_addr_t start, int len)
> + tb_page_addr_t start, int len,
> + uintptr_t retaddr)
> {
> PageDesc *p;
>
> @@ -2069,7 +2067,8 @@ void tb_invalidate_phys_page_fast(struct
> page_collection *pages,
> }
> } else {
> do_invalidate:
> - tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
> 1);
> + tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
> + retaddr);
> }
> }
> #else
--
Alex Bennée
- Re: [PATCH v4 08/16] cputlb: Move ROM handling from I/O path to TLB path, (continued)
[PATCH v4 14/16] cputlb: Remove tb_invalidate_phys_page_range is_cpu_write_access, Richard Henderson, 2019/09/23
[PATCH v4 11/16] cputlb: Merge and move memory_notdirty_write_{prepare, complete}, Richard Henderson, 2019/09/23
[PATCH v4 15/16] cputlb: Pass retaddr to tb_invalidate_phys_page_fast, Richard Henderson, 2019/09/23
- Re: [PATCH v4 15/16] cputlb: Pass retaddr to tb_invalidate_phys_page_fast,
Alex Bennée <=
[PATCH v4 09/16] cputlb: Move NOTDIRTY handling from I/O path to TLB path, Richard Henderson, 2019/09/23
[PATCH v4 12/16] cputlb: Handle TLB_NOTDIRTY in probe_access, Richard Henderson, 2019/09/23
[PATCH v4 10/16] cputlb: Partially inline memory_region_section_get_iotlb, Richard Henderson, 2019/09/23
Re: [PATCH v4 10/16] cputlb: Partially inline memory_region_section_get_iotlb, Alex Bennée, 2019/09/25