(2) The IOMMUMemoryRegion returns a different target_as and the
section is in the I/O region.

Common IOMMU devices don't have this issue since they are only in the
path of DMA access. Currently, the bug only occurs when the ARM MPC
device (hw/misc/tz-mpc.c) returns 'blocked_io_as' to emulate
blocked-access handling. The upcoming RISC-V wgChecker device is also
affected by this bug.
Signed-off-by: Jim Shu <jim.shu@sifive.com>
---
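Note for reviewers: scenario (2) is the one where the IOMMU translate hook
steers an access into a different AddressSpace. Below is a minimal,
illustrative sketch in the style of hw/misc/tz-mpc.c; it is not code from
this patch, and MyCheckerState, my_checker_is_blocked() and the field names
are hypothetical.

/*
 * Illustrative sketch only: an IOMMU translate hook that can return a
 * per-access target_as, so the resulting MemoryRegionSection may live
 * outside the CPU's address space.
 */
#include "qemu/osdep.h"
#include "exec/memory.h"

typedef struct MyCheckerState {
    IOMMUMemoryRegion upstream;     /* region seen by the upstream masters */
    AddressSpace downstream_as;     /* normal destination */
    AddressSpace blocked_io_as;     /* dummy AS emulating blocked accesses */
    hwaddr blocksize;
} MyCheckerState;

static bool my_checker_is_blocked(MyCheckerState *s, hwaddr addr,
                                  IOMMUAccessFlags flags)
{
    /* Hypothetical per-block permission lookup. */
    return false;
}

static IOMMUTLBEntry my_checker_translate(IOMMUMemoryRegion *iommu,
                                          hwaddr addr,
                                          IOMMUAccessFlags flags,
                                          int iommu_idx)
{
    MyCheckerState *s = container_of(iommu, MyCheckerState, upstream);
    IOMMUTLBEntry ret = {
        .iova = addr & ~(s->blocksize - 1),
        .translated_addr = addr & ~(s->blocksize - 1),
        .addr_mask = s->blocksize - 1,
        .perm = IOMMU_RW,
    };

    /*
     * Blocked accesses are steered into a different target address
     * space, so the section backing this page is not in the CPU's
     * address space.
     */
    ret.target_as = my_checker_is_blocked(s, addr, flags)
                    ? &s->blocked_io_as : &s->downstream_as;
    return ret;
}

Because the returned section then belongs to blocked_io_as rather than the
CPU's address space, iotlb_to_section() cannot recover it from the
xlat_section index, which is why this patch caches the MemoryRegionSection
pointer in CPUTLBEntryFull instead.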
accel/tcg/cputlb.c | 19 +++++++++----------
include/hw/core/cpu.h | 3 +++
2 files changed, 12 insertions(+), 10 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 117b516739..8cf124b760 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1169,6 +1169,7 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
desc->fulltlb[index] = *full;
full = &desc->fulltlb[index];
full->xlat_section = iotlb - addr_page;
+ full->section = section;
full->phys_addr = paddr_page;
/* Now calculate the new entry */
@@ -1248,14 +1249,14 @@ static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
}
static MemoryRegionSection *
-io_prepare(hwaddr *out_offset, CPUState *cpu, hwaddr xlat,
+io_prepare(hwaddr *out_offset, CPUState *cpu, CPUTLBEntryFull *full,
MemTxAttrs attrs, vaddr addr, uintptr_t retaddr)
{
MemoryRegionSection *section;
hwaddr mr_offset;
- section = iotlb_to_section(cpu, xlat, attrs);
- mr_offset = (xlat & TARGET_PAGE_MASK) + addr;
+ section = full->section;
+ mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
cpu->mem_io_pc = retaddr;
if (!cpu->neg.can_do_io) {
cpu_io_recompile(cpu, retaddr);
@@ -1571,9 +1572,7 @@ bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx,
/* We must have an iotlb entry for MMIO */
if (tlb_addr & TLB_MMIO) {
- MemoryRegionSection *section =
- iotlb_to_section(cpu, full->xlat_section & ~TARGET_PAGE_MASK,
- full->attrs);
+ MemoryRegionSection *section = full->section;
data->is_io = true;
data->mr = section->mr;
} else {
@@ -1972,7 +1971,7 @@ static uint64_t do_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
tcg_debug_assert(size > 0 && size <= 8);
attrs = full->attrs;
- section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
+ section = io_prepare(&mr_offset, cpu, full, attrs, addr, ra);
mr = section->mr;
BQL_LOCK_GUARD();
@@ -1993,7 +1992,7 @@ static Int128 do_ld16_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
tcg_debug_assert(size > 8 && size <= 16);
attrs = full->attrs;
- section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
+ section = io_prepare(&mr_offset, cpu, full, attrs, addr, ra);
mr = section->mr;
BQL_LOCK_GUARD();
@@ -2513,7 +2512,7 @@ static uint64_t do_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
tcg_debug_assert(size > 0 && size <= 8);
attrs = full->attrs;
- section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
+ section = io_prepare(&mr_offset, cpu, full, attrs, addr, ra);
mr = section->mr;
BQL_LOCK_GUARD();
@@ -2533,7 +2532,7 @@ static uint64_t do_st16_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
tcg_debug_assert(size > 8 && size <= 16);
attrs = full->attrs;
- section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
+ section = io_prepare(&mr_offset, cpu, full, attrs, addr, ra);
mr = section->mr;
BQL_LOCK_GUARD();
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index a2c8536943..3f6c10897b 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -217,6 +217,9 @@ typedef struct CPUTLBEntryFull {
*/
hwaddr xlat_section;
+ /* @section points to the MemoryRegionSection for the physical page. */
+ MemoryRegionSection *section;
+
/*
* @phys_addr contains the physical address in the address space
* given by cpu_asidx_from_attrs(cpu, @attrs).