From: Jim Shu
Subject: [RFC PATCH 02/16] accel/tcg: memory access from CPU will pass access_type to IOMMU
Date: Wed, 12 Jun 2024 16:14:02 +0800
This is a preparation patch for the upcoming RISC-V wgChecker device.
Since the RISC-V wgChecker can grant read-only or write-only permission,
the IOMMUMemoryRegion may return different sections for read and write
accesses. Memory accesses from the CPU should therefore also pass the
access_type to the IOMMU translate function, so that the IOMMU can
return the correct section for the specified access type.
Signed-off-by: Jim Shu <jim.shu@sifive.com>
---
accel/tcg/cputlb.c | 15 +++++++++------
include/exec/exec-all.h | 11 +++++++----
system/physmem.c | 16 +++++++++++-----
target/alpha/helper.c | 2 +-
target/arm/tcg/tlb_helper.c | 2 +-
target/avr/helper.c | 2 +-
target/cris/helper.c | 2 +-
target/hppa/mem_helper.c | 2 +-
target/i386/tcg/sysemu/excp_helper.c | 3 ++-
target/loongarch/tcg/tlb_helper.c | 2 +-
target/m68k/helper.c | 10 +++++++---
target/microblaze/helper.c | 8 ++++----
target/mips/tcg/sysemu/tlb_helper.c | 4 ++--
target/openrisc/mmu.c | 2 +-
target/ppc/mmu_helper.c | 2 +-
target/riscv/cpu_helper.c | 2 +-
target/rx/cpu.c | 3 ++-
target/s390x/tcg/excp_helper.c | 2 +-
target/sh4/helper.c | 2 +-
target/sparc/mmu_helper.c | 6 +++---
target/tricore/helper.c | 2 +-
target/xtensa/helper.c | 3 ++-
22 files changed, 61 insertions(+), 42 deletions(-)
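Note for reviewers (not part of the commit message): below is a minimal,
self-contained sketch of the access_type-to-IOMMU-flag mapping that the
physmem.c hunk in this patch introduces in
address_space_translate_for_iotlb(). The enum values here are simplified
stand-ins for QEMU's MMUAccessType and IOMMUAccessFlags definitions; this
only illustrates the mapping and is not the patched code itself.

/* Standalone sketch with simplified stand-ins for QEMU's enums. */
#include <stdio.h>

typedef enum {
    MMU_DATA_LOAD  = 0,
    MMU_DATA_STORE = 1,
    MMU_INST_FETCH = 2,
} MMUAccessType;

typedef enum {
    IOMMU_NONE = 0,
    IOMMU_RO   = 1,
    IOMMU_WO   = 2,
    IOMMU_RW   = 3,
} IOMMUAccessFlags;

/* Stores request write permission; loads and instruction fetches request
 * read permission. A checker that grants only RO or only WO can then
 * return the section matching the access being translated, instead of
 * having to satisfy an "all permissions" (IOMMU_NONE) query. */
static IOMMUAccessFlags access_type_to_iommu_flags(MMUAccessType access_type)
{
    return (access_type == MMU_DATA_STORE) ? IOMMU_WO : IOMMU_RO;
}

int main(void)
{
    printf("MMU_DATA_LOAD  -> %d (IOMMU_RO)\n",
           access_type_to_iommu_flags(MMU_DATA_LOAD));
    printf("MMU_DATA_STORE -> %d (IOMMU_WO)\n",
           access_type_to_iommu_flags(MMU_DATA_STORE));
    printf("MMU_INST_FETCH -> %d (IOMMU_RO)\n",
           access_type_to_iommu_flags(MMU_INST_FETCH));
    return 0;
}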
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 8cf124b760..f1b07f6926 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1036,7 +1036,8 @@ static inline void tlb_set_compare(CPUTLBEntryFull *full, CPUTLBEntry *ent,
* critical section.
*/
void tlb_set_page_full(CPUState *cpu, int mmu_idx,
- vaddr addr, CPUTLBEntryFull *full)
+ vaddr addr, MMUAccessType access_type,
+ CPUTLBEntryFull *full)
{
CPUTLB *tlb = &cpu->neg.tlb;
CPUTLBDesc *desc = &tlb->d[mmu_idx];
@@ -1063,7 +1064,8 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
prot = full->prot;
asidx = cpu_asidx_from_attrs(cpu, full->attrs);
section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
- &xlat, &sz, full->attrs, &prot);
+ &xlat, &sz, full->attrs, &prot,
+ access_type);
assert(sz >= TARGET_PAGE_SIZE);
tlb_debug("vaddr=%016" VADDR_PRIx " paddr=0x" HWADDR_FMT_plx
@@ -1200,7 +1202,8 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
hwaddr paddr, MemTxAttrs attrs, int prot,
- int mmu_idx, uint64_t size)
+ MMUAccessType access_type, int mmu_idx,
+ uint64_t size)
{
CPUTLBEntryFull full = {
.phys_addr = paddr,
@@ -1210,15 +1213,15 @@ void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
};
assert(is_power_of_2(size));
- tlb_set_page_full(cpu, mmu_idx, addr, &full);
+ tlb_set_page_full(cpu, mmu_idx, addr, access_type, &full);
}
void tlb_set_page(CPUState *cpu, vaddr addr,
- hwaddr paddr, int prot,
+ hwaddr paddr, int prot, MMUAccessType access_type,
int mmu_idx, uint64_t size)
{
tlb_set_page_with_attrs(cpu, addr, paddr, MEMTXATTRS_UNSPECIFIED,
- prot, mmu_idx, size);
+ prot, access_type, mmu_idx, size);
}
/*
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index b6b46ad13c..0d5363ac02 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -205,7 +205,7 @@ void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
* used by tlb_flush_page.
*/
void tlb_set_page_full(CPUState *cpu, int mmu_idx, vaddr addr,
- CPUTLBEntryFull *full);
+ MMUAccessType access_type, CPUTLBEntryFull *full);
/**
* tlb_set_page_with_attrs:
@@ -231,7 +231,8 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx, vaddr addr,
*/
void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
hwaddr paddr, MemTxAttrs attrs,
- int prot, int mmu_idx, vaddr size);
+ int prot, MMUAccessType access_type, int mmu_idx,
+ vaddr size);
/* tlb_set_page:
*
* This function is equivalent to calling tlb_set_page_with_attrs()
@@ -240,7 +241,8 @@ void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
*/
void tlb_set_page(CPUState *cpu, vaddr addr,
hwaddr paddr, int prot,
- int mmu_idx, vaddr size);
+ MMUAccessType access_type, int mmu_idx,
+ vaddr size);
#else
static inline void tlb_init(CPUState *cpu)
{
@@ -591,7 +593,8 @@ void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length);
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
hwaddr *xlat, hwaddr *plen,
- MemTxAttrs attrs, int *prot);
+ MemTxAttrs attrs, int *prot,
+ MMUAccessType access_type);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
MemoryRegionSection *section);
#endif
diff --git a/system/physmem.c b/system/physmem.c
index b7847db1a2..a5a0ea77bd 100644
--- a/system/physmem.c
+++ b/system/physmem.c
@@ -674,12 +674,14 @@ void tcg_iommu_init_notifier_list(CPUState *cpu)
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr orig_addr,
hwaddr *xlat, hwaddr *plen,
- MemTxAttrs attrs, int *prot)
+ MemTxAttrs attrs, int *prot,
+ MMUAccessType access_type)
{
MemoryRegionSection *section;
IOMMUMemoryRegion *iommu_mr;
IOMMUMemoryRegionClass *imrc;
IOMMUTLBEntry iotlb;
+ IOMMUAccessFlags iommu_flags;
int iommu_idx;
hwaddr addr = orig_addr;
AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;
@@ -696,10 +698,14 @@ address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr orig_addr,
iommu_idx = imrc->attrs_to_index(iommu_mr, attrs);
tcg_register_iommu_notifier(cpu, iommu_mr, iommu_idx);
- /* We need all the permissions, so pass IOMMU_NONE so the IOMMU
- * doesn't short-cut its translation table walk.
- */
- iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, iommu_idx);
+
+ if (access_type == MMU_DATA_STORE) {
+ iommu_flags = IOMMU_WO;
+ } else {
+ iommu_flags = IOMMU_RO;
+ }
+
+ iotlb = imrc->translate(iommu_mr, addr, iommu_flags, iommu_idx);
addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
| (addr & iotlb.addr_mask));
/* Update the caller's prot bits to remove permissions the IOMMU
diff --git a/target/alpha/helper.c b/target/alpha/helper.c
index 2f1000c99f..7ea548f1d0 100644
--- a/target/alpha/helper.c
+++ b/target/alpha/helper.c
@@ -318,7 +318,7 @@ bool alpha_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
}
tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
- prot, mmu_idx, TARGET_PAGE_SIZE);
+ prot, access_type, mmu_idx, TARGET_PAGE_SIZE);
return true;
}
diff --git a/target/arm/tcg/tlb_helper.c b/target/arm/tcg/tlb_helper.c
index 885bf4ec14..186c612957 100644
--- a/target/arm/tcg/tlb_helper.c
+++ b/target/arm/tcg/tlb_helper.c
@@ -361,7 +361,7 @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
res.f.extra.arm.pte_attrs = res.cacheattrs.attrs;
res.f.extra.arm.shareability = res.cacheattrs.shareability;
- tlb_set_page_full(cs, mmu_idx, address, &res.f);
+ tlb_set_page_full(cs, mmu_idx, address, access_type, &res.f);
return true;
} else if (probe) {
return false;
diff --git a/target/avr/helper.c b/target/avr/helper.c
index 345708a1b3..07d795848e 100644
--- a/target/avr/helper.c
+++ b/target/avr/helper.c
@@ -149,7 +149,7 @@ bool avr_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
}
}
- tlb_set_page(cs, address, paddr, prot, mmu_idx, page_size);
+ tlb_set_page(cs, address, paddr, prot, access_type, mmu_idx, page_size);
return true;
}
diff --git a/target/cris/helper.c b/target/cris/helper.c
index 1c3f86876f..644244312c 100644
--- a/target/cris/helper.c
+++ b/target/cris/helper.c
@@ -68,7 +68,7 @@ bool cris_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
phy = res.phy & ~0x80000000;
prot = res.prot;
tlb_set_page(cs, address & TARGET_PAGE_MASK, phy,
- prot, mmu_idx, TARGET_PAGE_SIZE);
+ prot, access_type, mmu_idx, TARGET_PAGE_SIZE);
return true;
}
diff --git a/target/hppa/mem_helper.c b/target/hppa/mem_helper.c
index b984f730aa..0a42d47ce9 100644
--- a/target/hppa/mem_helper.c
+++ b/target/hppa/mem_helper.c
@@ -461,7 +461,7 @@ bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
* because we record the large page here in the hppa tlb.
*/
tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
- prot, mmu_idx, TARGET_PAGE_SIZE);
+ prot, type, mmu_idx, TARGET_PAGE_SIZE);
return true;
}
diff --git a/target/i386/tcg/sysemu/excp_helper.c b/target/i386/tcg/sysemu/excp_helper.c
index 8fb05b1f53..c7a53a1179 100644
--- a/target/i386/tcg/sysemu/excp_helper.c
+++ b/target/i386/tcg/sysemu/excp_helper.c
@@ -610,7 +610,8 @@ bool x86_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
tlb_set_page_with_attrs(cs, addr & TARGET_PAGE_MASK,
out.paddr & TARGET_PAGE_MASK,
cpu_get_mem_attrs(env),
- out.prot, mmu_idx, out.page_size);
+ out.prot, access_type, mmu_idx,
+ out.page_size);
return true;
}
diff --git a/target/loongarch/tcg/tlb_helper.c b/target/loongarch/tcg/tlb_helper.c
index d6331f9b0b..7f9a44128a 100644
--- a/target/loongarch/tcg/tlb_helper.c
+++ b/target/loongarch/tcg/tlb_helper.c
@@ -490,7 +490,7 @@ bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
if (ret == TLBRET_MATCH) {
tlb_set_page(cs, address & TARGET_PAGE_MASK,
physical & TARGET_PAGE_MASK, prot,
- mmu_idx, TARGET_PAGE_SIZE);
+ access_type, mmu_idx, TARGET_PAGE_SIZE);
qemu_log_mask(CPU_LOG_MMU,
"%s address=%" VADDR_PRIx " physical " HWADDR_FMT_plx
" prot %d\n", __func__, address, physical, prot);
diff --git a/target/m68k/helper.c b/target/m68k/helper.c
index 7967ad13cb..e63ebe4e18 100644
--- a/target/m68k/helper.c
+++ b/target/m68k/helper.c
@@ -967,7 +967,7 @@ bool m68k_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
tlb_set_page(cs, address & TARGET_PAGE_MASK,
address & TARGET_PAGE_MASK,
PAGE_READ | PAGE_WRITE | PAGE_EXEC,
- mmu_idx, TARGET_PAGE_SIZE);
+ qemu_access_type, mmu_idx, TARGET_PAGE_SIZE);
return true;
}
@@ -987,7 +987,8 @@ bool m68k_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
address, access_type, &page_size);
if (likely(ret == 0)) {
tlb_set_page(cs, address & TARGET_PAGE_MASK,
- physical & TARGET_PAGE_MASK, prot, mmu_idx, page_size);
+ physical & TARGET_PAGE_MASK, prot, qemu_access_type,
+ mmu_idx, page_size);
return true;
}
@@ -1459,6 +1460,7 @@ void HELPER(ptest)(CPUM68KState *env, uint32_t addr, uint32_t is_read)
int prot;
int ret;
target_ulong page_size;
+ MMUAccessType qemu_access_type = MMU_DATA_LOAD;
access_type = ACCESS_PTEST;
if (env->dfc & 4) {
@@ -1466,9 +1468,11 @@ void HELPER(ptest)(CPUM68KState *env, uint32_t addr, uint32_t is_read)
}
if ((env->dfc & 3) == 2) {
access_type |= ACCESS_CODE;
+ qemu_access_type = MMU_INST_FETCH;
}
if (!is_read) {
access_type |= ACCESS_STORE;
+ qemu_access_type = MMU_DATA_STORE;
}
env->mmu.mmusr = 0;
@@ -1478,7 +1482,7 @@ void HELPER(ptest)(CPUM68KState *env, uint32_t addr, uint32_t is_read)
if (ret == 0) {
tlb_set_page(env_cpu(env), addr & TARGET_PAGE_MASK,
physical & TARGET_PAGE_MASK,
- prot, access_type & ACCESS_SUPER ?
+ prot, qemu_access_type, access_type & ACCESS_SUPER ?
MMU_KERNEL_IDX : MMU_USER_IDX, page_size);
}
}
diff --git a/target/microblaze/helper.c b/target/microblaze/helper.c
index 5d3259ce31..5229daf1d5 100644
--- a/target/microblaze/helper.c
+++ b/target/microblaze/helper.c
@@ -53,8 +53,8 @@ bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
/* MMU disabled or not available. */
address &= TARGET_PAGE_MASK;
prot = PAGE_RWX;
- tlb_set_page_with_attrs(cs, address, address, attrs, prot, mmu_idx,
- TARGET_PAGE_SIZE);
+ tlb_set_page_with_attrs(cs, address, address, attrs, prot, access_type,
+ mmu_idx, TARGET_PAGE_SIZE);
return true;
}
@@ -65,8 +65,8 @@ bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
qemu_log_mask(CPU_LOG_MMU, "MMU map mmu=%d v=%x p=%x prot=%x\n",
mmu_idx, vaddr, paddr, lu.prot);
- tlb_set_page_with_attrs(cs, vaddr, paddr, attrs, lu.prot, mmu_idx,
- TARGET_PAGE_SIZE);
+ tlb_set_page_with_attrs(cs, vaddr, paddr, attrs, lu.prot, access_type,
+ mmu_idx, TARGET_PAGE_SIZE);
return true;
}
diff --git a/target/mips/tcg/sysemu/tlb_helper.c b/target/mips/tcg/sysemu/tlb_helper.c
index 3ba6d369a6..90b9668bd0 100644
--- a/target/mips/tcg/sysemu/tlb_helper.c
+++ b/target/mips/tcg/sysemu/tlb_helper.c
@@ -931,7 +931,7 @@ bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
if (ret == TLBRET_MATCH) {
tlb_set_page(cs, address & TARGET_PAGE_MASK,
physical & TARGET_PAGE_MASK, prot,
- mmu_idx, TARGET_PAGE_SIZE);
+ access_type, mmu_idx, TARGET_PAGE_SIZE);
return true;
}
#if !defined(TARGET_MIPS64)
@@ -949,7 +949,7 @@ bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
if (ret == TLBRET_MATCH) {
tlb_set_page(cs, address & TARGET_PAGE_MASK,
physical & TARGET_PAGE_MASK, prot,
- mmu_idx, TARGET_PAGE_SIZE);
+ access_type, mmu_idx, TARGET_PAGE_SIZE);
return true;
}
}
diff --git a/target/openrisc/mmu.c b/target/openrisc/mmu.c
index c632d5230b..a04003c37e 100644
--- a/target/openrisc/mmu.c
+++ b/target/openrisc/mmu.c
@@ -128,7 +128,7 @@ bool openrisc_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
if (likely(excp == 0)) {
tlb_set_page(cs, addr & TARGET_PAGE_MASK,
phys_addr & TARGET_PAGE_MASK, prot,
- mmu_idx, TARGET_PAGE_SIZE);
+ access_type, mmu_idx, TARGET_PAGE_SIZE);
return true;
}
if (probe) {
diff --git a/target/ppc/mmu_helper.c b/target/ppc/mmu_helper.c
index b0a0676beb..dd264ac335 100644
--- a/target/ppc/mmu_helper.c
+++ b/target/ppc/mmu_helper.c
@@ -1368,7 +1368,7 @@ bool ppc_cpu_tlb_fill(CPUState *cs, vaddr eaddr, int size,
if (ppc_xlate(cpu, eaddr, access_type, &raddr,
&page_size, &prot, mmu_idx, !probe)) {
tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
- prot, mmu_idx, 1UL << page_size);
+ prot, access_type, mmu_idx, 1UL << page_size);
return true;
}
if (probe) {
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index 6709622dd3..161df34626 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -1419,7 +1419,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
if (ret == TRANSLATE_SUCCESS) {
tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1),
- prot, mmu_idx, tlb_size);
+ prot, access_type, mmu_idx, tlb_size);
return true;
} else if (probe) {
return false;
diff --git a/target/rx/cpu.c b/target/rx/cpu.c
index 8a584f0a11..2e0fd20aef 100644
--- a/target/rx/cpu.c
+++ b/target/rx/cpu.c
@@ -163,7 +163,8 @@ static bool rx_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
/* Linear mapping */
address = physical = addr & TARGET_PAGE_MASK;
prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
- tlb_set_page(cs, address, physical, prot, mmu_idx, TARGET_PAGE_SIZE);
+ tlb_set_page(cs, address, physical, prot, access_type,
+ mmu_idx, TARGET_PAGE_SIZE);
return true;
}
diff --git a/target/s390x/tcg/excp_helper.c b/target/s390x/tcg/excp_helper.c
index 4c0b692c9e..bcd9f80474 100644
--- a/target/s390x/tcg/excp_helper.c
+++ b/target/s390x/tcg/excp_helper.c
@@ -178,7 +178,7 @@ bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
"%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
__func__, (uint64_t)vaddr, (uint64_t)raddr, prot);
tlb_set_page(cs, address & TARGET_PAGE_MASK, raddr, prot,
- mmu_idx, TARGET_PAGE_SIZE);
+ access_type, mmu_idx, TARGET_PAGE_SIZE);
return true;
}
if (probe) {
diff --git a/target/sh4/helper.c b/target/sh4/helper.c
index 6702910627..00b06898b7 100644
--- a/target/sh4/helper.c
+++ b/target/sh4/helper.c
@@ -807,7 +807,7 @@ bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
if (ret == MMU_OK) {
address &= TARGET_PAGE_MASK;
physical &= TARGET_PAGE_MASK;
- tlb_set_page(cs, address, physical, prot, mmu_idx, TARGET_PAGE_SIZE);
+ tlb_set_page(cs, address, physical, prot, access_type, mmu_idx, TARGET_PAGE_SIZE);
return true;
}
if (probe) {
diff --git a/target/sparc/mmu_helper.c b/target/sparc/mmu_helper.c
index 9ff06026b8..1dade2eef8 100644
--- a/target/sparc/mmu_helper.c
+++ b/target/sparc/mmu_helper.c
@@ -229,7 +229,7 @@ bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
"Translate at %" VADDR_PRIx " -> "
HWADDR_FMT_plx ", vaddr " TARGET_FMT_lx "\n",
address, full.phys_addr, vaddr);
- tlb_set_page_full(cs, mmu_idx, vaddr, &full);
+ tlb_set_page_full(cs, mmu_idx, vaddr, access_type, &full);
return true;
}
@@ -245,7 +245,7 @@ bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
neverland. Fake/overridden mappings will be flushed when
switching to normal mode. */
full.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
- tlb_set_page_full(cs, mmu_idx, vaddr, &full);
+ tlb_set_page_full(cs, mmu_idx, vaddr, access_type, &full);
return true;
} else {
if (access_type == MMU_INST_FETCH) {
@@ -769,7 +769,7 @@ bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
trace_mmu_helper_mmu_fault(address, full.phys_addr, mmu_idx, env->tl,
env->dmmu.mmu_primary_context,
env->dmmu.mmu_secondary_context);
- tlb_set_page_full(cs, mmu_idx, address, &full);
+ tlb_set_page_full(cs, mmu_idx, address, access_type, &full);
return true;
}
if (probe) {
diff --git a/target/tricore/helper.c b/target/tricore/helper.c
index 7014255f77..295f515772 100644
--- a/target/tricore/helper.c
+++ b/target/tricore/helper.c
@@ -84,7 +84,7 @@ bool tricore_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
if (ret == TLBRET_MATCH) {
tlb_set_page(cs, address & TARGET_PAGE_MASK,
physical & TARGET_PAGE_MASK, prot | PAGE_EXEC,
- mmu_idx, TARGET_PAGE_SIZE);
+ rw, mmu_idx, TARGET_PAGE_SIZE);
return true;
} else {
assert(ret < 0);
diff --git a/target/xtensa/helper.c b/target/xtensa/helper.c
index ca214b948a..3a155638c5 100644
--- a/target/xtensa/helper.c
+++ b/target/xtensa/helper.c
@@ -280,7 +280,8 @@ bool xtensa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
tlb_set_page(cs,
address & TARGET_PAGE_MASK,
paddr & TARGET_PAGE_MASK,
- access, mmu_idx, page_size);
+ access, access_type, mmu_idx,
+ page_size);
return true;
} else if (probe) {
return false;
--
2.17.1