[RFC v9 07/50] target/arm: move physical address translation to cpu-mmu
From: Claudio Fontana
Subject: [RFC v9 07/50] target/arm: move physical address translation to cpu-mmu
Date: Wed, 17 Mar 2021 19:29:30 +0100
get_phys_addr() is needed for KVM too, and it in turn requires
the aa64_va_parameter* family of functions.
Create cpu-mmu and cpu-mmu-sysemu to hold these and
other MMU-related functions.
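
As a rough illustration (not part of this patch), a non-TCG caller such
as future KVM code could use the interface exposed by cpu-mmu.h along
these lines; example_translate() is a hypothetical helper, everything
else is the existing API:

    /* Minimal sketch, sysemu-only: walk the tables for 'va' in the
     * current translation regime and return the physical address,
     * or ~0 on a translation fault (details are left in 'fi').
     */
    static hwaddr example_translate(CPUARMState *env, target_ulong va)
    {
        hwaddr phys;
        target_ulong page_size;
        int prot;
        MemTxAttrs attrs = {};
        ARMMMUFaultInfo fi = {};
        ARMCacheAttrs cacheattrs = {};
        ARMMMUIdx mmu_idx = arm_mmu_idx(env);

        if (get_phys_addr(env, va, MMU_DATA_LOAD, mmu_idx,
                          &phys, &attrs, &prot, &page_size,
                          &fi, &cacheattrs)) {
            return ~0;
        }
        return phys;
    }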
Signed-off-by: Claudio Fontana <cfontana@suse.de>
---
target/arm/cpu-mmu.h | 118 ++
target/arm/cpu.h | 3 -
target/arm/internals.h | 34 -
target/arm/cpu-mmu-sysemu.c | 2258 +++++++++++++++++++++++++++++++
target/arm/cpu-mmu.c | 124 ++
target/arm/cpu.c | 1 +
target/arm/tcg/helper.c | 2392 +--------------------------------
target/arm/tcg/m_helper.c | 1 +
target/arm/tcg/pauth_helper.c | 2 +-
target/arm/tcg/tlb_helper.c | 1 +
target/arm/meson.build | 2 +
11 files changed, 2507 insertions(+), 2429 deletions(-)
create mode 100644 target/arm/cpu-mmu.h
create mode 100644 target/arm/cpu-mmu-sysemu.c
create mode 100644 target/arm/cpu-mmu.c
diff --git a/target/arm/cpu-mmu.h b/target/arm/cpu-mmu.h
new file mode 100644
index 0000000000..fdedc8fb92
--- /dev/null
+++ b/target/arm/cpu-mmu.h
@@ -0,0 +1,118 @@
+/*
+ * QEMU ARM CPU address translation related code
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ */
+#ifndef ARM_CPU_MMU_H
+#define ARM_CPU_MMU_H
+
+#include "cpu.h"
+#include "internals.h"
+
+/*
+ * Parameters of a given virtual address, as extracted from the
+ * translation control register (TCR) for a given regime.
+ */
+typedef struct ARMVAParameters {
+ unsigned tsz : 8;
+ unsigned select : 1;
+ bool tbi : 1;
+ bool epd : 1;
+ bool hpd : 1;
+ bool using16k : 1;
+ bool using64k : 1;
+} ARMVAParameters;
+
+/* cpu-mmu.c */
+
+int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
+int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx);
+int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx);
+ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
+ ARMMMUIdx mmu_idx, bool data);
+
+/* Return the SCTLR value which controls this address translation regime */
+static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
+{
+ return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
+}
+
+/* Convert a possible stage1+2 MMU index into the appropriate
+ * stage 1 MMU index
+ */
+static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
+{
+ switch (mmu_idx) {
+ case ARMMMUIdx_SE10_0:
+ return ARMMMUIdx_Stage1_SE0;
+ case ARMMMUIdx_SE10_1:
+ return ARMMMUIdx_Stage1_SE1;
+ case ARMMMUIdx_SE10_1_PAN:
+ return ARMMMUIdx_Stage1_SE1_PAN;
+ case ARMMMUIdx_E10_0:
+ return ARMMMUIdx_Stage1_E0;
+ case ARMMMUIdx_E10_1:
+ return ARMMMUIdx_Stage1_E1;
+ case ARMMMUIdx_E10_1_PAN:
+ return ARMMMUIdx_Stage1_E1_PAN;
+ default:
+ return mmu_idx;
+ }
+}
+
+/* Return true if the translation regime is using LPAE format page tables */
+static inline bool regime_using_lpae_format(CPUARMState *env,
+ ARMMMUIdx mmu_idx)
+{
+ int el = regime_el(env, mmu_idx);
+ if (el == 2 || arm_el_is_aa64(env, el)) {
+ return true;
+ }
+ if (arm_feature(env, ARM_FEATURE_LPAE)
+ && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
+ return true;
+ }
+ return false;
+}
+
+#ifndef CONFIG_USER_ONLY
+
+/* cpu-mmu-sysemu.c */
+
+void v8m_security_lookup(CPUARMState *env, uint32_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ V8M_SAttributes *sattrs);
+
+bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, MemTxAttrs *txattrs,
+ int *prot, bool *is_subpage,
+ ARMMMUFaultInfo *fi, uint32_t *mregion);
+
+bool get_phys_addr(CPUARMState *env, target_ulong address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
+ target_ulong *page_size,
+ ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
+ __attribute__((nonnull));
+
+hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
+ MemTxAttrs *attrs);
+
+#endif /* !CONFIG_USER_ONLY */
+
+#endif /* ARM_CPU_MMU_H */
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 193a49ec7f..7877d5417f 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -1027,9 +1027,6 @@ void arm_cpu_do_interrupt(CPUState *cpu);
void arm_v7m_cpu_do_interrupt(CPUState *cpu);
bool arm_cpu_exec_interrupt(CPUState *cpu, int int_req);
-hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
- MemTxAttrs *attrs);
-
int arm_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int arm_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
diff --git a/target/arm/internals.h b/target/arm/internals.h
index 479dc10463..e137bb0ac0 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -1021,23 +1021,6 @@ static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
return valid;
}
-/*
- * Parameters of a given virtual address, as extracted from the
- * translation control register (TCR) for a given regime.
- */
-typedef struct ARMVAParameters {
- unsigned tsz : 8;
- unsigned select : 1;
- bool tbi : 1;
- bool epd : 1;
- bool hpd : 1;
- bool using16k : 1;
- bool using64k : 1;
-} ARMVAParameters;
-
-ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
- ARMMMUIdx mmu_idx, bool data);
-
static inline int exception_target_el(CPUARMState *env)
{
int target_el = MAX(1, arm_current_el(env));
@@ -1085,29 +1068,12 @@ typedef struct V8M_SAttributes {
bool irvalid;
} V8M_SAttributes;
-void v8m_security_lookup(CPUARMState *env, uint32_t address,
- MMUAccessType access_type, ARMMMUIdx mmu_idx,
- V8M_SAttributes *sattrs);
-
-bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
- MMUAccessType access_type, ARMMMUIdx mmu_idx,
- hwaddr *phys_ptr, MemTxAttrs *txattrs,
- int *prot, bool *is_subpage,
- ARMMMUFaultInfo *fi, uint32_t *mregion);
-
/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
unsigned int attrs:8; /* as in the MAIR register encoding */
unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
} ARMCacheAttrs;
-bool get_phys_addr(CPUARMState *env, target_ulong address,
- MMUAccessType access_type, ARMMMUIdx mmu_idx,
- hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
- target_ulong *page_size,
- ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
- __attribute__((nonnull));
-
void arm_log_exception(int idx);
#endif /* !CONFIG_USER_ONLY */
diff --git a/target/arm/cpu-mmu-sysemu.c b/target/arm/cpu-mmu-sysemu.c
new file mode 100644
index 0000000000..fcea5e4fb8
--- /dev/null
+++ b/target/arm/cpu-mmu-sysemu.c
@@ -0,0 +1,2258 @@
+/*
+ * QEMU ARM CPU address translation related code (sysemu-only)
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+
+#include "target/arm/idau.h"
+#include "qemu/range.h"
+#include "cpu-mmu.h"
+
+static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ bool s1_is_el0,
+                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
+ target_ulong *page_size_ptr,
+ ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
+ __attribute__((nonnull));
+
+/* Return true if the specified stage of address translation is disabled */
+static inline bool regime_translation_disabled(CPUARMState *env,
+ ARMMMUIdx mmu_idx)
+{
+ uint64_t hcr_el2;
+
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
+ (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
+ case R_V7M_MPU_CTRL_ENABLE_MASK:
+ /* Enabled, but not for HardFault and NMI */
+ return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
+ case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
+ /* Enabled for all cases */
+ return false;
+ case 0:
+ default:
+ /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
+ * we warned about that in armv7m_nvic.c when the guest set it.
+ */
+ return true;
+ }
+ }
+
+ hcr_el2 = arm_hcr_el2_eff(env);
+
+ if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
+ /* HCR.DC means HCR.VM behaves as 1 */
+ return (hcr_el2 & (HCR_DC | HCR_VM)) == 0;
+ }
+
+ if (hcr_el2 & HCR_TGE) {
+ /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */
+ if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) {
+ return true;
+ }
+ }
+
+ if ((hcr_el2 & HCR_DC) && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
+ /* HCR.DC means SCTLR_EL1.M behaves as 0 */
+ return true;
+ }
+
+ return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
+}
+
+static inline bool regime_translation_big_endian(CPUARMState *env,
+ ARMMMUIdx mmu_idx)
+{
+ return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
+}
+
+/* Return the TTBR associated with this translation regime */
+static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
+ int ttbrn)
+{
+ if (mmu_idx == ARMMMUIdx_Stage2) {
+ return env->cp15.vttbr_el2;
+ }
+ if (mmu_idx == ARMMMUIdx_Stage2_S) {
+ return env->cp15.vsttbr_el2;
+ }
+ if (ttbrn == 0) {
+ return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
+ } else {
+ return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
+ }
+}
+
+static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
+{
+ switch (mmu_idx) {
+ case ARMMMUIdx_SE10_0:
+ case ARMMMUIdx_E20_0:
+ case ARMMMUIdx_SE20_0:
+ case ARMMMUIdx_Stage1_E0:
+ case ARMMMUIdx_Stage1_SE0:
+ case ARMMMUIdx_MUser:
+ case ARMMMUIdx_MSUser:
+ case ARMMMUIdx_MUserNegPri:
+ case ARMMMUIdx_MSUserNegPri:
+ return true;
+ default:
+ return false;
+ case ARMMMUIdx_E10_0:
+ case ARMMMUIdx_E10_1:
+ case ARMMMUIdx_E10_1_PAN:
+ g_assert_not_reached();
+ }
+}
+
+/* Translate section/page access permissions to page
+ * R/W protection flags
+ *
+ * @env: CPUARMState
+ * @mmu_idx: MMU index indicating required translation regime
+ * @ap: The 3-bit access permissions (AP[2:0])
+ * @domain_prot: The 2-bit domain access permissions
+ */
+static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
+ int ap, int domain_prot)
+{
+ bool is_user = regime_is_user(env, mmu_idx);
+
+ if (domain_prot == 3) {
+ return PAGE_READ | PAGE_WRITE;
+ }
+
+ switch (ap) {
+ case 0:
+ if (arm_feature(env, ARM_FEATURE_V7)) {
+ return 0;
+ }
+ switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
+ case SCTLR_S:
+ return is_user ? 0 : PAGE_READ;
+ case SCTLR_R:
+ return PAGE_READ;
+ default:
+ return 0;
+ }
+ case 1:
+ return is_user ? 0 : PAGE_READ | PAGE_WRITE;
+ case 2:
+ if (is_user) {
+ return PAGE_READ;
+ } else {
+ return PAGE_READ | PAGE_WRITE;
+ }
+ case 3:
+ return PAGE_READ | PAGE_WRITE;
+ case 4: /* Reserved. */
+ return 0;
+ case 5:
+ return is_user ? 0 : PAGE_READ;
+ case 6:
+ return PAGE_READ;
+ case 7:
+ if (!arm_feature(env, ARM_FEATURE_V6K)) {
+ return 0;
+ }
+ return PAGE_READ;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+/* Translate section/page access permissions to page
+ * R/W protection flags.
+ *
+ * @ap: The 2-bit simple AP (AP[2:1])
+ * @is_user: TRUE if accessing from PL0
+ */
+static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
+{
+ switch (ap) {
+ case 0:
+ return is_user ? 0 : PAGE_READ | PAGE_WRITE;
+ case 1:
+ return PAGE_READ | PAGE_WRITE;
+ case 2:
+ return is_user ? 0 : PAGE_READ;
+ case 3:
+ return PAGE_READ;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static inline int
+simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
+{
+ return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
+}
+
+/* Translate S2 section/page access permissions to protection flags
+ *
+ * @env: CPUARMState
+ * @s2ap: The 2-bit stage2 access permissions (S2AP)
+ * @xn: XN (execute-never) bits
+ * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0
+ */
+static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
+{
+ int prot = 0;
+
+ if (s2ap & 1) {
+ prot |= PAGE_READ;
+ }
+ if (s2ap & 2) {
+ prot |= PAGE_WRITE;
+ }
+
+ if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
+ switch (xn) {
+ case 0:
+ prot |= PAGE_EXEC;
+ break;
+ case 1:
+ if (s1_is_el0) {
+ prot |= PAGE_EXEC;
+ }
+ break;
+ case 2:
+ break;
+ case 3:
+ if (!s1_is_el0) {
+ prot |= PAGE_EXEC;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ } else {
+ if (!extract32(xn, 1, 1)) {
+ if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
+ prot |= PAGE_EXEC;
+ }
+ }
+ }
+ return prot;
+}
+
+/* Translate section/page access permissions to protection flags
+ *
+ * @env: CPUARMState
+ * @mmu_idx: MMU index indicating required translation regime
+ * @is_aa64: TRUE if AArch64
+ * @ap: The 2-bit simple AP (AP[2:1])
+ * @ns: NS (non-secure) bit
+ * @xn: XN (execute-never) bit
+ * @pxn: PXN (privileged execute-never) bit
+ */
+static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
+ int ap, int ns, int xn, int pxn)
+{
+ bool is_user = regime_is_user(env, mmu_idx);
+ int prot_rw, user_rw;
+ bool have_wxn;
+ int wxn = 0;
+
+ assert(mmu_idx != ARMMMUIdx_Stage2);
+ assert(mmu_idx != ARMMMUIdx_Stage2_S);
+
+ user_rw = simple_ap_to_rw_prot_is_user(ap, true);
+ if (is_user) {
+ prot_rw = user_rw;
+ } else {
+ if (user_rw && regime_is_pan(env, mmu_idx)) {
+ /* PAN forbids data accesses but doesn't affect insn fetch */
+ prot_rw = 0;
+ } else {
+ prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
+ }
+ }
+
+ if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
+ return prot_rw;
+ }
+
+ /* TODO have_wxn should be replaced with
+ * ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
+ * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
+ * compatible processors have EL2, which is required for [U]WXN.
+ */
+ have_wxn = arm_feature(env, ARM_FEATURE_LPAE);
+
+ if (have_wxn) {
+ wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
+ }
+
+ if (is_aa64) {
+ if (regime_has_2_ranges(mmu_idx) && !is_user) {
+ xn = pxn || (user_rw & PAGE_WRITE);
+ }
+ } else if (arm_feature(env, ARM_FEATURE_V7)) {
+ switch (regime_el(env, mmu_idx)) {
+ case 1:
+ case 3:
+ if (is_user) {
+ xn = xn || !(user_rw & PAGE_READ);
+ } else {
+ int uwxn = 0;
+ if (have_wxn) {
+ uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
+ }
+ xn = xn || !(prot_rw & PAGE_READ) || pxn ||
+ (uwxn && (user_rw & PAGE_WRITE));
+ }
+ break;
+ case 2:
+ break;
+ }
+ } else {
+ xn = wxn = 0;
+ }
+
+ if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
+ return prot_rw;
+ }
+ return prot_rw | PAGE_EXEC;
+}
+
+static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
+ uint32_t *table, uint32_t address)
+{
+ /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
+ TCR *tcr = regime_tcr(env, mmu_idx);
+
+ if (address & tcr->mask) {
+ if (tcr->raw_tcr & TTBCR_PD1) {
+ /* Translation table walk disabled for TTBR1 */
+ return false;
+ }
+ *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
+ } else {
+ if (tcr->raw_tcr & TTBCR_PD0) {
+ /* Translation table walk disabled for TTBR0 */
+ return false;
+ }
+ *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
+ }
+ *table |= (address >> 18) & 0x3ffc;
+ return true;
+}
+
+/* Translate a S1 pagetable walk through S2 if needed. */
+static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
+ hwaddr addr, bool *is_secure,
+ ARMMMUFaultInfo *fi)
+{
+ if (arm_mmu_idx_is_stage1_of_2(mmu_idx) &&
+ !regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
+ target_ulong s2size;
+ hwaddr s2pa;
+ int s2prot;
+ int ret;
+ ARMMMUIdx s2_mmu_idx = *is_secure ? ARMMMUIdx_Stage2_S
+ : ARMMMUIdx_Stage2;
+ ARMCacheAttrs cacheattrs = {};
+ MemTxAttrs txattrs = {};
+
+ ret = get_phys_addr_lpae(env, addr, MMU_DATA_LOAD, s2_mmu_idx, false,
+ &s2pa, &txattrs, &s2prot, &s2size, fi,
+ &cacheattrs);
+ if (ret) {
+ assert(fi->type != ARMFault_None);
+ fi->s2addr = addr;
+ fi->stage2 = true;
+ fi->s1ptw = true;
+ fi->s1ns = !*is_secure;
+ return ~0;
+ }
+ if ((arm_hcr_el2_eff(env) & HCR_PTW) &&
+ (cacheattrs.attrs & 0xf0) == 0) {
+ /*
+ * PTW set and S1 walk touched S2 Device memory:
+ * generate Permission fault.
+ */
+ fi->type = ARMFault_Permission;
+ fi->s2addr = addr;
+ fi->stage2 = true;
+ fi->s1ptw = true;
+ fi->s1ns = !*is_secure;
+ return ~0;
+ }
+
+ if (arm_is_secure_below_el3(env)) {
+ /* Check if page table walk is to secure or non-secure PA space. */
+ if (*is_secure) {
+ *is_secure = !(env->cp15.vstcr_el2.raw_tcr & VSTCR_SW);
+ } else {
+ *is_secure = !(env->cp15.vtcr_el2.raw_tcr & VTCR_NSW);
+ }
+ } else {
+ assert(!*is_secure);
+ }
+
+ addr = s2pa;
+ }
+ return addr;
+}
+
+/* All loads done in the course of a page table walk go through here. */
+static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
+ ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ MemTxAttrs attrs = {};
+ MemTxResult result = MEMTX_OK;
+ AddressSpace *as;
+ uint32_t data;
+
+ addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
+ attrs.secure = is_secure;
+ as = arm_addressspace(cs, attrs);
+ if (fi->s1ptw) {
+ return 0;
+ }
+ if (regime_translation_big_endian(env, mmu_idx)) {
+ data = address_space_ldl_be(as, addr, attrs, &result);
+ } else {
+ data = address_space_ldl_le(as, addr, attrs, &result);
+ }
+ if (result == MEMTX_OK) {
+ return data;
+ }
+ fi->type = ARMFault_SyncExternalOnWalk;
+ fi->ea = arm_extabort_type(result);
+ return 0;
+}
+
+static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
+ ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ MemTxAttrs attrs = {};
+ MemTxResult result = MEMTX_OK;
+ AddressSpace *as;
+ uint64_t data;
+
+ addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
+ attrs.secure = is_secure;
+ as = arm_addressspace(cs, attrs);
+ if (fi->s1ptw) {
+ return 0;
+ }
+ if (regime_translation_big_endian(env, mmu_idx)) {
+ data = address_space_ldq_be(as, addr, attrs, &result);
+ } else {
+ data = address_space_ldq_le(as, addr, attrs, &result);
+ }
+ if (result == MEMTX_OK) {
+ return data;
+ }
+ fi->type = ARMFault_SyncExternalOnWalk;
+ fi->ea = arm_extabort_type(result);
+ return 0;
+}
+
+static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, int *prot,
+ target_ulong *page_size,
+ ARMMMUFaultInfo *fi)
+{
+ CPUState *cs = env_cpu(env);
+ int level = 1;
+ uint32_t table;
+ uint32_t desc;
+ int type;
+ int ap;
+ int domain = 0;
+ int domain_prot;
+ hwaddr phys_addr;
+ uint32_t dacr;
+
+ /* Pagetable walk. */
+ /* Lookup l1 descriptor. */
+ if (!get_level1_table_address(env, mmu_idx, &table, address)) {
+ /* Section translation fault if page walk is disabled by PD0 or PD1 */
+ fi->type = ARMFault_Translation;
+ goto do_fault;
+ }
+ desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
+ mmu_idx, fi);
+ if (fi->type != ARMFault_None) {
+ goto do_fault;
+ }
+ type = (desc & 3);
+ domain = (desc >> 5) & 0x0f;
+ if (regime_el(env, mmu_idx) == 1) {
+ dacr = env->cp15.dacr_ns;
+ } else {
+ dacr = env->cp15.dacr_s;
+ }
+ domain_prot = (dacr >> (domain * 2)) & 3;
+ if (type == 0) {
+ /* Section translation fault. */
+ fi->type = ARMFault_Translation;
+ goto do_fault;
+ }
+ if (type != 2) {
+ level = 2;
+ }
+ if (domain_prot == 0 || domain_prot == 2) {
+ fi->type = ARMFault_Domain;
+ goto do_fault;
+ }
+ if (type == 2) {
+ /* 1Mb section. */
+ phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
+ ap = (desc >> 10) & 3;
+ *page_size = 1024 * 1024;
+ } else {
+ /* Lookup l2 entry. */
+ if (type == 1) {
+ /* Coarse pagetable. */
+ table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
+ } else {
+ /* Fine pagetable. */
+ table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
+ }
+ desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
+ mmu_idx, fi);
+ if (fi->type != ARMFault_None) {
+ goto do_fault;
+ }
+ switch (desc & 3) {
+ case 0: /* Page translation fault. */
+ fi->type = ARMFault_Translation;
+ goto do_fault;
+ case 1: /* 64k page. */
+ phys_addr = (desc & 0xffff0000) | (address & 0xffff);
+ ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
+ *page_size = 0x10000;
+ break;
+ case 2: /* 4k page. */
+ phys_addr = (desc & 0xfffff000) | (address & 0xfff);
+ ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
+ *page_size = 0x1000;
+ break;
+ case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
+ if (type == 1) {
+ /* ARMv6/XScale extended small page format */
+ if (arm_feature(env, ARM_FEATURE_XSCALE)
+ || arm_feature(env, ARM_FEATURE_V6)) {
+ phys_addr = (desc & 0xfffff000) | (address & 0xfff);
+ *page_size = 0x1000;
+ } else {
+ /* UNPREDICTABLE in ARMv5; we choose to take a
+ * page translation fault.
+ */
+ fi->type = ARMFault_Translation;
+ goto do_fault;
+ }
+ } else {
+ phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
+ *page_size = 0x400;
+ }
+ ap = (desc >> 4) & 3;
+ break;
+ default:
+ /* Never happens, but compiler isn't smart enough to tell. */
+ abort();
+ }
+ }
+ *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
+ *prot |= *prot ? PAGE_EXEC : 0;
+ if (!(*prot & (1 << access_type))) {
+ /* Access permission fault. */
+ fi->type = ARMFault_Permission;
+ goto do_fault;
+ }
+ *phys_ptr = phys_addr;
+ return false;
+do_fault:
+ fi->domain = domain;
+ fi->level = level;
+ return true;
+}
+
+static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
+ target_ulong *page_size, ARMMMUFaultInfo *fi)
+{
+ CPUState *cs = env_cpu(env);
+ ARMCPU *cpu = env_archcpu(env);
+ int level = 1;
+ uint32_t table;
+ uint32_t desc;
+ uint32_t xn;
+ uint32_t pxn = 0;
+ int type;
+ int ap;
+ int domain = 0;
+ int domain_prot;
+ hwaddr phys_addr;
+ uint32_t dacr;
+ bool ns;
+
+ /* Pagetable walk. */
+ /* Lookup l1 descriptor. */
+ if (!get_level1_table_address(env, mmu_idx, &table, address)) {
+ /* Section translation fault if page walk is disabled by PD0 or PD1 */
+ fi->type = ARMFault_Translation;
+ goto do_fault;
+ }
+ desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
+ mmu_idx, fi);
+ if (fi->type != ARMFault_None) {
+ goto do_fault;
+ }
+ type = (desc & 3);
+ if (type == 0 || (type == 3 && !cpu_isar_feature(aa32_pxn, cpu))) {
+ /* Section translation fault, or attempt to use the encoding
+ * which is Reserved on implementations without PXN.
+ */
+ fi->type = ARMFault_Translation;
+ goto do_fault;
+ }
+ if ((type == 1) || !(desc & (1 << 18))) {
+ /* Page or Section. */
+ domain = (desc >> 5) & 0x0f;
+ }
+ if (regime_el(env, mmu_idx) == 1) {
+ dacr = env->cp15.dacr_ns;
+ } else {
+ dacr = env->cp15.dacr_s;
+ }
+ if (type == 1) {
+ level = 2;
+ }
+ domain_prot = (dacr >> (domain * 2)) & 3;
+ if (domain_prot == 0 || domain_prot == 2) {
+ /* Section or Page domain fault */
+ fi->type = ARMFault_Domain;
+ goto do_fault;
+ }
+ if (type != 1) {
+ if (desc & (1 << 18)) {
+ /* Supersection. */
+ phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
+ phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
+ phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
+ *page_size = 0x1000000;
+ } else {
+ /* Section. */
+ phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
+ *page_size = 0x100000;
+ }
+ ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
+ xn = desc & (1 << 4);
+ pxn = desc & 1;
+ ns = extract32(desc, 19, 1);
+ } else {
+ if (cpu_isar_feature(aa32_pxn, cpu)) {
+ pxn = (desc >> 2) & 1;
+ }
+ ns = extract32(desc, 3, 1);
+ /* Lookup l2 entry. */
+ table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
+ desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
+ mmu_idx, fi);
+ if (fi->type != ARMFault_None) {
+ goto do_fault;
+ }
+ ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
+ switch (desc & 3) {
+ case 0: /* Page translation fault. */
+ fi->type = ARMFault_Translation;
+ goto do_fault;
+ case 1: /* 64k page. */
+ phys_addr = (desc & 0xffff0000) | (address & 0xffff);
+ xn = desc & (1 << 15);
+ *page_size = 0x10000;
+ break;
+ case 2: case 3: /* 4k page. */
+ phys_addr = (desc & 0xfffff000) | (address & 0xfff);
+ xn = desc & 1;
+ *page_size = 0x1000;
+ break;
+ default:
+ /* Never happens, but compiler isn't smart enough to tell. */
+ abort();
+ }
+ }
+ if (domain_prot == 3) {
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ } else {
+ if (pxn && !regime_is_user(env, mmu_idx)) {
+ xn = 1;
+ }
+ if (xn && access_type == MMU_INST_FETCH) {
+ fi->type = ARMFault_Permission;
+ goto do_fault;
+ }
+
+ if (arm_feature(env, ARM_FEATURE_V6K) &&
+ (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
+ /* The simplified model uses AP[0] as an access control bit. */
+ if ((ap & 1) == 0) {
+ /* Access flag fault. */
+ fi->type = ARMFault_AccessFlag;
+ goto do_fault;
+ }
+ *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
+ } else {
+ *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
+ }
+ if (*prot && !xn) {
+ *prot |= PAGE_EXEC;
+ }
+ if (!(*prot & (1 << access_type))) {
+ /* Access permission fault. */
+ fi->type = ARMFault_Permission;
+ goto do_fault;
+ }
+ }
+ if (ns) {
+ /* The NS bit will (as required by the architecture) have no effect if
+ * the CPU doesn't support TZ or this is a non-secure translation
+ * regime, because the attribute will already be non-secure.
+ */
+ attrs->secure = false;
+ }
+ *phys_ptr = phys_addr;
+ return false;
+do_fault:
+ fi->domain = domain;
+ fi->level = level;
+ return true;
+}
+
+/*
+ * check_s2_mmu_setup
+ * @cpu: ARMCPU
+ * @is_aa64: True if the translation regime is in AArch64 state
+ * @startlevel: Suggested starting level
+ * @inputsize: Bitsize of IPAs
+ * @stride: Page-table stride (See the ARM ARM)
+ *
+ * Returns true if the suggested S2 translation parameters are OK and
+ * false otherwise.
+ */
+static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
+ int inputsize, int stride)
+{
+ const int grainsize = stride + 3;
+ int startsizecheck;
+
+ /* Negative levels are never allowed. */
+ if (level < 0) {
+ return false;
+ }
+
+ startsizecheck = inputsize - ((3 - level) * stride + grainsize);
+ if (startsizecheck < 1 || startsizecheck > stride + 4) {
+ return false;
+ }
+
+ if (is_aa64) {
+ CPUARMState *env = &cpu->env;
+ unsigned int pamax = arm_pamax(cpu);
+
+ switch (stride) {
+ case 13: /* 64KB Pages. */
+ if (level == 0 || (level == 1 && pamax <= 42)) {
+ return false;
+ }
+ break;
+ case 11: /* 16KB Pages. */
+ if (level == 0 || (level == 1 && pamax <= 40)) {
+ return false;
+ }
+ break;
+ case 9: /* 4KB Pages. */
+ if (level == 0 && pamax <= 42) {
+ return false;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ /* Inputsize checks. */
+ if (inputsize > pamax &&
+ (arm_el_is_aa64(env, 1) || inputsize > 40)) {
+ /* This is CONSTRAINED UNPREDICTABLE and we choose to fault. */
+ return false;
+ }
+ } else {
+ /* AArch32 only supports 4KB pages. Assert on that. */
+ assert(stride == 9);
+
+ if (level == 0) {
+ return false;
+ }
+ }
+ return true;
+}
+
+/* Translate from the 4-bit stage 2 representation of
+ * memory attributes (without cache-allocation hints) to
+ * the 8-bit representation of the stage 1 MAIR registers
+ * (which includes allocation hints).
+ *
+ * ref: shared/translation/attrs/S2AttrDecode()
+ * .../S2ConvertAttrsHints()
+ */
+static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
+{
+ uint8_t hiattr = extract32(s2attrs, 2, 2);
+ uint8_t loattr = extract32(s2attrs, 0, 2);
+ uint8_t hihint = 0, lohint = 0;
+
+ if (hiattr != 0) { /* normal memory */
+ if (arm_hcr_el2_eff(env) & HCR_CD) { /* cache disabled */
+ hiattr = loattr = 1; /* non-cacheable */
+ } else {
+ if (hiattr != 1) { /* Write-through or write-back */
+ hihint = 3; /* RW allocate */
+ }
+ if (loattr != 1) { /* Write-through or write-back */
+ lohint = 3; /* RW allocate */
+ }
+ }
+ }
+
+ return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
+}
+
+static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
+ ARMMMUIdx mmu_idx)
+{
+ uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
+ uint32_t el = regime_el(env, mmu_idx);
+ int select, tsz;
+ bool epd, hpd;
+
+ assert(mmu_idx != ARMMMUIdx_Stage2_S);
+
+ if (mmu_idx == ARMMMUIdx_Stage2) {
+ /* VTCR */
+ bool sext = extract32(tcr, 4, 1);
+ bool sign = extract32(tcr, 3, 1);
+
+ /*
+ * If the sign-extend bit is not the same as t0sz[3], the result
+ * is unpredictable. Flag this as a guest error.
+ */
+ if (sign != sext) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
+ }
+ tsz = sextract32(tcr, 0, 4) + 8;
+ select = 0;
+ hpd = false;
+ epd = false;
+ } else if (el == 2) {
+ /* HTCR */
+ tsz = extract32(tcr, 0, 3);
+ select = 0;
+ hpd = extract64(tcr, 24, 1);
+ epd = false;
+ } else {
+ int t0sz = extract32(tcr, 0, 3);
+ int t1sz = extract32(tcr, 16, 3);
+
+ if (t1sz == 0) {
+ select = va > (0xffffffffu >> t0sz);
+ } else {
+ /* Note that we will detect errors later. */
+ select = va >= ~(0xffffffffu >> t1sz);
+ }
+ if (!select) {
+ tsz = t0sz;
+ epd = extract32(tcr, 7, 1);
+ hpd = extract64(tcr, 41, 1);
+ } else {
+ tsz = t1sz;
+ epd = extract32(tcr, 23, 1);
+ hpd = extract64(tcr, 42, 1);
+ }
+ /* For aarch32, hpd0 is not enabled without t2e as well. */
+ hpd &= extract32(tcr, 6, 1);
+ }
+
+ return (ARMVAParameters) {
+ .tsz = tsz,
+ .select = select,
+ .epd = epd,
+ .hpd = hpd,
+ };
+}
+
+/**
+ * get_phys_addr_lpae: perform one stage of page table walk, LPAE format
+ *
+ * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
+ * prot and page_size may not be filled in, and the populated fsr value provides
+ * information on why the translation aborted, in the format of a long-format
+ * DFSR/IFSR fault register, with the following caveats:
+ * * the WnR bit is never set (the caller must do this).
+ *
+ * @env: CPUARMState
+ * @address: virtual address to get physical address for
+ * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
+ * @mmu_idx: MMU index indicating required translation regime
+ * @s1_is_el0: if @mmu_idx is ARMMMUIdx_Stage2 (so this is a stage 2 page table
+ *             walk), must be true if this is stage 2 of a stage 1+2 walk for an
+ *             EL0 access). If @mmu_idx is anything else, @s1_is_el0 is ignored.
+ * @phys_ptr: set to the physical address corresponding to the virtual address
+ * @attrs: set to the memory transaction attributes to use
+ * @prot: set to the permissions for the page containing phys_ptr
+ * @page_size_ptr: set to the size of the page containing phys_ptr
+ * @fi: set to fault info if the translation fails
+ * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
+ */
+static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ bool s1_is_el0,
+                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
+ target_ulong *page_size_ptr,
+ ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ CPUState *cs = CPU(cpu);
+ /* Read an LPAE long-descriptor translation table. */
+ ARMFaultType fault_type = ARMFault_Translation;
+ uint32_t level;
+ ARMVAParameters param;
+ uint64_t ttbr;
+ hwaddr descaddr, indexmask, indexmask_grainsize;
+ uint32_t tableattrs;
+ target_ulong page_size;
+ uint32_t attrs;
+ int32_t stride;
+ int addrsize, inputsize;
+ TCR *tcr = regime_tcr(env, mmu_idx);
+ int ap, ns, xn, pxn;
+ uint32_t el = regime_el(env, mmu_idx);
+ uint64_t descaddrmask;
+ bool aarch64 = arm_el_is_aa64(env, el);
+ bool guarded = false;
+
+ /* TODO: This code does not support shareability levels. */
+ if (aarch64) {
+ param = aa64_va_parameters(env, address, mmu_idx,
+ access_type != MMU_INST_FETCH);
+ level = 0;
+ addrsize = 64 - 8 * param.tbi;
+ inputsize = 64 - param.tsz;
+ } else {
+ param = aa32_va_parameters(env, address, mmu_idx);
+ level = 1;
+ addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32);
+ inputsize = addrsize - param.tsz;
+ }
+
+ /*
+ * We determined the region when collecting the parameters, but we
+ * have not yet validated that the address is valid for the region.
+ * Extract the top bits and verify that they all match select.
+ *
+ * For aa32, if inputsize == addrsize, then we have selected the
+ * region by exclusion in aa32_va_parameters and there is no more
+ * validation to do here.
+ */
+ if (inputsize < addrsize) {
+ target_ulong top_bits = sextract64(address, inputsize,
+ addrsize - inputsize);
+ if (-top_bits != param.select) {
+ /* The gap between the two regions is a Translation fault */
+ fault_type = ARMFault_Translation;
+ goto do_fault;
+ }
+ }
+
+ if (param.using64k) {
+ stride = 13;
+ } else if (param.using16k) {
+ stride = 11;
+ } else {
+ stride = 9;
+ }
+
+ /* Note that QEMU ignores shareability and cacheability attributes,
+ * so we don't need to do anything with the SH, ORGN, IRGN fields
+ * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the
+ * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
+ * implement any ASID-like capability so we can ignore it (instead
+ * we will always flush the TLB any time the ASID is changed).
+ */
+ ttbr = regime_ttbr(env, mmu_idx, param.select);
+
+ /* Here we should have set up all the parameters for the translation:
+ * inputsize, ttbr, epd, stride, tbi
+ */
+
+ if (param.epd) {
+ /* Translation table walk disabled => Translation fault on TLB miss
+ * Note: This is always 0 on 64-bit EL2 and EL3.
+ */
+ goto do_fault;
+ }
+
+ if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
+ /* The starting level depends on the virtual address size (which can
+ * be up to 48 bits) and the translation granule size. It indicates
+ * the number of strides (stride bits at a time) needed to
+ * consume the bits of the input address. In the pseudocode this is:
+ * level = 4 - RoundUp((inputsize - grainsize) / stride)
+ * where their 'inputsize' is our 'inputsize', 'grainsize' is
+ * our 'stride + 3' and 'stride' is our 'stride'.
+ * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
+ * = 4 - (inputsize - stride - 3 + stride - 1) / stride
+ * = 4 - (inputsize - 4) / stride;
+ */
+ level = 4 - (inputsize - 4) / stride;
+ } else {
+ /* For stage 2 translations the starting level is specified by the
+ * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
+ */
+ uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
+ uint32_t startlevel;
+ bool ok;
+
+ if (!aarch64 || stride == 9) {
+ /* AArch32 or 4KB pages */
+ startlevel = 2 - sl0;
+
+ if (cpu_isar_feature(aa64_st, cpu)) {
+ startlevel &= 3;
+ }
+ } else {
+ /* 16KB or 64KB pages */
+ startlevel = 3 - sl0;
+ }
+
+ /* Check that the starting level is valid. */
+ ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
+ inputsize, stride);
+ if (!ok) {
+ fault_type = ARMFault_Translation;
+ goto do_fault;
+ }
+ level = startlevel;
+ }
+
+ indexmask_grainsize = (1ULL << (stride + 3)) - 1;
+ indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;
+
+ /* Now we can extract the actual base address from the TTBR */
+ descaddr = extract64(ttbr, 0, 48);
+ /*
+ * We rely on this masking to clear the RES0 bits at the bottom of the TTBR
+ * and also to mask out CnP (bit 0) which could validly be non-zero.
+ */
+ descaddr &= ~indexmask;
+
+ /* The address field in the descriptor goes up to bit 39 for ARMv7
+ * but up to bit 47 for ARMv8, but we use the descaddrmask
+ * up to bit 39 for AArch32, because we don't need other bits in that case
+ * to construct next descriptor address (anyway they should be all zeroes).
+ */
+ descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) &
+ ~indexmask_grainsize;
+
+ /* Secure accesses start with the page table in secure memory and
+ * can be downgraded to non-secure at any step. Non-secure accesses
+ * remain non-secure. We implement this by just ORing in the NSTable/NS
+ * bits at each step.
+ */
+ tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
+ for (;;) {
+ uint64_t descriptor;
+ bool nstable;
+
+ descaddr |= (address >> (stride * (4 - level))) & indexmask;
+ descaddr &= ~7ULL;
+ nstable = extract32(tableattrs, 4, 1);
+ descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi);
+ if (fi->type != ARMFault_None) {
+ goto do_fault;
+ }
+
+ if (!(descriptor & 1) ||
+ (!(descriptor & 2) && (level == 3))) {
+ /* Invalid, or the Reserved level 3 encoding */
+ goto do_fault;
+ }
+ descaddr = descriptor & descaddrmask;
+
+ if ((descriptor & 2) && (level < 3)) {
+ /* Table entry. The top five bits are attributes which may
+ * propagate down through lower levels of the table (and
+ * which are all arranged so that 0 means "no effect", so
+ * we can gather them up by ORing in the bits at each level).
+ */
+ tableattrs |= extract64(descriptor, 59, 5);
+ level++;
+ indexmask = indexmask_grainsize;
+ continue;
+ }
+ /* Block entry at level 1 or 2, or page entry at level 3.
+ * These are basically the same thing, although the number
+ * of bits we pull in from the vaddr varies.
+ */
+ page_size = (1ULL << ((stride * (4 - level)) + 3));
+ descaddr |= (address & (page_size - 1));
+ /* Extract attributes from the descriptor */
+ attrs = extract64(descriptor, 2, 10)
+ | (extract64(descriptor, 52, 12) << 10);
+
+ if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
+ /* Stage 2 table descriptors do not include any attribute fields */
+ break;
+ }
+ /* Merge in attributes from table descriptors */
+ attrs |= nstable << 3; /* NS */
+ guarded = extract64(descriptor, 50, 1); /* GP */
+ if (param.hpd) {
+ /* HPD disables all the table attributes except NSTable. */
+ break;
+ }
+ attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */
+ /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
+ * means "force PL1 access only", which means forcing AP[1] to 0.
+ */
+ attrs &= ~(extract32(tableattrs, 2, 1) << 4); /* !APT[0] => AP[1] */
+ attrs |= extract32(tableattrs, 3, 1) << 5; /* APT[1] => AP[2] */
+ break;
+ }
+ /* Here descaddr is the final physical address, and attributes
+ * are all in attrs.
+ */
+ fault_type = ARMFault_AccessFlag;
+ if ((attrs & (1 << 8)) == 0) {
+ /* Access flag */
+ goto do_fault;
+ }
+
+ ap = extract32(attrs, 4, 2);
+
+ if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
+ ns = mmu_idx == ARMMMUIdx_Stage2;
+ xn = extract32(attrs, 11, 2);
+ *prot = get_S2prot(env, ap, xn, s1_is_el0);
+ } else {
+ ns = extract32(attrs, 3, 1);
+ xn = extract32(attrs, 12, 1);
+ pxn = extract32(attrs, 11, 1);
+ *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
+ }
+
+ fault_type = ARMFault_Permission;
+ if (!(*prot & (1 << access_type))) {
+ goto do_fault;
+ }
+
+ if (ns) {
+ /* The NS bit will (as required by the architecture) have no effect if
+ * the CPU doesn't support TZ or this is a non-secure translation
+ * regime, because the attribute will already be non-secure.
+ */
+ txattrs->secure = false;
+ }
+ /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB. */
+ if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
+ arm_tlb_bti_gp(txattrs) = true;
+ }
+
+ if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
+ cacheattrs->attrs = convert_stage2_attrs(env, extract32(attrs, 0, 4));
+ } else {
+ /* Index into MAIR registers for cache attributes */
+ uint8_t attrindx = extract32(attrs, 0, 3);
+ uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
+ assert(attrindx <= 7);
+ cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
+ }
+ cacheattrs->shareability = extract32(attrs, 6, 2);
+
+ *phys_ptr = descaddr;
+ *page_size_ptr = page_size;
+ return false;
+
+do_fault:
+ fi->type = fault_type;
+ fi->level = level;
+ /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */
+ fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_Stage2 ||
+ mmu_idx == ARMMMUIdx_Stage2_S);
+ fi->s1ns = mmu_idx == ARMMMUIdx_Stage2;
+ return true;
+}
+
+static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
+ ARMMMUIdx mmu_idx,
+ int32_t address, int *prot)
+{
+ if (!arm_feature(env, ARM_FEATURE_M)) {
+ *prot = PAGE_READ | PAGE_WRITE;
+ switch (address) {
+ case 0xF0000000 ... 0xFFFFFFFF:
+ if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
+ /* hivecs execing is ok */
+ *prot |= PAGE_EXEC;
+ }
+ break;
+ case 0x00000000 ... 0x7FFFFFFF:
+ *prot |= PAGE_EXEC;
+ break;
+ }
+ } else {
+ /* Default system address map for M profile cores.
+ * The architecture specifies which regions are execute-never;
+ * at the MPU level no other checks are defined.
+ */
+ switch (address) {
+ case 0x00000000 ... 0x1fffffff: /* ROM */
+ case 0x20000000 ... 0x3fffffff: /* SRAM */
+ case 0x60000000 ... 0x7fffffff: /* RAM */
+ case 0x80000000 ... 0x9fffffff: /* RAM */
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ break;
+ case 0x40000000 ... 0x5fffffff: /* Peripheral */
+ case 0xa0000000 ... 0xbfffffff: /* Device */
+ case 0xc0000000 ... 0xdfffffff: /* Device */
+ case 0xe0000000 ... 0xffffffff: /* System */
+ *prot = PAGE_READ | PAGE_WRITE;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+}
+
+static bool pmsav7_use_background_region(ARMCPU *cpu,
+ ARMMMUIdx mmu_idx, bool is_user)
+{
+ /* Return true if we should use the default memory map as a
+ * "background" region if there are no hits against any MPU regions.
+ */
+ CPUARMState *env = &cpu->env;
+
+ if (is_user) {
+ return false;
+ }
+
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
+ & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
+ } else {
+ return regime_sctlr(env, mmu_idx) & SCTLR_BR;
+ }
+}
+
+static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address)
+{
+    /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
+ return arm_feature(env, ARM_FEATURE_M) &&
+ extract32(address, 20, 12) == 0xe00;
+}
+
+static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
+{
+ /* True if address is in the M profile system region
+ * 0xe0000000 - 0xffffffff
+ */
+ return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
+}
+
+static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, int *prot,
+ target_ulong *page_size,
+ ARMMMUFaultInfo *fi)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ int n;
+ bool is_user = regime_is_user(env, mmu_idx);
+
+ *phys_ptr = address;
+ *page_size = TARGET_PAGE_SIZE;
+ *prot = 0;
+
+ if (regime_translation_disabled(env, mmu_idx) ||
+ m_is_ppb_region(env, address)) {
+ /* MPU disabled or M profile PPB access: use default memory map.
+ * The other case which uses the default memory map in the
+ * v7M ARM ARM pseudocode is exception vector reads from the vector
+ * table. In QEMU those accesses are done in arm_v7m_load_vector(),
+ * which always does a direct read using address_space_ldl(), rather
+ * than going via this function, so we don't need to check that here.
+ */
+ get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
+ } else { /* MPU enabled */
+ for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
+ /* region search */
+ uint32_t base = env->pmsav7.drbar[n];
+ uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
+ uint32_t rmask;
+ bool srdis = false;
+
+ if (!(env->pmsav7.drsr[n] & 0x1)) {
+ continue;
+ }
+
+ if (!rsize) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "DRSR[%d]: Rsize field cannot be 0\n", n);
+ continue;
+ }
+ rsize++;
+ rmask = (1ull << rsize) - 1;
+
+ if (base & rmask) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "DRBAR[%d]: 0x%" PRIx32 " misaligned "
+ "to DRSR region size, mask = 0x%" PRIx32 "\n",
+ n, base, rmask);
+ continue;
+ }
+
+ if (address < base || address > base + rmask) {
+ /*
+ * Address not in this region. We must check whether the
+ * region covers addresses in the same page as our address.
+ * In that case we must not report a size that covers the
+ * whole page for a subsequent hit against a different MPU
+ * region or the background region, because it would result in
+ * incorrect TLB hits for subsequent accesses to addresses that
+ * are in this MPU region.
+ */
+ if (ranges_overlap(base, rmask,
+ address & TARGET_PAGE_MASK,
+ TARGET_PAGE_SIZE)) {
+ *page_size = 1;
+ }
+ continue;
+ }
+
+ /* Region matched */
+
+ if (rsize >= 8) { /* no subregions for regions < 256 bytes */
+ int i, snd;
+ uint32_t srdis_mask;
+
+ rsize -= 3; /* sub region size (power of 2) */
+ snd = ((address - base) >> rsize) & 0x7;
+ srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);
+
+ srdis_mask = srdis ? 0x3 : 0x0;
+ for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
+ /* This will check in groups of 2, 4 and then 8, whether
+ * the subregion bits are consistent. rsize is incremented
+ * back up to give the region size, considering consistent
+ * adjacent subregions as one region. Stop testing if rsize
+ * is already big enough for an entire QEMU page.
+ */
+ int snd_rounded = snd & ~(i - 1);
+ uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
+ snd_rounded + 8, i);
+ if (srdis_mask ^ srdis_multi) {
+ break;
+ }
+ srdis_mask = (srdis_mask << i) | srdis_mask;
+ rsize++;
+ }
+ }
+ if (srdis) {
+ continue;
+ }
+ if (rsize < TARGET_PAGE_BITS) {
+ *page_size = 1 << rsize;
+ }
+ break;
+ }
+
+ if (n == -1) { /* no hits */
+ if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
+ /* background fault */
+ fi->type = ARMFault_Background;
+ return true;
+ }
+ get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
+ } else { /* a MPU hit! */
+ uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
+ uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);
+
+ if (m_is_system_region(env, address)) {
+ /* System space is always execute never */
+ xn = 1;
+ }
+
+ if (is_user) { /* User mode AP bit decoding */
+ switch (ap) {
+ case 0:
+ case 1:
+ case 5:
+ break; /* no access */
+ case 3:
+ *prot |= PAGE_WRITE;
+ /* fall through */
+ case 2:
+ case 6:
+ *prot |= PAGE_READ | PAGE_EXEC;
+ break;
+ case 7:
+ /* for v7M, same as 6; for R profile a reserved value */
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ *prot |= PAGE_READ | PAGE_EXEC;
+ break;
+ }
+ /* fall through */
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "DRACR[%d]: Bad value for AP bits: 0x%"
+ PRIx32 "\n", n, ap);
+ }
+ } else { /* Priv. mode AP bits decoding */
+ switch (ap) {
+ case 0:
+ break; /* no access */
+ case 1:
+ case 2:
+ case 3:
+ *prot |= PAGE_WRITE;
+ /* fall through */
+ case 5:
+ case 6:
+ *prot |= PAGE_READ | PAGE_EXEC;
+ break;
+ case 7:
+ /* for v7M, same as 6; for R profile a reserved value */
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ *prot |= PAGE_READ | PAGE_EXEC;
+ break;
+ }
+ /* fall through */
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "DRACR[%d]: Bad value for AP bits: 0x%"
+ PRIx32 "\n", n, ap);
+ }
+ }
+
+ /* execute never */
+ if (xn) {
+ *prot &= ~PAGE_EXEC;
+ }
+ }
+ }
+
+ fi->type = ARMFault_Permission;
+ fi->level = 1;
+ return !(*prot & (1 << access_type));
+}
+
+static bool v8m_is_sau_exempt(CPUARMState *env,
+ uint32_t address, MMUAccessType access_type)
+{
+ /* The architecture specifies that certain address ranges are
+ * exempt from v8M SAU/IDAU checks.
+ */
+ return
+ (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
+ (address >= 0xe0000000 && address <= 0xe0002fff) ||
+ (address >= 0xe000e000 && address <= 0xe000efff) ||
+ (address >= 0xe002e000 && address <= 0xe002efff) ||
+ (address >= 0xe0040000 && address <= 0xe0041fff) ||
+ (address >= 0xe00ff000 && address <= 0xe00fffff);
+}
+
+void v8m_security_lookup(CPUARMState *env, uint32_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ V8M_SAttributes *sattrs)
+{
+ /* Look up the security attributes for this address. Compare the
+ * pseudocode SecurityCheck() function.
+ * We assume the caller has zero-initialized *sattrs.
+ */
+ ARMCPU *cpu = env_archcpu(env);
+ int r;
+ bool idau_exempt = false, idau_ns = true, idau_nsc = true;
+ int idau_region = IREGION_NOTVALID;
+ uint32_t addr_page_base = address & TARGET_PAGE_MASK;
+ uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
+
+ if (cpu->idau) {
+ IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
+ IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);
+
+ iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
+ &idau_nsc);
+ }
+
+ if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
+ /* 0xf0000000..0xffffffff is always S for insn fetches */
+ return;
+ }
+
+ if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
+ sattrs->ns = !regime_is_secure(env, mmu_idx);
+ return;
+ }
+
+ if (idau_region != IREGION_NOTVALID) {
+ sattrs->irvalid = true;
+ sattrs->iregion = idau_region;
+ }
+
+ switch (env->sau.ctrl & 3) {
+ case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
+ break;
+ case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
+ sattrs->ns = true;
+ break;
+ default: /* SAU.ENABLE == 1 */
+ for (r = 0; r < cpu->sau_sregion; r++) {
+ if (env->sau.rlar[r] & 1) {
+ uint32_t base = env->sau.rbar[r] & ~0x1f;
+ uint32_t limit = env->sau.rlar[r] | 0x1f;
+
+ if (base <= address && limit >= address) {
+ if (base > addr_page_base || limit < addr_page_limit) {
+ sattrs->subpage = true;
+ }
+ if (sattrs->srvalid) {
+                        /* If we hit in more than one region then we must report
+ * as Secure, not NS-Callable, with no valid region
+ * number info.
+ */
+ sattrs->ns = false;
+ sattrs->nsc = false;
+ sattrs->sregion = 0;
+ sattrs->srvalid = false;
+ break;
+ } else {
+ if (env->sau.rlar[r] & 2) {
+ sattrs->nsc = true;
+ } else {
+ sattrs->ns = true;
+ }
+ sattrs->srvalid = true;
+ sattrs->sregion = r;
+ }
+ } else {
+ /*
+ * Address not in this region. We must check whether the
+ * region covers addresses in the same page as our address.
+ * In that case we must not report a size that covers the
+ * whole page for a subsequent hit against a different MPU
+ * region or the background region, because it would result
+ * in incorrect TLB hits for subsequent accesses to
+ * addresses that are in this MPU region.
+ */
+ if (limit >= base &&
+ ranges_overlap(base, limit - base + 1,
+ addr_page_base,
+ TARGET_PAGE_SIZE)) {
+ sattrs->subpage = true;
+ }
+ }
+ }
+ }
+ break;
+ }
+
+ /*
+ * The IDAU will override the SAU lookup results if it specifies
+ * higher security than the SAU does.
+ */
+ if (!idau_ns) {
+ if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
+ sattrs->ns = false;
+ sattrs->nsc = idau_nsc;
+ }
+ }
+}
+
+bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, MemTxAttrs *txattrs,
+ int *prot, bool *is_subpage,
+ ARMMMUFaultInfo *fi, uint32_t *mregion)
+{
+ /* Perform a PMSAv8 MPU lookup (without also doing the SAU check
+ * that a full phys-to-virt translation does).
+ * mregion is (if not NULL) set to the region number which matched,
+ * or -1 if no region number is returned (MPU off, address did not
+ * hit a region, address hit in multiple regions).
+ * We set is_subpage to true if the region hit doesn't cover the
+ * entire TARGET_PAGE the address is within.
+ */
+ ARMCPU *cpu = env_archcpu(env);
+ bool is_user = regime_is_user(env, mmu_idx);
+ uint32_t secure = regime_is_secure(env, mmu_idx);
+ int n;
+ int matchregion = -1;
+ bool hit = false;
+ uint32_t addr_page_base = address & TARGET_PAGE_MASK;
+ uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
+
+ *is_subpage = false;
+ *phys_ptr = address;
+ *prot = 0;
+ if (mregion) {
+ *mregion = -1;
+ }
+
+ /* Unlike the ARM ARM pseudocode, we don't need to check whether this
+ * was an exception vector read from the vector table (which is always
+ * done using the default system address map), because those accesses
+ * are done in arm_v7m_load_vector(), which always does a direct
+ * read using address_space_ldl(), rather than going via this function.
+ */
+ if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
+ hit = true;
+ } else if (m_is_ppb_region(env, address)) {
+ hit = true;
+ } else {
+ if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
+ hit = true;
+ }
+
+ for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
+ /* region search */
+ /* Note that the base address is bits [31:5] from the register
+ * with bits [4:0] all zeroes, but the limit address is bits
+ * [31:5] from the register with bits [4:0] all ones.
+ */
+ uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
+ uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;
+
+ if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
+ /* Region disabled */
+ continue;
+ }
+
+ if (address < base || address > limit) {
+ /*
+ * Address not in this region. We must check whether the
+ * region covers addresses in the same page as our address.
+ * In that case we must not report a size that covers the
+ * whole page for a subsequent hit against a different MPU
+ * region or the background region, because it would result in
+ * incorrect TLB hits for subsequent accesses to addresses that
+ * are in this MPU region.
+ */
+ if (limit >= base &&
+ ranges_overlap(base, limit - base + 1,
+ addr_page_base,
+ TARGET_PAGE_SIZE)) {
+ *is_subpage = true;
+ }
+ continue;
+ }
+
+ if (base > addr_page_base || limit < addr_page_limit) {
+ *is_subpage = true;
+ }
+
+ if (matchregion != -1) {
+ /* Multiple regions match -- always a failure (unlike
+ * PMSAv7 where highest-numbered-region wins)
+ */
+ fi->type = ARMFault_Permission;
+ fi->level = 1;
+ return true;
+ }
+
+ matchregion = n;
+ hit = true;
+ }
+ }
+
+ if (!hit) {
+ /* background fault */
+ fi->type = ARMFault_Background;
+ return true;
+ }
+
+ if (matchregion == -1) {
+ /* hit using the background region */
+ get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
+ } else {
+ uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
+ uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);
+ bool pxn = false;
+
+ if (arm_feature(env, ARM_FEATURE_V8_1M)) {
+ pxn = extract32(env->pmsav8.rlar[secure][matchregion], 4, 1);
+ }
+
+ if (m_is_system_region(env, address)) {
+ /* System space is always execute never */
+ xn = 1;
+ }
+
+ *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
+ if (*prot && !xn && !(pxn && !is_user)) {
+ *prot |= PAGE_EXEC;
+ }
+ /* We don't need to look the attribute up in the MAIR0/MAIR1
+ * registers because that only tells us about cacheability.
+ */
+ if (mregion) {
+ *mregion = matchregion;
+ }
+ }
+
+ fi->type = ARMFault_Permission;
+ fi->level = 1;
+ return !(*prot & (1 << access_type));
+}
+
+
+static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, MemTxAttrs *txattrs,
+ int *prot, target_ulong *page_size,
+ ARMMMUFaultInfo *fi)
+{
+ uint32_t secure = regime_is_secure(env, mmu_idx);
+ V8M_SAttributes sattrs = {};
+ bool ret;
+ bool mpu_is_subpage;
+
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
+ if (access_type == MMU_INST_FETCH) {
+ /* Instruction fetches always use the MMU bank and the
+ * transaction attribute determined by the fetch address,
+ * regardless of CPU state. This is painful for QEMU
+ * to handle, because it would mean we need to encode
+ * into the mmu_idx not just the (user, negpri) information
+ * for the current security state but also that for the
+ * other security state, which would balloon the number
+ * of mmu_idx values needed alarmingly.
+ * Fortunately we can avoid this because it's not actually
+ * possible to arbitrarily execute code from memory with
+ * the wrong security attribute: it will always generate
+ * an exception of some kind or another, apart from the
+ * special case of an NS CPU executing an SG instruction
+ * in S&NSC memory. So we always just fail the translation
+ * here and sort things out in the exception handler
+ * (including possibly emulating an SG instruction).
+ */
+ if (sattrs.ns != !secure) {
+ if (sattrs.nsc) {
+ fi->type = ARMFault_QEMU_NSCExec;
+ } else {
+ fi->type = ARMFault_QEMU_SFault;
+ }
+ *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
+ *phys_ptr = address;
+ *prot = 0;
+ return true;
+ }
+ } else {
+ /* For data accesses we always use the MMU bank indicated
+ * by the current CPU state, but the security attributes
+ * might downgrade a secure access to nonsecure.
+ */
+ if (sattrs.ns) {
+ txattrs->secure = false;
+ } else if (!secure) {
+ /* NS access to S memory must fault.
+ * Architecturally we should first check whether the
+ * MPU information for this address indicates that we
+ * are doing an unaligned access to Device memory, which
+ * should generate a UsageFault instead. QEMU does not
+ * currently check for that kind of unaligned access though.
+ * If we added it we would need to do so as a special case
+ * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
+ */
+ fi->type = ARMFault_QEMU_SFault;
+ *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
+ *phys_ptr = address;
+ *prot = 0;
+ return true;
+ }
+ }
+ }
+
+ ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
+ txattrs, prot, &mpu_is_subpage, fi, NULL);
+ *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
+ return ret;
+}
+
+static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, int *prot,
+ ARMMMUFaultInfo *fi)
+{
+ int n;
+ uint32_t mask;
+ uint32_t base;
+ bool is_user = regime_is_user(env, mmu_idx);
+
+ if (regime_translation_disabled(env, mmu_idx)) {
+ /* MPU disabled. */
+ *phys_ptr = address;
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ return false;
+ }
+
+ *phys_ptr = address;
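+    /*
+     * Regions are searched from 7 down to 0 so that the highest
+     * numbered enabled region that matches the address wins.
+     */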
+ for (n = 7; n >= 0; n--) {
+ base = env->cp15.c6_region[n];
+ if ((base & 1) == 0) {
+ continue;
+ }
+ mask = 1 << ((base >> 1) & 0x1f);
+ /* Keep this shift separate from the above to avoid an
+ (undefined) << 32. */
+ mask = (mask << 1) - 1;
+ if (((base ^ address) & ~mask) == 0) {
+ break;
+ }
+ }
+ if (n < 0) {
+ fi->type = ARMFault_Background;
+ return true;
+ }
+
+ if (access_type == MMU_INST_FETCH) {
+ mask = env->cp15.pmsav5_insn_ap;
+ } else {
+ mask = env->cp15.pmsav5_data_ap;
+ }
+ mask = (mask >> (n * 4)) & 0xf;
+ switch (mask) {
+ case 0:
+ fi->type = ARMFault_Permission;
+ fi->level = 1;
+ return true;
+ case 1:
+ if (is_user) {
+ fi->type = ARMFault_Permission;
+ fi->level = 1;
+ return true;
+ }
+ *prot = PAGE_READ | PAGE_WRITE;
+ break;
+ case 2:
+ *prot = PAGE_READ;
+ if (!is_user) {
+ *prot |= PAGE_WRITE;
+ }
+ break;
+ case 3:
+ *prot = PAGE_READ | PAGE_WRITE;
+ break;
+ case 5:
+ if (is_user) {
+ fi->type = ARMFault_Permission;
+ fi->level = 1;
+ return true;
+ }
+ *prot = PAGE_READ;
+ break;
+ case 6:
+ *prot = PAGE_READ;
+ break;
+ default:
+ /* Bad permission. */
+ fi->type = ARMFault_Permission;
+ fi->level = 1;
+ return true;
+ }
+ *prot |= PAGE_EXEC;
+ return false;
+}
+
+/* Combine either inner or outer cacheability attributes for normal
+ * memory, according to table D4-42 and pseudocode procedure
+ * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
+ *
+ * NB: only stage 1 includes allocation hints (RW bits), leading to
+ * some asymmetry.
+ */
+static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
+{
+ if (s1 == 4 || s2 == 4) {
+ /* non-cacheable has precedence */
+ return 4;
+ } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
+ /* stage 1 write-through takes precedence */
+ return s1;
+ } else if (extract32(s2, 2, 2) == 2) {
+ /* stage 2 write-through takes precedence, but the allocation hint
+ * is still taken from stage 1
+ */
+ return (2 << 2) | extract32(s1, 0, 2);
+ } else { /* write-back */
+ return s1;
+ }
+}
+
+/* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
+ * and CombineS1S2Desc()
+ *
+ * @s1: Attributes from stage 1 walk
+ * @s2: Attributes from stage 2 walk
+ */
+static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
+{
+ uint8_t s1lo, s2lo, s1hi, s2hi;
+ ARMCacheAttrs ret;
+ bool tagged = false;
+
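+    /*
+     * Treat Tagged Normal (0xf0) as Normal WB RWA (0xff) for the
+     * combination, and restore the Tagged encoding at the end if the
+     * result is still fully cacheable.
+     */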
+ if (s1.attrs == 0xf0) {
+ tagged = true;
+ s1.attrs = 0xff;
+ }
+
+ s1lo = extract32(s1.attrs, 0, 4);
+ s2lo = extract32(s2.attrs, 0, 4);
+ s1hi = extract32(s1.attrs, 4, 4);
+ s2hi = extract32(s2.attrs, 4, 4);
+
+ /* Combine shareability attributes (table D4-43) */
+ if (s1.shareability == 2 || s2.shareability == 2) {
+ /* if either are outer-shareable, the result is outer-shareable */
+ ret.shareability = 2;
+ } else if (s1.shareability == 3 || s2.shareability == 3) {
+ /* if either are inner-shareable, the result is inner-shareable */
+ ret.shareability = 3;
+ } else {
+ /* both non-shareable */
+ ret.shareability = 0;
+ }
+
+ /* Combine memory type and cacheability attributes */
+ if (s1hi == 0 || s2hi == 0) {
+ /* Device has precedence over normal */
+ if (s1lo == 0 || s2lo == 0) {
+ /* nGnRnE has precedence over anything */
+ ret.attrs = 0;
+ } else if (s1lo == 4 || s2lo == 4) {
+ /* non-Reordering has precedence over Reordering */
+ ret.attrs = 4; /* nGnRE */
+ } else if (s1lo == 8 || s2lo == 8) {
+ /* non-Gathering has precedence over Gathering */
+ ret.attrs = 8; /* nGRE */
+ } else {
+ ret.attrs = 0xc; /* GRE */
+ }
+
+ /* Any location for which the resultant memory type is any
+ * type of Device memory is always treated as Outer Shareable.
+ */
+ ret.shareability = 2;
+ } else { /* Normal memory */
+ /* Outer/inner cacheability combine independently */
+ ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
+ | combine_cacheattr_nibble(s1lo, s2lo);
+
+ if (ret.attrs == 0x44) {
+ /* Any location for which the resultant memory type is Normal
+ * Inner Non-cacheable, Outer Non-cacheable is always treated
+ * as Outer Shareable.
+ */
+ ret.shareability = 2;
+ }
+ }
+
+ /* TODO: CombineS1S2Desc does not consider transient, only WB, RWA. */
+ if (tagged && ret.attrs == 0xff) {
+ ret.attrs = 0xf0;
+ }
+
+ return ret;
+}
+
+
+/* get_phys_addr - get the physical address for this virtual address
+ *
+ * Find the physical address corresponding to the given virtual address,
+ * by doing a translation table walk on MMU based systems or using the
+ * MPU state on MPU based systems.
+ *
+ * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
+ * prot and page_size may not be filled in, and the populated fsr value provides
+ * information on why the translation aborted, in the format of a
+ * DFSR/IFSR fault register, with the following caveats:
+ * * we honour the short vs long DFSR format differences.
+ * * the WnR bit is never set (the caller must do this).
+ *  * for PMSAv5 based systems we don't bother to return a full FSR format
+ * value.
+ *
+ * @env: CPUARMState
+ * @address: virtual address to get physical address for
+ * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
+ * @mmu_idx: MMU index indicating required translation regime
+ * @phys_ptr: set to the physical address corresponding to the virtual address
+ * @attrs: set to the memory transaction attributes to use
+ * @prot: set to the permissions for the page containing phys_ptr
+ * @page_size: set to the size of the page containing phys_ptr
+ * @fi: set to fault info if the translation fails
+ * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
+ */
+bool get_phys_addr(CPUARMState *env, target_ulong address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
+ target_ulong *page_size,
+ ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
+{
+ ARMMMUIdx s1_mmu_idx = stage_1_mmu_idx(mmu_idx);
+
+ if (mmu_idx != s1_mmu_idx) {
+ /* Call ourselves recursively to do the stage 1 and then stage 2
+ * translations if mmu_idx is a two-stage regime.
+ */
+ if (arm_feature(env, ARM_FEATURE_EL2)) {
+ hwaddr ipa;
+ int s2_prot;
+ int ret;
+ ARMCacheAttrs cacheattrs2 = {};
+ ARMMMUIdx s2_mmu_idx;
+ bool is_el0;
+
+ ret = get_phys_addr(env, address, access_type, s1_mmu_idx, &ipa,
+ attrs, prot, page_size, fi, cacheattrs);
+
+ /* If S1 fails or S2 is disabled, return early. */
+ if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
+ *phys_ptr = ipa;
+ return ret;
+ }
+
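+            /*
+             * The stage 2 walk uses the Secure or Non-secure S2 regime
+             * according to the security attribute produced by stage 1,
+             * and EL0 accesses are flagged so that TTS2UXN execute
+             * permissions can be applied in get_S2prot().
+             */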
+ s2_mmu_idx = attrs->secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
+ is_el0 = mmu_idx == ARMMMUIdx_E10_0 || mmu_idx == ARMMMUIdx_SE10_0;
+
+ /* S1 is done. Now do S2 translation. */
+ ret = get_phys_addr_lpae(env, ipa, access_type, s2_mmu_idx, is_el0,
+ phys_ptr, attrs, &s2_prot,
+ page_size, fi, &cacheattrs2);
+ fi->s2addr = ipa;
+ /* Combine the S1 and S2 perms. */
+ *prot &= s2_prot;
+
+ /* If S2 fails, return early. */
+ if (ret) {
+ return ret;
+ }
+
+ /* Combine the S1 and S2 cache attributes. */
+ if (arm_hcr_el2_eff(env) & HCR_DC) {
+ /*
+ * HCR.DC forces the first stage attributes to
+ * Normal Non-Shareable,
+ * Inner Write-Back Read-Allocate Write-Allocate,
+ * Outer Write-Back Read-Allocate Write-Allocate.
+ * Do not overwrite Tagged within attrs.
+ */
+ if (cacheattrs->attrs != 0xf0) {
+ cacheattrs->attrs = 0xff;
+ }
+ cacheattrs->shareability = 0;
+ }
+ *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
+
+ /* Check if IPA translates to secure or non-secure PA space. */
+ if (arm_is_secure_below_el3(env)) {
+ if (attrs->secure) {
+ attrs->secure =
+ !(env->cp15.vstcr_el2.raw_tcr & (VSTCR_SA | VSTCR_SW));
+ } else {
+ attrs->secure =
+ !((env->cp15.vtcr_el2.raw_tcr & (VTCR_NSA | VTCR_NSW))
+ || (env->cp15.vstcr_el2.raw_tcr & VSTCR_SA));
+ }
+ }
+ return 0;
+ } else {
+ /*
+ * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
+ */
+ mmu_idx = stage_1_mmu_idx(mmu_idx);
+ }
+ }
+
+ /* The page table entries may downgrade secure to non-secure, but
+     * cannot upgrade a non-secure translation regime's attributes
+ * to secure.
+ */
+ attrs->secure = regime_is_secure(env, mmu_idx);
+ attrs->user = regime_is_user(env, mmu_idx);
+
+ /* Fast Context Switch Extension. This doesn't exist at all in v8.
+ * In v7 and earlier it affects all stage 1 translations.
+ */
+ if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
+ && !arm_feature(env, ARM_FEATURE_V8)) {
+ if (regime_el(env, mmu_idx) == 3) {
+ address += env->cp15.fcseidr_s;
+ } else {
+ address += env->cp15.fcseidr_ns;
+ }
+ }
+
+ if (arm_feature(env, ARM_FEATURE_PMSA)) {
+ bool ret;
+ *page_size = TARGET_PAGE_SIZE;
+
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ /* PMSAv8 */
+ ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
+ phys_ptr, attrs, prot, page_size, fi);
+ } else if (arm_feature(env, ARM_FEATURE_V7)) {
+ /* PMSAv7 */
+ ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
+ phys_ptr, prot, page_size, fi);
+ } else {
+ /* Pre-v7 MPU */
+ ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
+ phys_ptr, prot, fi);
+ }
+ qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
+ " mmu_idx %u -> %s (prot %c%c%c)\n",
+ access_type == MMU_DATA_LOAD ? "reading" :
+ (access_type == MMU_DATA_STORE ? "writing" : "execute"),
+ (uint32_t)address, mmu_idx,
+ ret ? "Miss" : "Hit",
+ *prot & PAGE_READ ? 'r' : '-',
+ *prot & PAGE_WRITE ? 'w' : '-',
+ *prot & PAGE_EXEC ? 'x' : '-');
+
+ return ret;
+ }
+
+ /* Definitely a real MMU, not an MPU */
+
+ if (regime_translation_disabled(env, mmu_idx)) {
+ uint64_t hcr;
+ uint8_t memattr;
+
+ /*
+ * MMU disabled. S1 addresses within aa64 translation regimes are
+ * still checked for bounds -- see AArch64.TranslateAddressS1Off.
+ */
+ if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
+ int r_el = regime_el(env, mmu_idx);
+ if (arm_el_is_aa64(env, r_el)) {
+ int pamax = arm_pamax(env_archcpu(env));
+ uint64_t tcr = env->cp15.tcr_el[r_el].raw_tcr;
+ int addrtop, tbi;
+
+ tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
+ if (access_type == MMU_INST_FETCH) {
+ tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
+ }
+ tbi = (tbi >> extract64(address, 55, 1)) & 1;
+ addrtop = (tbi ? 55 : 63);
+
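+                /*
+                 * Any set bits between PAMax and the top of the
+                 * untagged address range mean the address is out of
+                 * range even with the MMU off: report an Address Size
+                 * fault at level 0.
+                 */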
+ if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
+ fi->type = ARMFault_AddressSize;
+ fi->level = 0;
+ fi->stage2 = false;
+ return 1;
+ }
+
+ /*
+ * When TBI is disabled, we've just validated that all of the
+ * bits above PAMax are zero, so logically we only need to
+ * clear the top byte for TBI. But it's clearer to follow
+ * the pseudocode set of addrdesc.paddress.
+ */
+ address = extract64(address, 0, 52);
+ }
+ }
+ *phys_ptr = address;
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ *page_size = TARGET_PAGE_SIZE;
+
+ /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
+ hcr = arm_hcr_el2_eff(env);
+ cacheattrs->shareability = 0;
+ if (hcr & HCR_DC) {
+ if (hcr & HCR_DCT) {
+ memattr = 0xf0; /* Tagged, Normal, WB, RWA */
+ } else {
+ memattr = 0xff; /* Normal, WB, RWA */
+ }
+ } else if (access_type == MMU_INST_FETCH) {
+ if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
+ memattr = 0xee; /* Normal, WT, RA, NT */
+ } else {
+ memattr = 0x44; /* Normal, NC, No */
+ }
+            cacheattrs->shareability = 2; /* outer shareable */
+ } else {
+ memattr = 0x00; /* Device, nGnRnE */
+ }
+ cacheattrs->attrs = memattr;
+ return 0;
+ }
+
+ if (regime_using_lpae_format(env, mmu_idx)) {
+ return get_phys_addr_lpae(env, address, access_type, mmu_idx, false,
+ phys_ptr, attrs, prot, page_size,
+ fi, cacheattrs);
+ } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
+ return get_phys_addr_v6(env, address, access_type, mmu_idx,
+ phys_ptr, attrs, prot, page_size, fi);
+ } else {
+ return get_phys_addr_v5(env, address, access_type, mmu_idx,
+ phys_ptr, prot, page_size, fi);
+ }
+}
+
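+/*
+ * Debug/monitor helper: translate @addr for the current translation
+ * regime as a data load and return the physical address, or -1 if the
+ * translation fails.
+ */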
+hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
+ MemTxAttrs *attrs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ hwaddr phys_addr;
+ target_ulong page_size;
+ int prot;
+ bool ret;
+ ARMMMUFaultInfo fi = {};
+ ARMMMUIdx mmu_idx = arm_mmu_idx(env);
+ ARMCacheAttrs cacheattrs = {};
+
+ *attrs = (MemTxAttrs) {};
+
+ ret = get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &phys_addr,
+ attrs, &prot, &page_size, &fi, &cacheattrs);
+
+ if (ret) {
+ return -1;
+ }
+ return phys_addr;
+}
diff --git a/target/arm/cpu-mmu.c b/target/arm/cpu-mmu.c
new file mode 100644
index 0000000000..f463f8458e
--- /dev/null
+++ b/target/arm/cpu-mmu.c
@@ -0,0 +1,124 @@
+/*
+ * QEMU ARM CPU address translation related code
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ */
+
+#include "qemu/osdep.h"
+#include "cpu-mmu.h"
+
+int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx)
+{
+ if (regime_has_2_ranges(mmu_idx)) {
+ return extract64(tcr, 37, 2);
+ } else if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
+ return 0; /* VTCR_EL2 */
+ } else {
+ /* Replicate the single TBI bit so we always have 2 bits. */
+ return extract32(tcr, 20, 1) * 3;
+ }
+}
+
+int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx)
+{
+ if (regime_has_2_ranges(mmu_idx)) {
+ return extract64(tcr, 51, 2);
+ } else if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
+ return 0; /* VTCR_EL2 */
+ } else {
+ /* Replicate the single TBID bit so we always have 2 bits. */
+ return extract32(tcr, 29, 1) * 3;
+ }
+}
+
+int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx)
+{
+ if (regime_has_2_ranges(mmu_idx)) {
+ return extract64(tcr, 57, 2);
+ } else {
+ /* Replicate the single TCMA bit so we always have 2 bits. */
+ return extract32(tcr, 30, 1) * 3;
+ }
+}
+
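+/*
+ * Return the ARMVAParameters that describe the VA range containing @va
+ * for @mmu_idx. For instruction fetches (@data == false) the reported
+ * TBI bit also takes TBID into account.
+ */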
+ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
+ ARMMMUIdx mmu_idx, bool data)
+{
+ uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
+ bool epd, hpd, using16k, using64k;
+ int select, tsz, tbi, max_tsz;
+
+ if (!regime_has_2_ranges(mmu_idx)) {
+ select = 0;
+ tsz = extract32(tcr, 0, 6);
+ using64k = extract32(tcr, 14, 1);
+ using16k = extract32(tcr, 15, 1);
+ if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
+ /* VTCR_EL2 */
+ hpd = false;
+ } else {
+ hpd = extract32(tcr, 24, 1);
+ }
+ epd = false;
+ } else {
+ /*
+ * Bit 55 is always between the two regions, and is canonical for
+ * determining if address tagging is enabled.
+ */
+ select = extract64(va, 55, 1);
+ if (!select) {
+ tsz = extract32(tcr, 0, 6);
+ epd = extract32(tcr, 7, 1);
+ using64k = extract32(tcr, 14, 1);
+ using16k = extract32(tcr, 15, 1);
+ hpd = extract64(tcr, 41, 1);
+ } else {
+ int tg = extract32(tcr, 30, 2);
+ using16k = tg == 1;
+ using64k = tg == 3;
+ tsz = extract32(tcr, 16, 6);
+ epd = extract32(tcr, 23, 1);
+ hpd = extract64(tcr, 42, 1);
+ }
+ }
+
+ if (cpu_isar_feature(aa64_st, env_archcpu(env))) {
+ max_tsz = 48 - using64k;
+ } else {
+ max_tsz = 39;
+ }
+
+ tsz = MIN(tsz, max_tsz);
+ tsz = MAX(tsz, 16); /* TODO: ARMv8.2-LVA */
+
+ /* Present TBI as a composite with TBID. */
+ tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
+ if (!data) {
+ tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
+ }
+ tbi = (tbi >> select) & 1;
+
+ return (ARMVAParameters) {
+ .tsz = tsz,
+ .select = select,
+ .tbi = tbi,
+ .epd = epd,
+ .hpd = hpd,
+ .using16k = using16k,
+ .using64k = using64k,
+ };
+}
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index a68804f6fc..cac7b7f06f 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -42,6 +42,7 @@
#include "kvm_arm.h"
#include "disas/capstone.h"
#include "fpu/softfloat.h"
+#include "cpu-mmu.h"
static void arm_cpu_set_pc(CPUState *cs, vaddr value)
{
diff --git a/target/arm/tcg/helper.c b/target/arm/tcg/helper.c
index d9220be7c5..7345e090fa 100644
--- a/target/arm/tcg/helper.c
+++ b/target/arm/tcg/helper.c
@@ -36,23 +36,12 @@
#include "exec/cpu_ldst.h"
#include "semihosting/common-semi.h"
#endif
+#include "cpu-mmu.h"
#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
#define PMCR_NUM_COUNTERS 4 /* QEMU IMPDEF choice */
-#ifndef CONFIG_USER_ONLY
-
-static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
- MMUAccessType access_type, ARMMMUIdx mmu_idx,
- bool s1_is_el0,
-                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
- target_ulong *page_size_ptr,
- ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
- __attribute__((nonnull));
-#endif
-
static void switch_mode(CPUARMState *env, int mode);
-static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
static int vfp_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
{
@@ -10129,123 +10118,6 @@ uint64_t arm_sctlr(CPUARMState *env, int el)
return env->cp15.sctlr_el[el];
}
-/* Return the SCTLR value which controls this address translation regime */
-static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
-{
- return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
-}
-
-#ifndef CONFIG_USER_ONLY
-
-/* Return true if the specified stage of address translation is disabled */
-static inline bool regime_translation_disabled(CPUARMState *env,
- ARMMMUIdx mmu_idx)
-{
- uint64_t hcr_el2;
-
- if (arm_feature(env, ARM_FEATURE_M)) {
- switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
- (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
- case R_V7M_MPU_CTRL_ENABLE_MASK:
- /* Enabled, but not for HardFault and NMI */
- return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
- case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
- /* Enabled for all cases */
- return false;
- case 0:
- default:
- /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
- * we warned about that in armv7m_nvic.c when the guest set it.
- */
- return true;
- }
- }
-
- hcr_el2 = arm_hcr_el2_eff(env);
-
- if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
- /* HCR.DC means HCR.VM behaves as 1 */
- return (hcr_el2 & (HCR_DC | HCR_VM)) == 0;
- }
-
- if (hcr_el2 & HCR_TGE) {
- /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */
- if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) {
- return true;
- }
- }
-
- if ((hcr_el2 & HCR_DC) && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
- /* HCR.DC means SCTLR_EL1.M behaves as 0 */
- return true;
- }
-
- return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
-}
-
-static inline bool regime_translation_big_endian(CPUARMState *env,
- ARMMMUIdx mmu_idx)
-{
- return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
-}
-
-/* Return the TTBR associated with this translation regime */
-static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
- int ttbrn)
-{
- if (mmu_idx == ARMMMUIdx_Stage2) {
- return env->cp15.vttbr_el2;
- }
- if (mmu_idx == ARMMMUIdx_Stage2_S) {
- return env->cp15.vsttbr_el2;
- }
- if (ttbrn == 0) {
- return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
- } else {
- return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
- }
-}
-
-#endif /* !CONFIG_USER_ONLY */
-
-/* Convert a possible stage1+2 MMU index into the appropriate
- * stage 1 MMU index
- */
-static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
-{
- switch (mmu_idx) {
- case ARMMMUIdx_SE10_0:
- return ARMMMUIdx_Stage1_SE0;
- case ARMMMUIdx_SE10_1:
- return ARMMMUIdx_Stage1_SE1;
- case ARMMMUIdx_SE10_1_PAN:
- return ARMMMUIdx_Stage1_SE1_PAN;
- case ARMMMUIdx_E10_0:
- return ARMMMUIdx_Stage1_E0;
- case ARMMMUIdx_E10_1:
- return ARMMMUIdx_Stage1_E1;
- case ARMMMUIdx_E10_1_PAN:
- return ARMMMUIdx_Stage1_E1_PAN;
- default:
- return mmu_idx;
- }
-}
-
-/* Return true if the translation regime is using LPAE format page tables */
-static inline bool regime_using_lpae_format(CPUARMState *env,
- ARMMMUIdx mmu_idx)
-{
- int el = regime_el(env, mmu_idx);
- if (el == 2 || arm_el_is_aa64(env, el)) {
- return true;
- }
- if (arm_feature(env, ARM_FEATURE_LPAE)
- && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
- return true;
- }
- return false;
-}
-
/* Returns true if the stage 1 translation regime is using LPAE format page
* tables. Used when raising alignment exceptions, whose FSR changes depending
* on whether the long or short descriptor format is in use. */
@@ -10256,2268 +10128,6 @@ bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
return regime_using_lpae_format(env, mmu_idx);
}
-#ifndef CONFIG_USER_ONLY
-static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
-{
- switch (mmu_idx) {
- case ARMMMUIdx_SE10_0:
- case ARMMMUIdx_E20_0:
- case ARMMMUIdx_SE20_0:
- case ARMMMUIdx_Stage1_E0:
- case ARMMMUIdx_Stage1_SE0:
- case ARMMMUIdx_MUser:
- case ARMMMUIdx_MSUser:
- case ARMMMUIdx_MUserNegPri:
- case ARMMMUIdx_MSUserNegPri:
- return true;
- default:
- return false;
- case ARMMMUIdx_E10_0:
- case ARMMMUIdx_E10_1:
- case ARMMMUIdx_E10_1_PAN:
- g_assert_not_reached();
- }
-}
-
-/* Translate section/page access permissions to page
- * R/W protection flags
- *
- * @env: CPUARMState
- * @mmu_idx: MMU index indicating required translation regime
- * @ap: The 3-bit access permissions (AP[2:0])
- * @domain_prot: The 2-bit domain access permissions
- */
-static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
- int ap, int domain_prot)
-{
- bool is_user = regime_is_user(env, mmu_idx);
-
- if (domain_prot == 3) {
- return PAGE_READ | PAGE_WRITE;
- }
-
- switch (ap) {
- case 0:
- if (arm_feature(env, ARM_FEATURE_V7)) {
- return 0;
- }
- switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
- case SCTLR_S:
- return is_user ? 0 : PAGE_READ;
- case SCTLR_R:
- return PAGE_READ;
- default:
- return 0;
- }
- case 1:
- return is_user ? 0 : PAGE_READ | PAGE_WRITE;
- case 2:
- if (is_user) {
- return PAGE_READ;
- } else {
- return PAGE_READ | PAGE_WRITE;
- }
- case 3:
- return PAGE_READ | PAGE_WRITE;
- case 4: /* Reserved. */
- return 0;
- case 5:
- return is_user ? 0 : PAGE_READ;
- case 6:
- return PAGE_READ;
- case 7:
- if (!arm_feature(env, ARM_FEATURE_V6K)) {
- return 0;
- }
- return PAGE_READ;
- default:
- g_assert_not_reached();
- }
-}
-
-/* Translate section/page access permissions to page
- * R/W protection flags.
- *
- * @ap: The 2-bit simple AP (AP[2:1])
- * @is_user: TRUE if accessing from PL0
- */
-static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
-{
- switch (ap) {
- case 0:
- return is_user ? 0 : PAGE_READ | PAGE_WRITE;
- case 1:
- return PAGE_READ | PAGE_WRITE;
- case 2:
- return is_user ? 0 : PAGE_READ;
- case 3:
- return PAGE_READ;
- default:
- g_assert_not_reached();
- }
-}
-
-static inline int
-simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
-{
- return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
-}
-
-/* Translate S2 section/page access permissions to protection flags
- *
- * @env: CPUARMState
- * @s2ap: The 2-bit stage2 access permissions (S2AP)
- * @xn: XN (execute-never) bits
- * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0
- */
-static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
-{
- int prot = 0;
-
- if (s2ap & 1) {
- prot |= PAGE_READ;
- }
- if (s2ap & 2) {
- prot |= PAGE_WRITE;
- }
-
- if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
- switch (xn) {
- case 0:
- prot |= PAGE_EXEC;
- break;
- case 1:
- if (s1_is_el0) {
- prot |= PAGE_EXEC;
- }
- break;
- case 2:
- break;
- case 3:
- if (!s1_is_el0) {
- prot |= PAGE_EXEC;
- }
- break;
- default:
- g_assert_not_reached();
- }
- } else {
- if (!extract32(xn, 1, 1)) {
- if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
- prot |= PAGE_EXEC;
- }
- }
- }
- return prot;
-}
-
-/* Translate section/page access permissions to protection flags
- *
- * @env: CPUARMState
- * @mmu_idx: MMU index indicating required translation regime
- * @is_aa64: TRUE if AArch64
- * @ap: The 2-bit simple AP (AP[2:1])
- * @ns: NS (non-secure) bit
- * @xn: XN (execute-never) bit
- * @pxn: PXN (privileged execute-never) bit
- */
-static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
- int ap, int ns, int xn, int pxn)
-{
- bool is_user = regime_is_user(env, mmu_idx);
- int prot_rw, user_rw;
- bool have_wxn;
- int wxn = 0;
-
- assert(mmu_idx != ARMMMUIdx_Stage2);
- assert(mmu_idx != ARMMMUIdx_Stage2_S);
-
- user_rw = simple_ap_to_rw_prot_is_user(ap, true);
- if (is_user) {
- prot_rw = user_rw;
- } else {
- if (user_rw && regime_is_pan(env, mmu_idx)) {
- /* PAN forbids data accesses but doesn't affect insn fetch */
- prot_rw = 0;
- } else {
- prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
- }
- }
-
- if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
- return prot_rw;
- }
-
- /* TODO have_wxn should be replaced with
- * ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
- * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
- * compatible processors have EL2, which is required for [U]WXN.
- */
- have_wxn = arm_feature(env, ARM_FEATURE_LPAE);
-
- if (have_wxn) {
- wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
- }
-
- if (is_aa64) {
- if (regime_has_2_ranges(mmu_idx) && !is_user) {
- xn = pxn || (user_rw & PAGE_WRITE);
- }
- } else if (arm_feature(env, ARM_FEATURE_V7)) {
- switch (regime_el(env, mmu_idx)) {
- case 1:
- case 3:
- if (is_user) {
- xn = xn || !(user_rw & PAGE_READ);
- } else {
- int uwxn = 0;
- if (have_wxn) {
- uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
- }
- xn = xn || !(prot_rw & PAGE_READ) || pxn ||
- (uwxn && (user_rw & PAGE_WRITE));
- }
- break;
- case 2:
- break;
- }
- } else {
- xn = wxn = 0;
- }
-
- if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
- return prot_rw;
- }
- return prot_rw | PAGE_EXEC;
-}
-
-static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
- uint32_t *table, uint32_t address)
-{
- /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
- TCR *tcr = regime_tcr(env, mmu_idx);
-
- if (address & tcr->mask) {
- if (tcr->raw_tcr & TTBCR_PD1) {
- /* Translation table walk disabled for TTBR1 */
- return false;
- }
- *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
- } else {
- if (tcr->raw_tcr & TTBCR_PD0) {
- /* Translation table walk disabled for TTBR0 */
- return false;
- }
- *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
- }
- *table |= (address >> 18) & 0x3ffc;
- return true;
-}
-
-/* Translate a S1 pagetable walk through S2 if needed. */
-static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
- hwaddr addr, bool *is_secure,
- ARMMMUFaultInfo *fi)
-{
- if (arm_mmu_idx_is_stage1_of_2(mmu_idx) &&
- !regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
- target_ulong s2size;
- hwaddr s2pa;
- int s2prot;
- int ret;
- ARMMMUIdx s2_mmu_idx = *is_secure ? ARMMMUIdx_Stage2_S
- : ARMMMUIdx_Stage2;
- ARMCacheAttrs cacheattrs = {};
- MemTxAttrs txattrs = {};
-
- ret = get_phys_addr_lpae(env, addr, MMU_DATA_LOAD, s2_mmu_idx, false,
- &s2pa, &txattrs, &s2prot, &s2size, fi,
- &cacheattrs);
- if (ret) {
- assert(fi->type != ARMFault_None);
- fi->s2addr = addr;
- fi->stage2 = true;
- fi->s1ptw = true;
- fi->s1ns = !*is_secure;
- return ~0;
- }
- if ((arm_hcr_el2_eff(env) & HCR_PTW) &&
- (cacheattrs.attrs & 0xf0) == 0) {
- /*
- * PTW set and S1 walk touched S2 Device memory:
- * generate Permission fault.
- */
- fi->type = ARMFault_Permission;
- fi->s2addr = addr;
- fi->stage2 = true;
- fi->s1ptw = true;
- fi->s1ns = !*is_secure;
- return ~0;
- }
-
- if (arm_is_secure_below_el3(env)) {
- /* Check if page table walk is to secure or non-secure PA space. */
- if (*is_secure) {
- *is_secure = !(env->cp15.vstcr_el2.raw_tcr & VSTCR_SW);
- } else {
- *is_secure = !(env->cp15.vtcr_el2.raw_tcr & VTCR_NSW);
- }
- } else {
- assert(!*is_secure);
- }
-
- addr = s2pa;
- }
- return addr;
-}
-
-/* All loads done in the course of a page table walk go through here. */
-static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
- ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
-{
- ARMCPU *cpu = ARM_CPU(cs);
- CPUARMState *env = &cpu->env;
- MemTxAttrs attrs = {};
- MemTxResult result = MEMTX_OK;
- AddressSpace *as;
- uint32_t data;
-
- addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
- attrs.secure = is_secure;
- as = arm_addressspace(cs, attrs);
- if (fi->s1ptw) {
- return 0;
- }
- if (regime_translation_big_endian(env, mmu_idx)) {
- data = address_space_ldl_be(as, addr, attrs, &result);
- } else {
- data = address_space_ldl_le(as, addr, attrs, &result);
- }
- if (result == MEMTX_OK) {
- return data;
- }
- fi->type = ARMFault_SyncExternalOnWalk;
- fi->ea = arm_extabort_type(result);
- return 0;
-}
-
-static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
- ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
-{
- ARMCPU *cpu = ARM_CPU(cs);
- CPUARMState *env = &cpu->env;
- MemTxAttrs attrs = {};
- MemTxResult result = MEMTX_OK;
- AddressSpace *as;
- uint64_t data;
-
- addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
- attrs.secure = is_secure;
- as = arm_addressspace(cs, attrs);
- if (fi->s1ptw) {
- return 0;
- }
- if (regime_translation_big_endian(env, mmu_idx)) {
- data = address_space_ldq_be(as, addr, attrs, &result);
- } else {
- data = address_space_ldq_le(as, addr, attrs, &result);
- }
- if (result == MEMTX_OK) {
- return data;
- }
- fi->type = ARMFault_SyncExternalOnWalk;
- fi->ea = arm_extabort_type(result);
- return 0;
-}
-
-static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
- MMUAccessType access_type, ARMMMUIdx mmu_idx,
- hwaddr *phys_ptr, int *prot,
- target_ulong *page_size,
- ARMMMUFaultInfo *fi)
-{
- CPUState *cs = env_cpu(env);
- int level = 1;
- uint32_t table;
- uint32_t desc;
- int type;
- int ap;
- int domain = 0;
- int domain_prot;
- hwaddr phys_addr;
- uint32_t dacr;
-
- /* Pagetable walk. */
- /* Lookup l1 descriptor. */
- if (!get_level1_table_address(env, mmu_idx, &table, address)) {
- /* Section translation fault if page walk is disabled by PD0 or PD1 */
- fi->type = ARMFault_Translation;
- goto do_fault;
- }
- desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
- mmu_idx, fi);
- if (fi->type != ARMFault_None) {
- goto do_fault;
- }
- type = (desc & 3);
- domain = (desc >> 5) & 0x0f;
- if (regime_el(env, mmu_idx) == 1) {
- dacr = env->cp15.dacr_ns;
- } else {
- dacr = env->cp15.dacr_s;
- }
- domain_prot = (dacr >> (domain * 2)) & 3;
- if (type == 0) {
- /* Section translation fault. */
- fi->type = ARMFault_Translation;
- goto do_fault;
- }
- if (type != 2) {
- level = 2;
- }
- if (domain_prot == 0 || domain_prot == 2) {
- fi->type = ARMFault_Domain;
- goto do_fault;
- }
- if (type == 2) {
- /* 1Mb section. */
- phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
- ap = (desc >> 10) & 3;
- *page_size = 1024 * 1024;
- } else {
- /* Lookup l2 entry. */
- if (type == 1) {
- /* Coarse pagetable. */
- table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
- } else {
- /* Fine pagetable. */
- table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
- }
- desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
- mmu_idx, fi);
- if (fi->type != ARMFault_None) {
- goto do_fault;
- }
- switch (desc & 3) {
- case 0: /* Page translation fault. */
- fi->type = ARMFault_Translation;
- goto do_fault;
- case 1: /* 64k page. */
- phys_addr = (desc & 0xffff0000) | (address & 0xffff);
- ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
- *page_size = 0x10000;
- break;
- case 2: /* 4k page. */
- phys_addr = (desc & 0xfffff000) | (address & 0xfff);
- ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
- *page_size = 0x1000;
- break;
- case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
- if (type == 1) {
- /* ARMv6/XScale extended small page format */
- if (arm_feature(env, ARM_FEATURE_XSCALE)
- || arm_feature(env, ARM_FEATURE_V6)) {
- phys_addr = (desc & 0xfffff000) | (address & 0xfff);
- *page_size = 0x1000;
- } else {
- /* UNPREDICTABLE in ARMv5; we choose to take a
- * page translation fault.
- */
- fi->type = ARMFault_Translation;
- goto do_fault;
- }
- } else {
- phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
- *page_size = 0x400;
- }
- ap = (desc >> 4) & 3;
- break;
- default:
- /* Never happens, but compiler isn't smart enough to tell. */
- abort();
- }
- }
- *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
- *prot |= *prot ? PAGE_EXEC : 0;
- if (!(*prot & (1 << access_type))) {
- /* Access permission fault. */
- fi->type = ARMFault_Permission;
- goto do_fault;
- }
- *phys_ptr = phys_addr;
- return false;
-do_fault:
- fi->domain = domain;
- fi->level = level;
- return true;
-}
-
-static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
- MMUAccessType access_type, ARMMMUIdx mmu_idx,
- hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
- target_ulong *page_size, ARMMMUFaultInfo *fi)
-{
- CPUState *cs = env_cpu(env);
- ARMCPU *cpu = env_archcpu(env);
- int level = 1;
- uint32_t table;
- uint32_t desc;
- uint32_t xn;
- uint32_t pxn = 0;
- int type;
- int ap;
- int domain = 0;
- int domain_prot;
- hwaddr phys_addr;
- uint32_t dacr;
- bool ns;
-
- /* Pagetable walk. */
- /* Lookup l1 descriptor. */
- if (!get_level1_table_address(env, mmu_idx, &table, address)) {
- /* Section translation fault if page walk is disabled by PD0 or PD1 */
- fi->type = ARMFault_Translation;
- goto do_fault;
- }
- desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
- mmu_idx, fi);
- if (fi->type != ARMFault_None) {
- goto do_fault;
- }
- type = (desc & 3);
- if (type == 0 || (type == 3 && !cpu_isar_feature(aa32_pxn, cpu))) {
- /* Section translation fault, or attempt to use the encoding
- * which is Reserved on implementations without PXN.
- */
- fi->type = ARMFault_Translation;
- goto do_fault;
- }
- if ((type == 1) || !(desc & (1 << 18))) {
- /* Page or Section. */
- domain = (desc >> 5) & 0x0f;
- }
- if (regime_el(env, mmu_idx) == 1) {
- dacr = env->cp15.dacr_ns;
- } else {
- dacr = env->cp15.dacr_s;
- }
- if (type == 1) {
- level = 2;
- }
- domain_prot = (dacr >> (domain * 2)) & 3;
- if (domain_prot == 0 || domain_prot == 2) {
- /* Section or Page domain fault */
- fi->type = ARMFault_Domain;
- goto do_fault;
- }
- if (type != 1) {
- if (desc & (1 << 18)) {
- /* Supersection. */
- phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
- phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
- phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
- *page_size = 0x1000000;
- } else {
- /* Section. */
- phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
- *page_size = 0x100000;
- }
- ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
- xn = desc & (1 << 4);
- pxn = desc & 1;
- ns = extract32(desc, 19, 1);
- } else {
- if (cpu_isar_feature(aa32_pxn, cpu)) {
- pxn = (desc >> 2) & 1;
- }
- ns = extract32(desc, 3, 1);
- /* Lookup l2 entry. */
- table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
- desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
- mmu_idx, fi);
- if (fi->type != ARMFault_None) {
- goto do_fault;
- }
- ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
- switch (desc & 3) {
- case 0: /* Page translation fault. */
- fi->type = ARMFault_Translation;
- goto do_fault;
- case 1: /* 64k page. */
- phys_addr = (desc & 0xffff0000) | (address & 0xffff);
- xn = desc & (1 << 15);
- *page_size = 0x10000;
- break;
- case 2: case 3: /* 4k page. */
- phys_addr = (desc & 0xfffff000) | (address & 0xfff);
- xn = desc & 1;
- *page_size = 0x1000;
- break;
- default:
- /* Never happens, but compiler isn't smart enough to tell. */
- abort();
- }
- }
- if (domain_prot == 3) {
- *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
- } else {
- if (pxn && !regime_is_user(env, mmu_idx)) {
- xn = 1;
- }
- if (xn && access_type == MMU_INST_FETCH) {
- fi->type = ARMFault_Permission;
- goto do_fault;
- }
-
- if (arm_feature(env, ARM_FEATURE_V6K) &&
- (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
- /* The simplified model uses AP[0] as an access control bit. */
- if ((ap & 1) == 0) {
- /* Access flag fault. */
- fi->type = ARMFault_AccessFlag;
- goto do_fault;
- }
- *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
- } else {
- *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
- }
- if (*prot && !xn) {
- *prot |= PAGE_EXEC;
- }
- if (!(*prot & (1 << access_type))) {
- /* Access permission fault. */
- fi->type = ARMFault_Permission;
- goto do_fault;
- }
- }
- if (ns) {
- /* The NS bit will (as required by the architecture) have no effect if
- * the CPU doesn't support TZ or this is a non-secure translation
- * regime, because the attribute will already be non-secure.
- */
- attrs->secure = false;
- }
- *phys_ptr = phys_addr;
- return false;
-do_fault:
- fi->domain = domain;
- fi->level = level;
- return true;
-}
-
-/*
- * check_s2_mmu_setup
- * @cpu: ARMCPU
- * @is_aa64: True if the translation regime is in AArch64 state
- * @startlevel: Suggested starting level
- * @inputsize: Bitsize of IPAs
- * @stride: Page-table stride (See the ARM ARM)
- *
- * Returns true if the suggested S2 translation parameters are OK and
- * false otherwise.
- */
-static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
- int inputsize, int stride)
-{
- const int grainsize = stride + 3;
- int startsizecheck;
-
- /* Negative levels are never allowed. */
- if (level < 0) {
- return false;
- }
-
- startsizecheck = inputsize - ((3 - level) * stride + grainsize);
- if (startsizecheck < 1 || startsizecheck > stride + 4) {
- return false;
- }
-
- if (is_aa64) {
- CPUARMState *env = &cpu->env;
- unsigned int pamax = arm_pamax(cpu);
-
- switch (stride) {
- case 13: /* 64KB Pages. */
- if (level == 0 || (level == 1 && pamax <= 42)) {
- return false;
- }
- break;
- case 11: /* 16KB Pages. */
- if (level == 0 || (level == 1 && pamax <= 40)) {
- return false;
- }
- break;
- case 9: /* 4KB Pages. */
- if (level == 0 && pamax <= 42) {
- return false;
- }
- break;
- default:
- g_assert_not_reached();
- }
-
- /* Inputsize checks. */
- if (inputsize > pamax &&
- (arm_el_is_aa64(env, 1) || inputsize > 40)) {
- /* This is CONSTRAINED UNPREDICTABLE and we choose to fault. */
- return false;
- }
- } else {
- /* AArch32 only supports 4KB pages. Assert on that. */
- assert(stride == 9);
-
- if (level == 0) {
- return false;
- }
- }
- return true;
-}
-
-/* Translate from the 4-bit stage 2 representation of
- * memory attributes (without cache-allocation hints) to
- * the 8-bit representation of the stage 1 MAIR registers
- * (which includes allocation hints).
- *
- * ref: shared/translation/attrs/S2AttrDecode()
- * .../S2ConvertAttrsHints()
- */
-static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
-{
- uint8_t hiattr = extract32(s2attrs, 2, 2);
- uint8_t loattr = extract32(s2attrs, 0, 2);
- uint8_t hihint = 0, lohint = 0;
-
- if (hiattr != 0) { /* normal memory */
- if (arm_hcr_el2_eff(env) & HCR_CD) { /* cache disabled */
- hiattr = loattr = 1; /* non-cacheable */
- } else {
- if (hiattr != 1) { /* Write-through or write-back */
- hihint = 3; /* RW allocate */
- }
- if (loattr != 1) { /* Write-through or write-back */
- lohint = 3; /* RW allocate */
- }
- }
- }
-
- return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
-}
-#endif /* !CONFIG_USER_ONLY */
-
-static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx)
-{
- if (regime_has_2_ranges(mmu_idx)) {
- return extract64(tcr, 37, 2);
- } else if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
- return 0; /* VTCR_EL2 */
- } else {
- /* Replicate the single TBI bit so we always have 2 bits. */
- return extract32(tcr, 20, 1) * 3;
- }
-}
-
-static int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx)
-{
- if (regime_has_2_ranges(mmu_idx)) {
- return extract64(tcr, 51, 2);
- } else if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
- return 0; /* VTCR_EL2 */
- } else {
- /* Replicate the single TBID bit so we always have 2 bits. */
- return extract32(tcr, 29, 1) * 3;
- }
-}
-
-static int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx)
-{
- if (regime_has_2_ranges(mmu_idx)) {
- return extract64(tcr, 57, 2);
- } else {
- /* Replicate the single TCMA bit so we always have 2 bits. */
- return extract32(tcr, 30, 1) * 3;
- }
-}
-
-ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
- ARMMMUIdx mmu_idx, bool data)
-{
- uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
- bool epd, hpd, using16k, using64k;
- int select, tsz, tbi, max_tsz;
-
- if (!regime_has_2_ranges(mmu_idx)) {
- select = 0;
- tsz = extract32(tcr, 0, 6);
- using64k = extract32(tcr, 14, 1);
- using16k = extract32(tcr, 15, 1);
- if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
- /* VTCR_EL2 */
- hpd = false;
- } else {
- hpd = extract32(tcr, 24, 1);
- }
- epd = false;
- } else {
- /*
- * Bit 55 is always between the two regions, and is canonical for
- * determining if address tagging is enabled.
- */
- select = extract64(va, 55, 1);
- if (!select) {
- tsz = extract32(tcr, 0, 6);
- epd = extract32(tcr, 7, 1);
- using64k = extract32(tcr, 14, 1);
- using16k = extract32(tcr, 15, 1);
- hpd = extract64(tcr, 41, 1);
- } else {
- int tg = extract32(tcr, 30, 2);
- using16k = tg == 1;
- using64k = tg == 3;
- tsz = extract32(tcr, 16, 6);
- epd = extract32(tcr, 23, 1);
- hpd = extract64(tcr, 42, 1);
- }
- }
-
- if (cpu_isar_feature(aa64_st, env_archcpu(env))) {
- max_tsz = 48 - using64k;
- } else {
- max_tsz = 39;
- }
-
- tsz = MIN(tsz, max_tsz);
- tsz = MAX(tsz, 16); /* TODO: ARMv8.2-LVA */
-
- /* Present TBI as a composite with TBID. */
- tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
- if (!data) {
- tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
- }
- tbi = (tbi >> select) & 1;
-
- return (ARMVAParameters) {
- .tsz = tsz,
- .select = select,
- .tbi = tbi,
- .epd = epd,
- .hpd = hpd,
- .using16k = using16k,
- .using64k = using64k,
- };
-}
-
-#ifndef CONFIG_USER_ONLY
-static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
- ARMMMUIdx mmu_idx)
-{
- uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
- uint32_t el = regime_el(env, mmu_idx);
- int select, tsz;
- bool epd, hpd;
-
- assert(mmu_idx != ARMMMUIdx_Stage2_S);
-
- if (mmu_idx == ARMMMUIdx_Stage2) {
- /* VTCR */
- bool sext = extract32(tcr, 4, 1);
- bool sign = extract32(tcr, 3, 1);
-
- /*
- * If the sign-extend bit is not the same as t0sz[3], the result
- * is unpredictable. Flag this as a guest error.
- */
- if (sign != sext) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
- }
- tsz = sextract32(tcr, 0, 4) + 8;
- select = 0;
- hpd = false;
- epd = false;
- } else if (el == 2) {
- /* HTCR */
- tsz = extract32(tcr, 0, 3);
- select = 0;
- hpd = extract64(tcr, 24, 1);
- epd = false;
- } else {
- int t0sz = extract32(tcr, 0, 3);
- int t1sz = extract32(tcr, 16, 3);
-
- if (t1sz == 0) {
- select = va > (0xffffffffu >> t0sz);
- } else {
- /* Note that we will detect errors later. */
- select = va >= ~(0xffffffffu >> t1sz);
- }
- if (!select) {
- tsz = t0sz;
- epd = extract32(tcr, 7, 1);
- hpd = extract64(tcr, 41, 1);
- } else {
- tsz = t1sz;
- epd = extract32(tcr, 23, 1);
- hpd = extract64(tcr, 42, 1);
- }
- /* For aarch32, hpd0 is not enabled without t2e as well. */
- hpd &= extract32(tcr, 6, 1);
- }
-
- return (ARMVAParameters) {
- .tsz = tsz,
- .select = select,
- .epd = epd,
- .hpd = hpd,
- };
-}
-
-/**
- * get_phys_addr_lpae: perform one stage of page table walk, LPAE format
- *
- * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
- * prot and page_size may not be filled in, and the populated fsr value
provides
- * information on why the translation aborted, in the format of a long-format
- * DFSR/IFSR fault register, with the following caveats:
- * * the WnR bit is never set (the caller must do this).
- *
- * @env: CPUARMState
- * @address: virtual address to get physical address for
- * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
- * @mmu_idx: MMU index indicating required translation regime
- * @s1_is_el0: if @mmu_idx is ARMMMUIdx_Stage2 (so this is a stage 2 page table
- * walk), must be true if this is stage 2 of a stage 1+2 walk for
an
- * EL0 access). If @mmu_idx is anything else, @s1_is_el0 is
ignored.
- * @phys_ptr: set to the physical address corresponding to the virtual address
- * @attrs: set to the memory transaction attributes to use
- * @prot: set to the permissions for the page containing phys_ptr
- * @page_size_ptr: set to the size of the page containing phys_ptr
- * @fi: set to fault info if the translation fails
- * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
- */
-static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
- MMUAccessType access_type, ARMMMUIdx mmu_idx,
- bool s1_is_el0,
-                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
- target_ulong *page_size_ptr,
- ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
-{
- ARMCPU *cpu = env_archcpu(env);
- CPUState *cs = CPU(cpu);
- /* Read an LPAE long-descriptor translation table. */
- ARMFaultType fault_type = ARMFault_Translation;
- uint32_t level;
- ARMVAParameters param;
- uint64_t ttbr;
- hwaddr descaddr, indexmask, indexmask_grainsize;
- uint32_t tableattrs;
- target_ulong page_size;
- uint32_t attrs;
- int32_t stride;
- int addrsize, inputsize;
- TCR *tcr = regime_tcr(env, mmu_idx);
- int ap, ns, xn, pxn;
- uint32_t el = regime_el(env, mmu_idx);
- uint64_t descaddrmask;
- bool aarch64 = arm_el_is_aa64(env, el);
- bool guarded = false;
-
- /* TODO: This code does not support shareability levels. */
- if (aarch64) {
- param = aa64_va_parameters(env, address, mmu_idx,
- access_type != MMU_INST_FETCH);
- level = 0;
- addrsize = 64 - 8 * param.tbi;
- inputsize = 64 - param.tsz;
- } else {
- param = aa32_va_parameters(env, address, mmu_idx);
- level = 1;
- addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32);
- inputsize = addrsize - param.tsz;
- }
-
- /*
- * We determined the region when collecting the parameters, but we
- * have not yet validated that the address is valid for the region.
- * Extract the top bits and verify that they all match select.
- *
- * For aa32, if inputsize == addrsize, then we have selected the
- * region by exclusion in aa32_va_parameters and there is no more
- * validation to do here.
- */
- if (inputsize < addrsize) {
- target_ulong top_bits = sextract64(address, inputsize,
- addrsize - inputsize);
- if (-top_bits != param.select) {
- /* The gap between the two regions is a Translation fault */
- fault_type = ARMFault_Translation;
- goto do_fault;
- }
- }
-
- if (param.using64k) {
- stride = 13;
- } else if (param.using16k) {
- stride = 11;
- } else {
- stride = 9;
- }
-
- /* Note that QEMU ignores shareability and cacheability attributes,
- * so we don't need to do anything with the SH, ORGN, IRGN fields
- * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the
- * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
- * implement any ASID-like capability so we can ignore it (instead
- * we will always flush the TLB any time the ASID is changed).
- */
- ttbr = regime_ttbr(env, mmu_idx, param.select);
-
- /* Here we should have set up all the parameters for the translation:
- * inputsize, ttbr, epd, stride, tbi
- */
-
- if (param.epd) {
- /* Translation table walk disabled => Translation fault on TLB miss
- * Note: This is always 0 on 64-bit EL2 and EL3.
- */
- goto do_fault;
- }
-
- if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
- /* The starting level depends on the virtual address size (which can
- * be up to 48 bits) and the translation granule size. It indicates
- * the number of strides (stride bits at a time) needed to
- * consume the bits of the input address. In the pseudocode this is:
- * level = 4 - RoundUp((inputsize - grainsize) / stride)
- * where their 'inputsize' is our 'inputsize', 'grainsize' is
- * our 'stride + 3' and 'stride' is our 'stride'.
- * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
- * = 4 - (inputsize - stride - 3 + stride - 1) / stride
- * = 4 - (inputsize - 4) / stride;
- */
- level = 4 - (inputsize - 4) / stride;
- } else {
- /* For stage 2 translations the starting level is specified by the
- * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
- */
- uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
- uint32_t startlevel;
- bool ok;
-
- if (!aarch64 || stride == 9) {
- /* AArch32 or 4KB pages */
- startlevel = 2 - sl0;
-
- if (cpu_isar_feature(aa64_st, cpu)) {
- startlevel &= 3;
- }
- } else {
- /* 16KB or 64KB pages */
- startlevel = 3 - sl0;
- }
-
- /* Check that the starting level is valid. */
- ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
- inputsize, stride);
- if (!ok) {
- fault_type = ARMFault_Translation;
- goto do_fault;
- }
- level = startlevel;
- }
-
- indexmask_grainsize = (1ULL << (stride + 3)) - 1;
- indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;
-
- /* Now we can extract the actual base address from the TTBR */
- descaddr = extract64(ttbr, 0, 48);
- /*
- * We rely on this masking to clear the RES0 bits at the bottom of the TTBR
- * and also to mask out CnP (bit 0) which could validly be non-zero.
- */
- descaddr &= ~indexmask;
-
- /* The address field in the descriptor goes up to bit 39 for ARMv7
- * but up to bit 47 for ARMv8, but we use the descaddrmask
- * up to bit 39 for AArch32, because we don't need other bits in that case
- * to construct next descriptor address (anyway they should be all zeroes).
- */
- descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) &
- ~indexmask_grainsize;
-
- /* Secure accesses start with the page table in secure memory and
- * can be downgraded to non-secure at any step. Non-secure accesses
- * remain non-secure. We implement this by just ORing in the NSTable/NS
- * bits at each step.
- */
- tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
- for (;;) {
- uint64_t descriptor;
- bool nstable;
-
- descaddr |= (address >> (stride * (4 - level))) & indexmask;
- descaddr &= ~7ULL;
- nstable = extract32(tableattrs, 4, 1);
- descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi);
- if (fi->type != ARMFault_None) {
- goto do_fault;
- }
-
- if (!(descriptor & 1) ||
- (!(descriptor & 2) && (level == 3))) {
- /* Invalid, or the Reserved level 3 encoding */
- goto do_fault;
- }
- descaddr = descriptor & descaddrmask;
-
- if ((descriptor & 2) && (level < 3)) {
- /* Table entry. The top five bits are attributes which may
- * propagate down through lower levels of the table (and
- * which are all arranged so that 0 means "no effect", so
- * we can gather them up by ORing in the bits at each level).
- */
- tableattrs |= extract64(descriptor, 59, 5);
- level++;
- indexmask = indexmask_grainsize;
- continue;
- }
- /* Block entry at level 1 or 2, or page entry at level 3.
- * These are basically the same thing, although the number
- * of bits we pull in from the vaddr varies.
- */
- page_size = (1ULL << ((stride * (4 - level)) + 3));
- descaddr |= (address & (page_size - 1));
- /* Extract attributes from the descriptor */
- attrs = extract64(descriptor, 2, 10)
- | (extract64(descriptor, 52, 12) << 10);
-
- if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
- /* Stage 2 table descriptors do not include any attribute fields */
- break;
- }
- /* Merge in attributes from table descriptors */
- attrs |= nstable << 3; /* NS */
- guarded = extract64(descriptor, 50, 1); /* GP */
- if (param.hpd) {
- /* HPD disables all the table attributes except NSTable. */
- break;
- }
- attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */
- /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
- * means "force PL1 access only", which means forcing AP[1] to 0.
- */
- attrs &= ~(extract32(tableattrs, 2, 1) << 4); /* !APT[0] => AP[1] */
- attrs |= extract32(tableattrs, 3, 1) << 5; /* APT[1] => AP[2] */
- break;
- }
- /* Here descaddr is the final physical address, and attributes
- * are all in attrs.
- */
- fault_type = ARMFault_AccessFlag;
- if ((attrs & (1 << 8)) == 0) {
- /* Access flag */
- goto do_fault;
- }
-
- ap = extract32(attrs, 4, 2);
-
- if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
- ns = mmu_idx == ARMMMUIdx_Stage2;
- xn = extract32(attrs, 11, 2);
- *prot = get_S2prot(env, ap, xn, s1_is_el0);
- } else {
- ns = extract32(attrs, 3, 1);
- xn = extract32(attrs, 12, 1);
- pxn = extract32(attrs, 11, 1);
- *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
- }
-
- fault_type = ARMFault_Permission;
- if (!(*prot & (1 << access_type))) {
- goto do_fault;
- }
-
- if (ns) {
- /* The NS bit will (as required by the architecture) have no effect if
- * the CPU doesn't support TZ or this is a non-secure translation
- * regime, because the attribute will already be non-secure.
- */
- txattrs->secure = false;
- }
- /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB. */
- if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
- arm_tlb_bti_gp(txattrs) = true;
- }
-
- if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
- cacheattrs->attrs = convert_stage2_attrs(env, extract32(attrs, 0, 4));
- } else {
- /* Index into MAIR registers for cache attributes */
- uint8_t attrindx = extract32(attrs, 0, 3);
- uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
- assert(attrindx <= 7);
- cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
- }
- cacheattrs->shareability = extract32(attrs, 6, 2);
-
- *phys_ptr = descaddr;
- *page_size_ptr = page_size;
- return false;
-
-do_fault:
- fi->type = fault_type;
- fi->level = level;
- /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */
- fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_Stage2 ||
- mmu_idx == ARMMMUIdx_Stage2_S);
- fi->s1ns = mmu_idx == ARMMMUIdx_Stage2;
- return true;
-}
-
-static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
- ARMMMUIdx mmu_idx,
- int32_t address, int *prot)
-{
- if (!arm_feature(env, ARM_FEATURE_M)) {
- *prot = PAGE_READ | PAGE_WRITE;
- switch (address) {
- case 0xF0000000 ... 0xFFFFFFFF:
- if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
- /* hivecs execing is ok */
- *prot |= PAGE_EXEC;
- }
- break;
- case 0x00000000 ... 0x7FFFFFFF:
- *prot |= PAGE_EXEC;
- break;
- }
- } else {
- /* Default system address map for M profile cores.
- * The architecture specifies which regions are execute-never;
- * at the MPU level no other checks are defined.
- */
- switch (address) {
- case 0x00000000 ... 0x1fffffff: /* ROM */
- case 0x20000000 ... 0x3fffffff: /* SRAM */
- case 0x60000000 ... 0x7fffffff: /* RAM */
- case 0x80000000 ... 0x9fffffff: /* RAM */
- *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
- break;
- case 0x40000000 ... 0x5fffffff: /* Peripheral */
- case 0xa0000000 ... 0xbfffffff: /* Device */
- case 0xc0000000 ... 0xdfffffff: /* Device */
- case 0xe0000000 ... 0xffffffff: /* System */
- *prot = PAGE_READ | PAGE_WRITE;
- break;
- default:
- g_assert_not_reached();
- }
- }
-}
-
-static bool pmsav7_use_background_region(ARMCPU *cpu,
- ARMMMUIdx mmu_idx, bool is_user)
-{
- /* Return true if we should use the default memory map as a
- * "background" region if there are no hits against any MPU regions.
- */
- CPUARMState *env = &cpu->env;
-
- if (is_user) {
- return false;
- }
-
- if (arm_feature(env, ARM_FEATURE_M)) {
- return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
- & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
- } else {
- return regime_sctlr(env, mmu_idx) & SCTLR_BR;
- }
-}
-
-static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address)
-{
- /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
- return arm_feature(env, ARM_FEATURE_M) &&
- extract32(address, 20, 12) == 0xe00;
-}
-
-static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
-{
- /* True if address is in the M profile system region
- * 0xe0000000 - 0xffffffff
- */
- return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
-}
-
-static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
- MMUAccessType access_type, ARMMMUIdx mmu_idx,
- hwaddr *phys_ptr, int *prot,
- target_ulong *page_size,
- ARMMMUFaultInfo *fi)
-{
- ARMCPU *cpu = env_archcpu(env);
- int n;
- bool is_user = regime_is_user(env, mmu_idx);
-
- *phys_ptr = address;
- *page_size = TARGET_PAGE_SIZE;
- *prot = 0;
-
- if (regime_translation_disabled(env, mmu_idx) ||
- m_is_ppb_region(env, address)) {
- /* MPU disabled or M profile PPB access: use default memory map.
- * The other case which uses the default memory map in the
- * v7M ARM ARM pseudocode is exception vector reads from the vector
- * table. In QEMU those accesses are done in arm_v7m_load_vector(),
- * which always does a direct read using address_space_ldl(), rather
- * than going via this function, so we don't need to check that here.
- */
- get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
- } else { /* MPU enabled */
- for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
- /* region search */
- uint32_t base = env->pmsav7.drbar[n];
- uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
- uint32_t rmask;
- bool srdis = false;
-
- if (!(env->pmsav7.drsr[n] & 0x1)) {
- continue;
- }
-
- if (!rsize) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "DRSR[%d]: Rsize field cannot be 0\n", n);
- continue;
- }
- rsize++;
- rmask = (1ull << rsize) - 1;
-
- if (base & rmask) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "DRBAR[%d]: 0x%" PRIx32 " misaligned "
- "to DRSR region size, mask = 0x%" PRIx32 "\n",
- n, base, rmask);
- continue;
- }
-
- if (address < base || address > base + rmask) {
- /*
- * Address not in this region. We must check whether the
- * region covers addresses in the same page as our address.
- * In that case we must not report a size that covers the
- * whole page for a subsequent hit against a different MPU
- * region or the background region, because it would result in
- * incorrect TLB hits for subsequent accesses to addresses that
- * are in this MPU region.
- */
- if (ranges_overlap(base, rmask,
- address & TARGET_PAGE_MASK,
- TARGET_PAGE_SIZE)) {
- *page_size = 1;
- }
- continue;
- }
-
- /* Region matched */
-
- if (rsize >= 8) { /* no subregions for regions < 256 bytes */
- int i, snd;
- uint32_t srdis_mask;
-
- rsize -= 3; /* sub region size (power of 2) */
- snd = ((address - base) >> rsize) & 0x7;
- srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);
-
- srdis_mask = srdis ? 0x3 : 0x0;
- for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
- /* This will check in groups of 2, 4 and then 8, whether
- * the subregion bits are consistent. rsize is incremented
- * back up to give the region size, considering consistent
- * adjacent subregions as one region. Stop testing if rsize
- * is already big enough for an entire QEMU page.
- */
- int snd_rounded = snd & ~(i - 1);
- uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
- snd_rounded + 8, i);
- if (srdis_mask ^ srdis_multi) {
- break;
- }
- srdis_mask = (srdis_mask << i) | srdis_mask;
- rsize++;
- }
- }
- if (srdis) {
- continue;
- }
- if (rsize < TARGET_PAGE_BITS) {
- *page_size = 1 << rsize;
- }
- break;
- }
-
- if (n == -1) { /* no hits */
- if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
- /* background fault */
- fi->type = ARMFault_Background;
- return true;
- }
- get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
- } else { /* a MPU hit! */
- uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
- uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);
-
- if (m_is_system_region(env, address)) {
- /* System space is always execute never */
- xn = 1;
- }
-
- if (is_user) { /* User mode AP bit decoding */
- switch (ap) {
- case 0:
- case 1:
- case 5:
- break; /* no access */
- case 3:
- *prot |= PAGE_WRITE;
- /* fall through */
- case 2:
- case 6:
- *prot |= PAGE_READ | PAGE_EXEC;
- break;
- case 7:
- /* for v7M, same as 6; for R profile a reserved value */
- if (arm_feature(env, ARM_FEATURE_M)) {
- *prot |= PAGE_READ | PAGE_EXEC;
- break;
- }
- /* fall through */
- default:
- qemu_log_mask(LOG_GUEST_ERROR,
- "DRACR[%d]: Bad value for AP bits: 0x%"
- PRIx32 "\n", n, ap);
- }
- } else { /* Priv. mode AP bits decoding */
- switch (ap) {
- case 0:
- break; /* no access */
- case 1:
- case 2:
- case 3:
- *prot |= PAGE_WRITE;
- /* fall through */
- case 5:
- case 6:
- *prot |= PAGE_READ | PAGE_EXEC;
- break;
- case 7:
- /* for v7M, same as 6; for R profile a reserved value */
- if (arm_feature(env, ARM_FEATURE_M)) {
- *prot |= PAGE_READ | PAGE_EXEC;
- break;
- }
- /* fall through */
- default:
- qemu_log_mask(LOG_GUEST_ERROR,
- "DRACR[%d]: Bad value for AP bits: 0x%"
- PRIx32 "\n", n, ap);
- }
- }
-
- /* execute never */
- if (xn) {
- *prot &= ~PAGE_EXEC;
- }
- }
- }
-
- fi->type = ARMFault_Permission;
- fi->level = 1;
- return !(*prot & (1 << access_type));
-}
-
-static bool v8m_is_sau_exempt(CPUARMState *env,
- uint32_t address, MMUAccessType access_type)
-{
- /* The architecture specifies that certain address ranges are
- * exempt from v8M SAU/IDAU checks.
- */
- return
- (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
- (address >= 0xe0000000 && address <= 0xe0002fff) ||
- (address >= 0xe000e000 && address <= 0xe000efff) ||
- (address >= 0xe002e000 && address <= 0xe002efff) ||
- (address >= 0xe0040000 && address <= 0xe0041fff) ||
- (address >= 0xe00ff000 && address <= 0xe00fffff);
-}
-
-void v8m_security_lookup(CPUARMState *env, uint32_t address,
- MMUAccessType access_type, ARMMMUIdx mmu_idx,
- V8M_SAttributes *sattrs)
-{
- /* Look up the security attributes for this address. Compare the
- * pseudocode SecurityCheck() function.
- * We assume the caller has zero-initialized *sattrs.
- */
- ARMCPU *cpu = env_archcpu(env);
- int r;
- bool idau_exempt = false, idau_ns = true, idau_nsc = true;
- int idau_region = IREGION_NOTVALID;
- uint32_t addr_page_base = address & TARGET_PAGE_MASK;
- uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
-
- if (cpu->idau) {
- IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
- IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);
-
- iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
- &idau_nsc);
- }
-
- if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
- /* 0xf0000000..0xffffffff is always S for insn fetches */
- return;
- }
-
- if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
- sattrs->ns = !regime_is_secure(env, mmu_idx);
- return;
- }
-
- if (idau_region != IREGION_NOTVALID) {
- sattrs->irvalid = true;
- sattrs->iregion = idau_region;
- }
-
- switch (env->sau.ctrl & 3) {
- case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
- break;
- case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
- sattrs->ns = true;
- break;
- default: /* SAU.ENABLE == 1 */
- for (r = 0; r < cpu->sau_sregion; r++) {
- if (env->sau.rlar[r] & 1) {
- uint32_t base = env->sau.rbar[r] & ~0x1f;
- uint32_t limit = env->sau.rlar[r] | 0x1f;
-
- if (base <= address && limit >= address) {
- if (base > addr_page_base || limit < addr_page_limit) {
- sattrs->subpage = true;
- }
- if (sattrs->srvalid) {
- /* If we hit in more than one region then we must report
- * as Secure, not NS-Callable, with no valid region
- * number info.
- */
- sattrs->ns = false;
- sattrs->nsc = false;
- sattrs->sregion = 0;
- sattrs->srvalid = false;
- break;
- } else {
- if (env->sau.rlar[r] & 2) {
- sattrs->nsc = true;
- } else {
- sattrs->ns = true;
- }
- sattrs->srvalid = true;
- sattrs->sregion = r;
- }
- } else {
- /*
- * Address not in this region. We must check whether the
- * region covers addresses in the same page as our address.
- * In that case we must not report a size that covers the
- * whole page for a subsequent hit against a different MPU
- * region or the background region, because it would result
- * in incorrect TLB hits for subsequent accesses to
- * addresses that are in this MPU region.
- */
- if (limit >= base &&
- ranges_overlap(base, limit - base + 1,
- addr_page_base,
- TARGET_PAGE_SIZE)) {
- sattrs->subpage = true;
- }
- }
- }
- }
- break;
- }
-
- /*
- * The IDAU will override the SAU lookup results if it specifies
- * higher security than the SAU does.
- */
- if (!idau_ns) {
- if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
- sattrs->ns = false;
- sattrs->nsc = idau_nsc;
- }
- }
-}
-
-bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
- MMUAccessType access_type, ARMMMUIdx mmu_idx,
- hwaddr *phys_ptr, MemTxAttrs *txattrs,
- int *prot, bool *is_subpage,
- ARMMMUFaultInfo *fi, uint32_t *mregion)
-{
- /* Perform a PMSAv8 MPU lookup (without also doing the SAU check
- * that a full phys-to-virt translation does).
- * mregion is (if not NULL) set to the region number which matched,
- * or -1 if no region number is returned (MPU off, address did not
- * hit a region, address hit in multiple regions).
- * We set is_subpage to true if the region hit doesn't cover the
- * entire TARGET_PAGE the address is within.
- */
- ARMCPU *cpu = env_archcpu(env);
- bool is_user = regime_is_user(env, mmu_idx);
- uint32_t secure = regime_is_secure(env, mmu_idx);
- int n;
- int matchregion = -1;
- bool hit = false;
- uint32_t addr_page_base = address & TARGET_PAGE_MASK;
- uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
-
- *is_subpage = false;
- *phys_ptr = address;
- *prot = 0;
- if (mregion) {
- *mregion = -1;
- }
-
- /* Unlike the ARM ARM pseudocode, we don't need to check whether this
- * was an exception vector read from the vector table (which is always
- * done using the default system address map), because those accesses
- * are done in arm_v7m_load_vector(), which always does a direct
- * read using address_space_ldl(), rather than going via this function.
- */
- if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
- hit = true;
- } else if (m_is_ppb_region(env, address)) {
- hit = true;
- } else {
- if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
- hit = true;
- }
-
- for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
- /* region search */
- /* Note that the base address is bits [31:5] from the register
- * with bits [4:0] all zeroes, but the limit address is bits
- * [31:5] from the register with bits [4:0] all ones.
- */
- uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
- uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;
-
- if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
- /* Region disabled */
- continue;
- }
-
- if (address < base || address > limit) {
- /*
- * Address not in this region. We must check whether the
- * region covers addresses in the same page as our address.
- * In that case we must not report a size that covers the
- * whole page for a subsequent hit against a different MPU
- * region or the background region, because it would result in
- * incorrect TLB hits for subsequent accesses to addresses that
- * are in this MPU region.
- */
- if (limit >= base &&
- ranges_overlap(base, limit - base + 1,
- addr_page_base,
- TARGET_PAGE_SIZE)) {
- *is_subpage = true;
- }
- continue;
- }
-
- if (base > addr_page_base || limit < addr_page_limit) {
- *is_subpage = true;
- }
-
- if (matchregion != -1) {
- /* Multiple regions match -- always a failure (unlike
- * PMSAv7 where highest-numbered-region wins)
- */
- fi->type = ARMFault_Permission;
- fi->level = 1;
- return true;
- }
-
- matchregion = n;
- hit = true;
- }
- }
-
- if (!hit) {
- /* background fault */
- fi->type = ARMFault_Background;
- return true;
- }
-
- if (matchregion == -1) {
- /* hit using the background region */
- get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
- } else {
- uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
- uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);
- bool pxn = false;
-
- if (arm_feature(env, ARM_FEATURE_V8_1M)) {
- pxn = extract32(env->pmsav8.rlar[secure][matchregion], 4, 1);
- }
-
- if (m_is_system_region(env, address)) {
- /* System space is always execute never */
- xn = 1;
- }
-
- *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
- if (*prot && !xn && !(pxn && !is_user)) {
- *prot |= PAGE_EXEC;
- }
- /* We don't need to look the attribute up in the MAIR0/MAIR1
- * registers because that only tells us about cacheability.
- */
- if (mregion) {
- *mregion = matchregion;
- }
- }
-
- fi->type = ARMFault_Permission;
- fi->level = 1;
- return !(*prot & (1 << access_type));
-}
-
-
-static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
- MMUAccessType access_type, ARMMMUIdx mmu_idx,
- hwaddr *phys_ptr, MemTxAttrs *txattrs,
- int *prot, target_ulong *page_size,
- ARMMMUFaultInfo *fi)
-{
- uint32_t secure = regime_is_secure(env, mmu_idx);
- V8M_SAttributes sattrs = {};
- bool ret;
- bool mpu_is_subpage;
-
- if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
- v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
- if (access_type == MMU_INST_FETCH) {
- /* Instruction fetches always use the MMU bank and the
- * transaction attribute determined by the fetch address,
- * regardless of CPU state. This is painful for QEMU
- * to handle, because it would mean we need to encode
- * into the mmu_idx not just the (user, negpri) information
- * for the current security state but also that for the
- * other security state, which would balloon the number
- * of mmu_idx values needed alarmingly.
- * Fortunately we can avoid this because it's not actually
- * possible to arbitrarily execute code from memory with
- * the wrong security attribute: it will always generate
- * an exception of some kind or another, apart from the
- * special case of an NS CPU executing an SG instruction
- * in S&NSC memory. So we always just fail the translation
- * here and sort things out in the exception handler
- * (including possibly emulating an SG instruction).
- */
- if (sattrs.ns != !secure) {
- if (sattrs.nsc) {
- fi->type = ARMFault_QEMU_NSCExec;
- } else {
- fi->type = ARMFault_QEMU_SFault;
- }
- *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
- *phys_ptr = address;
- *prot = 0;
- return true;
- }
- } else {
- /* For data accesses we always use the MMU bank indicated
- * by the current CPU state, but the security attributes
- * might downgrade a secure access to nonsecure.
- */
- if (sattrs.ns) {
- txattrs->secure = false;
- } else if (!secure) {
- /* NS access to S memory must fault.
- * Architecturally we should first check whether the
- * MPU information for this address indicates that we
- * are doing an unaligned access to Device memory, which
- * should generate a UsageFault instead. QEMU does not
- * currently check for that kind of unaligned access though.
- * If we added it we would need to do so as a special case
- * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
- */
- fi->type = ARMFault_QEMU_SFault;
- *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
- *phys_ptr = address;
- *prot = 0;
- return true;
- }
- }
- }
-
- ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
- txattrs, prot, &mpu_is_subpage, fi, NULL);
- *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
- return ret;
-}
-
-static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
- MMUAccessType access_type, ARMMMUIdx mmu_idx,
- hwaddr *phys_ptr, int *prot,
- ARMMMUFaultInfo *fi)
-{
- int n;
- uint32_t mask;
- uint32_t base;
- bool is_user = regime_is_user(env, mmu_idx);
-
- if (regime_translation_disabled(env, mmu_idx)) {
- /* MPU disabled. */
- *phys_ptr = address;
- *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
- return false;
- }
-
- *phys_ptr = address;
- for (n = 7; n >= 0; n--) {
- base = env->cp15.c6_region[n];
- if ((base & 1) == 0) {
- continue;
- }
- mask = 1 << ((base >> 1) & 0x1f);
- /* Keep this shift separate from the above to avoid an
- (undefined) << 32. */
- mask = (mask << 1) - 1;
- if (((base ^ address) & ~mask) == 0) {
- break;
- }
- }
- if (n < 0) {
- fi->type = ARMFault_Background;
- return true;
- }
-
- if (access_type == MMU_INST_FETCH) {
- mask = env->cp15.pmsav5_insn_ap;
- } else {
- mask = env->cp15.pmsav5_data_ap;
- }
- mask = (mask >> (n * 4)) & 0xf;
- switch (mask) {
- case 0:
- fi->type = ARMFault_Permission;
- fi->level = 1;
- return true;
- case 1:
- if (is_user) {
- fi->type = ARMFault_Permission;
- fi->level = 1;
- return true;
- }
- *prot = PAGE_READ | PAGE_WRITE;
- break;
- case 2:
- *prot = PAGE_READ;
- if (!is_user) {
- *prot |= PAGE_WRITE;
- }
- break;
- case 3:
- *prot = PAGE_READ | PAGE_WRITE;
- break;
- case 5:
- if (is_user) {
- fi->type = ARMFault_Permission;
- fi->level = 1;
- return true;
- }
- *prot = PAGE_READ;
- break;
- case 6:
- *prot = PAGE_READ;
- break;
- default:
- /* Bad permission. */
- fi->type = ARMFault_Permission;
- fi->level = 1;
- return true;
- }
- *prot |= PAGE_EXEC;
- return false;
-}
-
-/* Combine either inner or outer cacheability attributes for normal
- * memory, according to table D4-42 and pseudocode procedure
- * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
- *
- * NB: only stage 1 includes allocation hints (RW bits), leading to
- * some asymmetry.
- */
-static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
-{
- if (s1 == 4 || s2 == 4) {
- /* non-cacheable has precedence */
- return 4;
- } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
- /* stage 1 write-through takes precedence */
- return s1;
- } else if (extract32(s2, 2, 2) == 2) {
- /* stage 2 write-through takes precedence, but the allocation hint
- * is still taken from stage 1
- */
- return (2 << 2) | extract32(s1, 0, 2);
- } else { /* write-back */
- return s1;
- }
-}
-
-/* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
- * and CombineS1S2Desc()
- *
- * @s1: Attributes from stage 1 walk
- * @s2: Attributes from stage 2 walk
- */
-static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
-{
- uint8_t s1lo, s2lo, s1hi, s2hi;
- ARMCacheAttrs ret;
- bool tagged = false;
-
- if (s1.attrs == 0xf0) {
- tagged = true;
- s1.attrs = 0xff;
- }
-
- s1lo = extract32(s1.attrs, 0, 4);
- s2lo = extract32(s2.attrs, 0, 4);
- s1hi = extract32(s1.attrs, 4, 4);
- s2hi = extract32(s2.attrs, 4, 4);
-
- /* Combine shareability attributes (table D4-43) */
- if (s1.shareability == 2 || s2.shareability == 2) {
- /* if either are outer-shareable, the result is outer-shareable */
- ret.shareability = 2;
- } else if (s1.shareability == 3 || s2.shareability == 3) {
- /* if either are inner-shareable, the result is inner-shareable */
- ret.shareability = 3;
- } else {
- /* both non-shareable */
- ret.shareability = 0;
- }
-
- /* Combine memory type and cacheability attributes */
- if (s1hi == 0 || s2hi == 0) {
- /* Device has precedence over normal */
- if (s1lo == 0 || s2lo == 0) {
- /* nGnRnE has precedence over anything */
- ret.attrs = 0;
- } else if (s1lo == 4 || s2lo == 4) {
- /* non-Reordering has precedence over Reordering */
- ret.attrs = 4; /* nGnRE */
- } else if (s1lo == 8 || s2lo == 8) {
- /* non-Gathering has precedence over Gathering */
- ret.attrs = 8; /* nGRE */
- } else {
- ret.attrs = 0xc; /* GRE */
- }
-
- /* Any location for which the resultant memory type is any
- * type of Device memory is always treated as Outer Shareable.
- */
- ret.shareability = 2;
- } else { /* Normal memory */
- /* Outer/inner cacheability combine independently */
- ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
- | combine_cacheattr_nibble(s1lo, s2lo);
-
- if (ret.attrs == 0x44) {
- /* Any location for which the resultant memory type is Normal
- * Inner Non-cacheable, Outer Non-cacheable is always treated
- * as Outer Shareable.
- */
- ret.shareability = 2;
- }
- }
-
- /* TODO: CombineS1S2Desc does not consider transient, only WB, RWA. */
- if (tagged && ret.attrs == 0xff) {
- ret.attrs = 0xf0;
- }
-
- return ret;
-}
-
-
-/* get_phys_addr - get the physical address for this virtual address
- *
- * Find the physical address corresponding to the given virtual address,
- * by doing a translation table walk on MMU based systems or using the
- * MPU state on MPU based systems.
- *
- * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
- * prot and page_size may not be filled in, and the populated fsr value provides
- * information on why the translation aborted, in the format of a
- * DFSR/IFSR fault register, with the following caveats:
- * * we honour the short vs long DFSR format differences.
- * * the WnR bit is never set (the caller must do this).
- * * for PSMAv5 based systems we don't bother to return a full FSR format
- * value.
- *
- * @env: CPUARMState
- * @address: virtual address to get physical address for
- * @access_type: 0 for read, 1 for write, 2 for execute
- * @mmu_idx: MMU index indicating required translation regime
- * @phys_ptr: set to the physical address corresponding to the virtual address
- * @attrs: set to the memory transaction attributes to use
- * @prot: set to the permissions for the page containing phys_ptr
- * @page_size: set to the size of the page containing phys_ptr
- * @fi: set to fault info if the translation fails
- * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
- */
-bool get_phys_addr(CPUARMState *env, target_ulong address,
- MMUAccessType access_type, ARMMMUIdx mmu_idx,
- hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
- target_ulong *page_size,
- ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
-{
- ARMMMUIdx s1_mmu_idx = stage_1_mmu_idx(mmu_idx);
-
- if (mmu_idx != s1_mmu_idx) {
- /* Call ourselves recursively to do the stage 1 and then stage 2
- * translations if mmu_idx is a two-stage regime.
- */
- if (arm_feature(env, ARM_FEATURE_EL2)) {
- hwaddr ipa;
- int s2_prot;
- int ret;
- ARMCacheAttrs cacheattrs2 = {};
- ARMMMUIdx s2_mmu_idx;
- bool is_el0;
-
- ret = get_phys_addr(env, address, access_type, s1_mmu_idx, &ipa,
- attrs, prot, page_size, fi, cacheattrs);
-
- /* If S1 fails or S2 is disabled, return early. */
- if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
- *phys_ptr = ipa;
- return ret;
- }
-
- s2_mmu_idx = attrs->secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
- is_el0 = mmu_idx == ARMMMUIdx_E10_0 || mmu_idx == ARMMMUIdx_SE10_0;
-
- /* S1 is done. Now do S2 translation. */
- ret = get_phys_addr_lpae(env, ipa, access_type, s2_mmu_idx, is_el0,
- phys_ptr, attrs, &s2_prot,
- page_size, fi, &cacheattrs2);
- fi->s2addr = ipa;
- /* Combine the S1 and S2 perms. */
- *prot &= s2_prot;
-
- /* If S2 fails, return early. */
- if (ret) {
- return ret;
- }
-
- /* Combine the S1 and S2 cache attributes. */
- if (arm_hcr_el2_eff(env) & HCR_DC) {
- /*
- * HCR.DC forces the first stage attributes to
- * Normal Non-Shareable,
- * Inner Write-Back Read-Allocate Write-Allocate,
- * Outer Write-Back Read-Allocate Write-Allocate.
- * Do not overwrite Tagged within attrs.
- */
- if (cacheattrs->attrs != 0xf0) {
- cacheattrs->attrs = 0xff;
- }
- cacheattrs->shareability = 0;
- }
- *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
-
- /* Check if IPA translates to secure or non-secure PA space. */
- if (arm_is_secure_below_el3(env)) {
- if (attrs->secure) {
- attrs->secure =
- !(env->cp15.vstcr_el2.raw_tcr & (VSTCR_SA | VSTCR_SW));
- } else {
- attrs->secure =
- !((env->cp15.vtcr_el2.raw_tcr & (VTCR_NSA | VTCR_NSW))
- || (env->cp15.vstcr_el2.raw_tcr & VSTCR_SA));
- }
- }
- return 0;
- } else {
- /*
- * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
- */
- mmu_idx = stage_1_mmu_idx(mmu_idx);
- }
- }
-
- /* The page table entries may downgrade secure to non-secure, but
- * cannot upgrade an non-secure translation regime's attributes
- * to secure.
- */
- attrs->secure = regime_is_secure(env, mmu_idx);
- attrs->user = regime_is_user(env, mmu_idx);
-
- /* Fast Context Switch Extension. This doesn't exist at all in v8.
- * In v7 and earlier it affects all stage 1 translations.
- */
- if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
- && !arm_feature(env, ARM_FEATURE_V8)) {
- if (regime_el(env, mmu_idx) == 3) {
- address += env->cp15.fcseidr_s;
- } else {
- address += env->cp15.fcseidr_ns;
- }
- }
-
- if (arm_feature(env, ARM_FEATURE_PMSA)) {
- bool ret;
- *page_size = TARGET_PAGE_SIZE;
-
- if (arm_feature(env, ARM_FEATURE_V8)) {
- /* PMSAv8 */
- ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
- phys_ptr, attrs, prot, page_size, fi);
- } else if (arm_feature(env, ARM_FEATURE_V7)) {
- /* PMSAv7 */
- ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
- phys_ptr, prot, page_size, fi);
- } else {
- /* Pre-v7 MPU */
- ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
- phys_ptr, prot, fi);
- }
- qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
- " mmu_idx %u -> %s (prot %c%c%c)\n",
- access_type == MMU_DATA_LOAD ? "reading" :
- (access_type == MMU_DATA_STORE ? "writing" : "execute"),
- (uint32_t)address, mmu_idx,
- ret ? "Miss" : "Hit",
- *prot & PAGE_READ ? 'r' : '-',
- *prot & PAGE_WRITE ? 'w' : '-',
- *prot & PAGE_EXEC ? 'x' : '-');
-
- return ret;
- }
-
- /* Definitely a real MMU, not an MPU */
-
- if (regime_translation_disabled(env, mmu_idx)) {
- uint64_t hcr;
- uint8_t memattr;
-
- /*
- * MMU disabled. S1 addresses within aa64 translation regimes are
- * still checked for bounds -- see AArch64.TranslateAddressS1Off.
- */
- if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
- int r_el = regime_el(env, mmu_idx);
- if (arm_el_is_aa64(env, r_el)) {
- int pamax = arm_pamax(env_archcpu(env));
- uint64_t tcr = env->cp15.tcr_el[r_el].raw_tcr;
- int addrtop, tbi;
-
- tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
- if (access_type == MMU_INST_FETCH) {
- tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
- }
- tbi = (tbi >> extract64(address, 55, 1)) & 1;
- addrtop = (tbi ? 55 : 63);
-
- if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
- fi->type = ARMFault_AddressSize;
- fi->level = 0;
- fi->stage2 = false;
- return 1;
- }
-
- /*
- * When TBI is disabled, we've just validated that all of the
- * bits above PAMax are zero, so logically we only need to
- * clear the top byte for TBI. But it's clearer to follow
- * the pseudocode set of addrdesc.paddress.
- */
- address = extract64(address, 0, 52);
- }
- }
- *phys_ptr = address;
- *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
- *page_size = TARGET_PAGE_SIZE;
-
- /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
- hcr = arm_hcr_el2_eff(env);
- cacheattrs->shareability = 0;
- if (hcr & HCR_DC) {
- if (hcr & HCR_DCT) {
- memattr = 0xf0; /* Tagged, Normal, WB, RWA */
- } else {
- memattr = 0xff; /* Normal, WB, RWA */
- }
- } else if (access_type == MMU_INST_FETCH) {
- if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
- memattr = 0xee; /* Normal, WT, RA, NT */
- } else {
- memattr = 0x44; /* Normal, NC, No */
- }
- cacheattrs->shareability = 2; /* outer sharable */
- } else {
- memattr = 0x00; /* Device, nGnRnE */
- }
- cacheattrs->attrs = memattr;
- return 0;
- }
-
- if (regime_using_lpae_format(env, mmu_idx)) {
- return get_phys_addr_lpae(env, address, access_type, mmu_idx, false,
- phys_ptr, attrs, prot, page_size,
- fi, cacheattrs);
- } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
- return get_phys_addr_v6(env, address, access_type, mmu_idx,
- phys_ptr, attrs, prot, page_size, fi);
- } else {
- return get_phys_addr_v5(env, address, access_type, mmu_idx,
- phys_ptr, prot, page_size, fi);
- }
-}
-
-hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
- MemTxAttrs *attrs)
-{
- ARMCPU *cpu = ARM_CPU(cs);
- CPUARMState *env = &cpu->env;
- hwaddr phys_addr;
- target_ulong page_size;
- int prot;
- bool ret;
- ARMMMUFaultInfo fi = {};
- ARMMMUIdx mmu_idx = arm_mmu_idx(env);
- ARMCacheAttrs cacheattrs = {};
-
- *attrs = (MemTxAttrs) {};
-
- ret = get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &phys_addr,
- attrs, &prot, &page_size, &fi, &cacheattrs);
-
- if (ret) {
- return -1;
- }
- return phys_addr;
-}
-
-#endif
-
/* Note that signed overflow is undefined in C. The following routines are
careful to use unsigned types where modulo arithmetic is required.
Failure to do so _will_ break on newer gcc. */
diff --git a/target/arm/tcg/m_helper.c b/target/arm/tcg/m_helper.c
index d63ae465e1..23f8dfd70e 100644
--- a/target/arm/tcg/m_helper.c
+++ b/target/arm/tcg/m_helper.c
@@ -33,6 +33,7 @@
#include "exec/cpu_ldst.h"
#include "semihosting/common-semi.h"
#endif
+#include "cpu-mmu.h"
static void v7m_msr_xpsr(CPUARMState *env, uint32_t mask,
uint32_t reg, uint32_t val)
diff --git a/target/arm/tcg/pauth_helper.c b/target/arm/tcg/pauth_helper.c
index cd6df18150..11021d1a2f 100644
--- a/target/arm/tcg/pauth_helper.c
+++ b/target/arm/tcg/pauth_helper.c
@@ -25,7 +25,7 @@
#include "exec/helper-proto.h"
#include "tcg/tcg-gvec-desc.h"
#include "qemu/xxhash.h"
-
+#include "cpu-mmu.h"
static uint64_t pac_cell_shuffle(uint64_t i)
{
diff --git a/target/arm/tcg/tlb_helper.c b/target/arm/tcg/tlb_helper.c
index 9609333cbd..1b3a63c1da 100644
--- a/target/arm/tcg/tlb_helper.c
+++ b/target/arm/tcg/tlb_helper.c
@@ -9,6 +9,7 @@
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"
+#include "cpu-mmu.h"
static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
unsigned int target_el,
diff --git a/target/arm/meson.build b/target/arm/meson.build
index e6448a9007..765df57fa0 100644
--- a/target/arm/meson.build
+++ b/target/arm/meson.build
@@ -3,6 +3,7 @@ arm_ss.add(files(
'cpu.c',
'gdbstub.c',
'cpu_tcg.c',
+ 'cpu-mmu.c',
))
arm_ss.add(zlib)
@@ -20,6 +21,7 @@ arm_softmmu_ss.add(files(
'machine.c',
'monitor.c',
'cpu-sysemu.c',
+ 'cpu-mmu-sysemu.c',
))
arm_softmmu_ss.add(when: 'CONFIG_TCG', if_true: files(
--
2.26.2
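
For reviewers, a minimal sketch of how code outside helper.c can now resolve a
guest virtual address through the interface exported via cpu-mmu.h. The wrapper
name example_translate_va and the choice of MMU_DATA_LOAD are illustrative only
(not taken from this patch); the body mirrors the pattern of
arm_cpu_get_phys_page_attrs_debug() in the code being moved.

#include "qemu/osdep.h"
#include "cpu.h"
#include "cpu-mmu.h"

/* Hypothetical helper: translate a VA for the current translation regime
 * and return the PA, or -1 on any translation fault. get_phys_addr()
 * returns false on success, true on fault. */
static hwaddr example_translate_va(CPUARMState *env, target_ulong va)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    ARMMMUFaultInfo fi = {};
    ARMCacheAttrs cacheattrs = {};
    MemTxAttrs attrs = {};

    if (get_phys_addr(env, va, MMU_DATA_LOAD, arm_mmu_idx(env),
                      &phys_addr, &attrs, &prot, &page_size,
                      &fi, &cacheattrs)) {
        return -1;
    }
    return phys_addr;
}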
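The stage-1/stage-2 cacheability merge performed by combine_cacheattr_nibble()
can also be sanity-checked in isolation. The program below is a standalone
restatement of that nibble rule for illustration only (non-cacheable wins, then
stage-1 write-through, then stage-2 write-through keeping the stage-1
allocation hints); it is not code from this patch.

#include <assert.h>
#include <stdint.h>

/* Same decision order as combine_cacheattr_nibble() in the moved code. */
static uint8_t combine_nibble_demo(uint8_t s1, uint8_t s2)
{
    if (s1 == 4 || s2 == 4) {
        return 4;                       /* non-cacheable has precedence */
    }
    if (((s1 >> 2) & 3) == 0 || ((s1 >> 2) & 3) == 2) {
        return s1;                      /* stage 1 write-through wins */
    }
    if (((s2 >> 2) & 3) == 2) {
        return (2 << 2) | (s1 & 3);     /* stage 2 WT, keep stage 1 hints */
    }
    return s1;                          /* both write-back */
}

int main(void)
{
    assert(combine_nibble_demo(0xf, 0x4) == 0x4); /* WB-RWA vs NC -> NC */
    assert(combine_nibble_demo(0xf, 0xf) == 0xf); /* WB vs WB -> WB */
    assert(combine_nibble_demo(0xa, 0xf) == 0xa); /* stage 1 WT-RA kept */
    return 0;
}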