[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [Qemu-devel] [PATCH v2 13/23] target-arm: Split TLB for secure state
From: |
Sergey Fedorov |
Subject: |
Re: [Qemu-devel] [PATCH v2 13/23] target-arm: Split TLB for secure state and EL3 in Aarch64 |
Date: |
Wed, 14 May 2014 10:15:53 +0400 |
User-agent: |
Mozilla/5.0 (X11; Linux x86_64; rv:24.0) Gecko/20100101 Thunderbird/24.5.0 |
On 13.05.2014 20:15, Fabian Aggeler wrote:
> This patch is based on ideas found in a patch at
> git://github.com/jowinter/qemu-trustzone.git
> a9ad01767c4b25e14700b5682a412f4fd8146ee8 by
> Johannes Winter <address@hidden>.
>
> Each world (secure and non-secure) has its own MMU state. Providing
> a separate TLB for each state prevents flushing it on each transition
> from secure to non-secure world and vice versa.
>
> For EL3 in Aarch64 state another MMU state is introduced since
> EL3 will be able to configure its own translation regime.
>
> Do not use IS_USER() macro anymore as MMU index in translation
> code. Use new MEM_INDEX() and MEM_INDEX_USER() macros instead.
>
> Signed-off-by: Sergey Fedorov <address@hidden>
> Signed-off-by: Fabian Aggeler <address@hidden>
> ---
> target-arm/cpu.h | 42 +++++++-
> target-arm/helper.c | 2 +-
> target-arm/translate-a64.c | 9 +-
> target-arm/translate.c | 247
> +++++++++++++++++++++++----------------------
> target-arm/translate.h | 1 +
> 5 files changed, 176 insertions(+), 125 deletions(-)
>
> diff --git a/target-arm/cpu.h b/target-arm/cpu.h
> index 76c9e90..a970d55 100644
> --- a/target-arm/cpu.h
> +++ b/target-arm/cpu.h
> @@ -91,7 +91,7 @@ typedef uint32_t ARMReadCPFunc(void *opaque, int cp_info,
>
> struct arm_boot_info;
>
> -#define NB_MMU_MODES 2
> +#define NB_MMU_MODES 5
>
> /* We currently assume float and double are IEEE single and double
> precision respectively.
> @@ -1104,10 +1104,43 @@ static inline CPUARMState *cpu_init(const char
> *cpu_model)
> /* MMU modes definitions */
> #define MMU_MODE0_SUFFIX _kernel
> #define MMU_MODE1_SUFFIX _user
> -#define MMU_USER_IDX 1
> +#define MMU_MODE2_SUFFIX _secure_kernel
> +#define MMU_MODE3_SUFFIX _secure_user
> +#define MMU_MODE4_SUFFIX _el3
> +#define MMU_USER_BIT 1
> +#define MMU_SECURE_BIT (1 << 1)
> +#define MMU_EL3_BIT (1 << 2)
> +#define MMU_KERN_IDX (0)
> +#define MMU_USER_IDX (MMU_USER_BIT)
> +#define MMU_SECURE_KERN_IDX (MMU_SECURE_BIT | MMU_KERN_IDX)
> +#define MMU_SECURE_USER_IDX (MMU_SECURE_BIT | MMU_USER_IDX)
> static inline int cpu_mmu_index (CPUARMState *env)
> {
> - return arm_current_pl(env) ? 0 : 1;
> + int mmu_index = 0;
> + int current_pl;
> + /* Security Extensions introduce two separate virtual MMUs for each CPU,
> + * one each for secure and non-secure world. If EL3 is in Aarch64 state
> it
> + * gets its own MMU which it can configure through the SCTLR_EL3
> register.*/
> +
> + current_pl = arm_current_pl(env);
> + if (current_pl == 3 && arm_el_is_aa64(env, 3)) {
> + /* Bit 3: 1 for EL3 in Aarch64 state */
> + return 1 << 3;
> + } else {
> + /* Bit 0: 0 for PL1/2/3, 1 for PL0
> + * Bit 1: 0 for non-secure, 1 for secure
> + */
> + if (arm_is_secure(env)) {
> + mmu_index |= MMU_SECURE_BIT;
> + }
> +
> + if (current_pl == 0) {
> + mmu_index |= MMU_USER_BIT;
> + }
> +
> + return mmu_index;
> + }
> +
> }
>
> #include "exec/cpu-all.h"
> @@ -1182,6 +1215,9 @@ static inline void cpu_get_tb_cpu_state(CPUARMState
> *env, target_ulong *pc,
> if (fpen == 3 || (fpen == 1 && arm_current_pl(env) != 0)) {
> *flags |= ARM_TBFLAG_AA64_FPEN_MASK;
> }
> + if (!arm_is_secure(env)) {
> + *flags |= ARM_TBFLAG_NS_MASK;
> + }
> } else {
> int privmode;
> *pc = env->regs[15];
> diff --git a/target-arm/helper.c b/target-arm/helper.c
> index 00dc4af..9326ef8 100644
> --- a/target-arm/helper.c
> +++ b/target-arm/helper.c
> @@ -4079,7 +4079,7 @@ int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr
> address,
> uint32_t syn;
> bool same_el = (arm_current_pl(env) != 0);
>
> - is_user = mmu_idx == MMU_USER_IDX;
> + is_user = mmu_idx & MMU_USER_BIT;
> ret = get_phys_addr(env, address, access_type, is_user, &phys_addr,
> &prot,
> &page_size);
> if (ret == 0) {
> diff --git a/target-arm/translate-a64.c b/target-arm/translate-a64.c
> index b62db4d..1b4c932 100644
> --- a/target-arm/translate-a64.c
> +++ b/target-arm/translate-a64.c
> @@ -167,7 +167,13 @@ static int get_mem_index(DisasContext *s)
> #ifdef CONFIG_USER_ONLY
> return 1;
> #else
> - return s->user;
> + /* EL3 in Aarch64 state has its own MMU */
> + if (s->current_pl == 3) {
> + return MMU_EL3_BIT;
> + } else {
> + return (s->user ? MMU_USER_BIT : 0) |
> + (s->ns ? 0 : MMU_SECURE_BIT);
> + }
> #endif
> }
>
> @@ -10661,6 +10667,7 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
> dc->condexec_cond = 0;
> #if !defined(CONFIG_USER_ONLY)
> dc->user = (ARM_TBFLAG_AA64_EL(tb->flags) == 0);
> + dc->ns = ARM_TBFLAG_NS(tb->flags);
This hunk would be better placed in patch 9.
> #endif
> dc->cpacr_fpen = ARM_TBFLAG_AA64_FPEN(tb->flags);
> dc->vec_len = 0;
> diff --git a/target-arm/translate.c b/target-arm/translate.c
> index 4ebd9f7..bbd4c77 100644
> --- a/target-arm/translate.c
> +++ b/target-arm/translate.c
> @@ -54,9 +54,13 @@ static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
> #if defined(CONFIG_USER_ONLY)
> #define IS_USER(s) 1
> #define IS_NS(s) 1
> +#define MEM_INDEX(s) MMU_USER_IDX
> +#define MEM_INDEX_USER(S) MMU_USER_IDX
> #else
> #define IS_USER(s) (s->user)
> #define IS_NS(s) (s->ns)
> +#define MEM_INDEX(s) (s->mem_idx)
> +#define MEM_INDEX_USER(S) (MEM_INDEX(s) | MMU_USER_BIT)
> #endif
>
> TCGv_ptr cpu_env;
> @@ -1167,18 +1171,18 @@ VFP_GEN_FIX(ulto, )
> static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
> {
> if (dp) {
> - gen_aa32_ld64(cpu_F0d, addr, IS_USER(s));
> + gen_aa32_ld64(cpu_F0d, addr, MEM_INDEX(s));
> } else {
> - gen_aa32_ld32u(cpu_F0s, addr, IS_USER(s));
> + gen_aa32_ld32u(cpu_F0s, addr, MEM_INDEX(s));
> }
> }
>
> static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
> {
> if (dp) {
> - gen_aa32_st64(cpu_F0d, addr, IS_USER(s));
> + gen_aa32_st64(cpu_F0d, addr, MEM_INDEX(s));
> } else {
> - gen_aa32_st32(cpu_F0s, addr, IS_USER(s));
> + gen_aa32_st32(cpu_F0s, addr, MEM_INDEX(s));
> }
> }
>
> @@ -1516,24 +1520,24 @@ static int disas_iwmmxt_insn(CPUARMState *env,
> DisasContext *s, uint32_t insn)
> if (insn & ARM_CP_RW_BIT) {
> if ((insn >> 28) == 0xf) { /* WLDRW wCx */
> tmp = tcg_temp_new_i32();
> - gen_aa32_ld32u(tmp, addr, IS_USER(s));
> + gen_aa32_ld32u(tmp, addr, MEM_INDEX(s));
> iwmmxt_store_creg(wrd, tmp);
> } else {
> i = 1;
> if (insn & (1 << 8)) {
> if (insn & (1 << 22)) { /* WLDRD */
> - gen_aa32_ld64(cpu_M0, addr, IS_USER(s));
> + gen_aa32_ld64(cpu_M0, addr, MEM_INDEX(s));
> i = 0;
> } else { /* WLDRW wRd */
> tmp = tcg_temp_new_i32();
> - gen_aa32_ld32u(tmp, addr, IS_USER(s));
> + gen_aa32_ld32u(tmp, addr, MEM_INDEX(s));
> }
> } else {
> tmp = tcg_temp_new_i32();
> if (insn & (1 << 22)) { /* WLDRH */
> - gen_aa32_ld16u(tmp, addr, IS_USER(s));
> + gen_aa32_ld16u(tmp, addr, MEM_INDEX(s));
> } else { /* WLDRB */
> - gen_aa32_ld8u(tmp, addr, IS_USER(s));
> + gen_aa32_ld8u(tmp, addr, MEM_INDEX(s));
> }
> }
> if (i) {
> @@ -1545,24 +1549,24 @@ static int disas_iwmmxt_insn(CPUARMState *env,
> DisasContext *s, uint32_t insn)
> } else {
> if ((insn >> 28) == 0xf) { /* WSTRW wCx */
> tmp = iwmmxt_load_creg(wrd);
> - gen_aa32_st32(tmp, addr, IS_USER(s));
> + gen_aa32_st32(tmp, addr, MEM_INDEX(s));
> } else {
> gen_op_iwmmxt_movq_M0_wRn(wrd);
> tmp = tcg_temp_new_i32();
> if (insn & (1 << 8)) {
> if (insn & (1 << 22)) { /* WSTRD */
> - gen_aa32_st64(cpu_M0, addr, IS_USER(s));
> + gen_aa32_st64(cpu_M0, addr, MEM_INDEX(s));
> } else { /* WSTRW wRd */
> tcg_gen_trunc_i64_i32(tmp, cpu_M0);
> - gen_aa32_st32(tmp, addr, IS_USER(s));
> + gen_aa32_st32(tmp, addr, MEM_INDEX(s));
> }
> } else {
> if (insn & (1 << 22)) { /* WSTRH */
> tcg_gen_trunc_i64_i32(tmp, cpu_M0);
> - gen_aa32_st16(tmp, addr, IS_USER(s));
> + gen_aa32_st16(tmp, addr, MEM_INDEX(s));
> } else { /* WSTRB */
> tcg_gen_trunc_i64_i32(tmp, cpu_M0);
> - gen_aa32_st8(tmp, addr, IS_USER(s));
> + gen_aa32_st8(tmp, addr, MEM_INDEX(s));
> }
> }
> }
> @@ -2627,15 +2631,15 @@ static TCGv_i32 gen_load_and_replicate(DisasContext
> *s, TCGv_i32 addr, int size)
> TCGv_i32 tmp = tcg_temp_new_i32();
> switch (size) {
> case 0:
> - gen_aa32_ld8u(tmp, addr, IS_USER(s));
> + gen_aa32_ld8u(tmp, addr, MEM_INDEX(s));
> gen_neon_dup_u8(tmp, 0);
> break;
> case 1:
> - gen_aa32_ld16u(tmp, addr, IS_USER(s));
> + gen_aa32_ld16u(tmp, addr, MEM_INDEX(s));
> gen_neon_dup_low16(tmp);
> break;
> case 2:
> - gen_aa32_ld32u(tmp, addr, IS_USER(s));
> + gen_aa32_ld32u(tmp, addr, MEM_INDEX(s));
> break;
> default: /* Avoid compiler warnings. */
> abort();
> @@ -4306,11 +4310,11 @@ static int disas_neon_ls_insn(CPUARMState * env,
> DisasContext *s, uint32_t insn)
> if (size == 3) {
> tmp64 = tcg_temp_new_i64();
> if (load) {
> - gen_aa32_ld64(tmp64, addr, IS_USER(s));
> + gen_aa32_ld64(tmp64, addr, MEM_INDEX(s));
> neon_store_reg64(tmp64, rd);
> } else {
> neon_load_reg64(tmp64, rd);
> - gen_aa32_st64(tmp64, addr, IS_USER(s));
> + gen_aa32_st64(tmp64, addr, MEM_INDEX(s));
> }
> tcg_temp_free_i64(tmp64);
> tcg_gen_addi_i32(addr, addr, stride);
> @@ -4319,21 +4323,21 @@ static int disas_neon_ls_insn(CPUARMState * env,
> DisasContext *s, uint32_t insn)
> if (size == 2) {
> if (load) {
> tmp = tcg_temp_new_i32();
> - gen_aa32_ld32u(tmp, addr, IS_USER(s));
> + gen_aa32_ld32u(tmp, addr, MEM_INDEX(s));
> neon_store_reg(rd, pass, tmp);
> } else {
> tmp = neon_load_reg(rd, pass);
> - gen_aa32_st32(tmp, addr, IS_USER(s));
> + gen_aa32_st32(tmp, addr, MEM_INDEX(s));
> tcg_temp_free_i32(tmp);
> }
> tcg_gen_addi_i32(addr, addr, stride);
> } else if (size == 1) {
> if (load) {
> tmp = tcg_temp_new_i32();
> - gen_aa32_ld16u(tmp, addr, IS_USER(s));
> + gen_aa32_ld16u(tmp, addr, MEM_INDEX(s));
> tcg_gen_addi_i32(addr, addr, stride);
> tmp2 = tcg_temp_new_i32();
> - gen_aa32_ld16u(tmp2, addr, IS_USER(s));
> + gen_aa32_ld16u(tmp2, addr, MEM_INDEX(s));
> tcg_gen_addi_i32(addr, addr, stride);
> tcg_gen_shli_i32(tmp2, tmp2, 16);
> tcg_gen_or_i32(tmp, tmp, tmp2);
> @@ -4343,10 +4347,10 @@ static int disas_neon_ls_insn(CPUARMState * env,
> DisasContext *s, uint32_t insn)
> tmp = neon_load_reg(rd, pass);
> tmp2 = tcg_temp_new_i32();
> tcg_gen_shri_i32(tmp2, tmp, 16);
> - gen_aa32_st16(tmp, addr, IS_USER(s));
> + gen_aa32_st16(tmp, addr, MEM_INDEX(s));
> tcg_temp_free_i32(tmp);
> tcg_gen_addi_i32(addr, addr, stride);
> - gen_aa32_st16(tmp2, addr, IS_USER(s));
> + gen_aa32_st16(tmp2, addr, MEM_INDEX(s));
> tcg_temp_free_i32(tmp2);
> tcg_gen_addi_i32(addr, addr, stride);
> }
> @@ -4355,7 +4359,7 @@ static int disas_neon_ls_insn(CPUARMState * env,
> DisasContext *s, uint32_t insn)
> TCGV_UNUSED_I32(tmp2);
> for (n = 0; n < 4; n++) {
> tmp = tcg_temp_new_i32();
> - gen_aa32_ld8u(tmp, addr, IS_USER(s));
> + gen_aa32_ld8u(tmp, addr, MEM_INDEX(s));
> tcg_gen_addi_i32(addr, addr, stride);
> if (n == 0) {
> tmp2 = tmp;
> @@ -4375,7 +4379,7 @@ static int disas_neon_ls_insn(CPUARMState * env,
> DisasContext *s, uint32_t insn)
> } else {
> tcg_gen_shri_i32(tmp, tmp2, n * 8);
> }
> - gen_aa32_st8(tmp, addr, IS_USER(s));
> + gen_aa32_st8(tmp, addr, MEM_INDEX(s));
> tcg_temp_free_i32(tmp);
> tcg_gen_addi_i32(addr, addr, stride);
> }
> @@ -4499,13 +4503,13 @@ static int disas_neon_ls_insn(CPUARMState * env,
> DisasContext *s, uint32_t insn)
> tmp = tcg_temp_new_i32();
> switch (size) {
> case 0:
> - gen_aa32_ld8u(tmp, addr, IS_USER(s));
> + gen_aa32_ld8u(tmp, addr, MEM_INDEX(s));
> break;
> case 1:
> - gen_aa32_ld16u(tmp, addr, IS_USER(s));
> + gen_aa32_ld16u(tmp, addr, MEM_INDEX(s));
> break;
> case 2:
> - gen_aa32_ld32u(tmp, addr, IS_USER(s));
> + gen_aa32_ld32u(tmp, addr, MEM_INDEX(s));
> break;
> default: /* Avoid compiler warnings. */
> abort();
> @@ -4523,13 +4527,13 @@ static int disas_neon_ls_insn(CPUARMState * env,
> DisasContext *s, uint32_t insn)
> tcg_gen_shri_i32(tmp, tmp, shift);
> switch (size) {
> case 0:
> - gen_aa32_st8(tmp, addr, IS_USER(s));
> + gen_aa32_st8(tmp, addr, MEM_INDEX(s));
> break;
> case 1:
> - gen_aa32_st16(tmp, addr, IS_USER(s));
> + gen_aa32_st16(tmp, addr, MEM_INDEX(s));
> break;
> case 2:
> - gen_aa32_st32(tmp, addr, IS_USER(s));
> + gen_aa32_st32(tmp, addr, MEM_INDEX(s));
> break;
> }
> tcg_temp_free_i32(tmp);
> @@ -7202,14 +7206,14 @@ static void gen_load_exclusive(DisasContext *s, int
> rt, int rt2,
>
> switch (size) {
> case 0:
> - gen_aa32_ld8u(tmp, addr, IS_USER(s));
> + gen_aa32_ld8u(tmp, addr, MEM_INDEX(s));
> break;
> case 1:
> - gen_aa32_ld16u(tmp, addr, IS_USER(s));
> + gen_aa32_ld16u(tmp, addr, MEM_INDEX(s));
> break;
> case 2:
> case 3:
> - gen_aa32_ld32u(tmp, addr, IS_USER(s));
> + gen_aa32_ld32u(tmp, addr, MEM_INDEX(s));
> break;
> default:
> abort();
> @@ -7220,7 +7224,7 @@ static void gen_load_exclusive(DisasContext *s, int rt,
> int rt2,
> TCGv_i32 tmp3 = tcg_temp_new_i32();
>
> tcg_gen_addi_i32(tmp2, addr, 4);
> - gen_aa32_ld32u(tmp3, tmp2, IS_USER(s));
> + gen_aa32_ld32u(tmp3, tmp2, MEM_INDEX(s));
> tcg_temp_free_i32(tmp2);
> tcg_gen_concat_i32_i64(cpu_exclusive_val, tmp, tmp3);
> store_reg(s, rt2, tmp3);
> @@ -7271,14 +7275,14 @@ static void gen_store_exclusive(DisasContext *s, int
> rd, int rt, int rt2,
> tmp = tcg_temp_new_i32();
> switch (size) {
> case 0:
> - gen_aa32_ld8u(tmp, addr, IS_USER(s));
> + gen_aa32_ld8u(tmp, addr, MEM_INDEX(s));
> break;
> case 1:
> - gen_aa32_ld16u(tmp, addr, IS_USER(s));
> + gen_aa32_ld16u(tmp, addr, MEM_INDEX(s));
> break;
> case 2:
> case 3:
> - gen_aa32_ld32u(tmp, addr, IS_USER(s));
> + gen_aa32_ld32u(tmp, addr, MEM_INDEX(s));
> break;
> default:
> abort();
> @@ -7289,7 +7293,7 @@ static void gen_store_exclusive(DisasContext *s, int
> rd, int rt, int rt2,
> TCGv_i32 tmp2 = tcg_temp_new_i32();
> TCGv_i32 tmp3 = tcg_temp_new_i32();
> tcg_gen_addi_i32(tmp2, addr, 4);
> - gen_aa32_ld32u(tmp3, tmp2, IS_USER(s));
> + gen_aa32_ld32u(tmp3, tmp2, MEM_INDEX(s));
> tcg_temp_free_i32(tmp2);
> tcg_gen_concat_i32_i64(val64, tmp, tmp3);
> tcg_temp_free_i32(tmp3);
> @@ -7304,14 +7308,14 @@ static void gen_store_exclusive(DisasContext *s, int
> rd, int rt, int rt2,
> tmp = load_reg(s, rt);
> switch (size) {
> case 0:
> - gen_aa32_st8(tmp, addr, IS_USER(s));
> + gen_aa32_st8(tmp, addr, MEM_INDEX(s));
> break;
> case 1:
> - gen_aa32_st16(tmp, addr, IS_USER(s));
> + gen_aa32_st16(tmp, addr, MEM_INDEX(s));
> break;
> case 2:
> case 3:
> - gen_aa32_st32(tmp, addr, IS_USER(s));
> + gen_aa32_st32(tmp, addr, MEM_INDEX(s));
> break;
> default:
> abort();
> @@ -7320,7 +7324,7 @@ static void gen_store_exclusive(DisasContext *s, int
> rd, int rt, int rt2,
> if (size == 3) {
> tcg_gen_addi_i32(addr, addr, 4);
> tmp = load_reg(s, rt2);
> - gen_aa32_st32(tmp, addr, IS_USER(s));
> + gen_aa32_st32(tmp, addr, MEM_INDEX(s));
> tcg_temp_free_i32(tmp);
> }
> tcg_gen_movi_i32(cpu_R[rd], 0);
> @@ -7367,11 +7371,11 @@ static void gen_srs(DisasContext *s,
> }
> tcg_gen_addi_i32(addr, addr, offset);
> tmp = load_reg(s, 14);
> - gen_aa32_st32(tmp, addr, 0);
> + gen_aa32_st32(tmp, addr, MEM_INDEX(s));
> tcg_temp_free_i32(tmp);
> tmp = load_cpu_field(spsr);
> tcg_gen_addi_i32(addr, addr, 4);
> - gen_aa32_st32(tmp, addr, 0);
> + gen_aa32_st32(tmp, addr, MEM_INDEX(s));
> tcg_temp_free_i32(tmp);
> if (writeback) {
> switch (amode) {
> @@ -7524,10 +7528,10 @@ static void disas_arm_insn(CPUARMState * env,
> DisasContext *s)
> tcg_gen_addi_i32(addr, addr, offset);
> /* Load PC into tmp and CPSR into tmp2. */
> tmp = tcg_temp_new_i32();
> - gen_aa32_ld32u(tmp, addr, 0);
> + gen_aa32_ld32u(tmp, addr, MEM_INDEX(s));
> tcg_gen_addi_i32(addr, addr, 4);
> tmp2 = tcg_temp_new_i32();
> - gen_aa32_ld32u(tmp2, addr, 0);
> + gen_aa32_ld32u(tmp2, addr, MEM_INDEX(s));
> if (insn & (1 << 21)) {
> /* Base writeback. */
> switch (i) {
> @@ -8116,13 +8120,13 @@ static void disas_arm_insn(CPUARMState * env,
> DisasContext *s)
> tmp = tcg_temp_new_i32();
> switch (op1) {
> case 0: /* lda */
> - gen_aa32_ld32u(tmp, addr, IS_USER(s));
> + gen_aa32_ld32u(tmp, addr, MEM_INDEX(s));
> break;
> case 2: /* ldab */
> - gen_aa32_ld8u(tmp, addr, IS_USER(s));
> + gen_aa32_ld8u(tmp, addr, MEM_INDEX(s));
> break;
> case 3: /* ldah */
> - gen_aa32_ld16u(tmp, addr, IS_USER(s));
> + gen_aa32_ld16u(tmp, addr, MEM_INDEX(s));
> break;
> default:
> abort();
> @@ -8133,13 +8137,13 @@ static void disas_arm_insn(CPUARMState * env,
> DisasContext *s)
> tmp = load_reg(s, rm);
> switch (op1) {
> case 0: /* stl */
> - gen_aa32_st32(tmp, addr, IS_USER(s));
> + gen_aa32_st32(tmp, addr, MEM_INDEX(s));
> break;
> case 2: /* stlb */
> - gen_aa32_st8(tmp, addr, IS_USER(s));
> + gen_aa32_st8(tmp, addr, MEM_INDEX(s));
> break;
> case 3: /* stlh */
> - gen_aa32_st16(tmp, addr, IS_USER(s));
> + gen_aa32_st16(tmp, addr, MEM_INDEX(s));
> break;
> default:
> abort();
> @@ -8194,11 +8198,11 @@ static void disas_arm_insn(CPUARMState * env,
> DisasContext *s)
> tmp = load_reg(s, rm);
> tmp2 = tcg_temp_new_i32();
> if (insn & (1 << 22)) {
> - gen_aa32_ld8u(tmp2, addr, IS_USER(s));
> - gen_aa32_st8(tmp, addr, IS_USER(s));
> + gen_aa32_ld8u(tmp2, addr, MEM_INDEX(s));
> + gen_aa32_st8(tmp, addr, MEM_INDEX(s));
> } else {
> - gen_aa32_ld32u(tmp2, addr, IS_USER(s));
> - gen_aa32_st32(tmp, addr, IS_USER(s));
> + gen_aa32_ld32u(tmp2, addr, MEM_INDEX(s));
> + gen_aa32_st32(tmp, addr, MEM_INDEX(s));
> }
> tcg_temp_free_i32(tmp);
> tcg_temp_free_i32(addr);
> @@ -8220,14 +8224,14 @@ static void disas_arm_insn(CPUARMState * env,
> DisasContext *s)
> tmp = tcg_temp_new_i32();
> switch(sh) {
> case 1:
> - gen_aa32_ld16u(tmp, addr, IS_USER(s));
> + gen_aa32_ld16u(tmp, addr, MEM_INDEX(s));
> break;
> case 2:
> - gen_aa32_ld8s(tmp, addr, IS_USER(s));
> + gen_aa32_ld8s(tmp, addr, MEM_INDEX(s));
> break;
> default:
> case 3:
> - gen_aa32_ld16s(tmp, addr, IS_USER(s));
> + gen_aa32_ld16s(tmp, addr, MEM_INDEX(s));
> break;
> }
> load = 1;
> @@ -8237,21 +8241,21 @@ static void disas_arm_insn(CPUARMState * env,
> DisasContext *s)
> if (sh & 1) {
> /* store */
> tmp = load_reg(s, rd);
> - gen_aa32_st32(tmp, addr, IS_USER(s));
> + gen_aa32_st32(tmp, addr, MEM_INDEX(s));
> tcg_temp_free_i32(tmp);
> tcg_gen_addi_i32(addr, addr, 4);
> tmp = load_reg(s, rd + 1);
> - gen_aa32_st32(tmp, addr, IS_USER(s));
> + gen_aa32_st32(tmp, addr, MEM_INDEX(s));
> tcg_temp_free_i32(tmp);
> load = 0;
> } else {
> /* load */
> tmp = tcg_temp_new_i32();
> - gen_aa32_ld32u(tmp, addr, IS_USER(s));
> + gen_aa32_ld32u(tmp, addr, MEM_INDEX(s));
> store_reg(s, rd, tmp);
> tcg_gen_addi_i32(addr, addr, 4);
> tmp = tcg_temp_new_i32();
> - gen_aa32_ld32u(tmp, addr, IS_USER(s));
> + gen_aa32_ld32u(tmp, addr, MEM_INDEX(s));
> rd++;
> load = 1;
> }
> @@ -8259,7 +8263,7 @@ static void disas_arm_insn(CPUARMState * env,
> DisasContext *s)
> } else {
> /* store */
> tmp = load_reg(s, rd);
> - gen_aa32_st16(tmp, addr, IS_USER(s));
> + gen_aa32_st16(tmp, addr, MEM_INDEX(s));
> tcg_temp_free_i32(tmp);
> load = 0;
> }
> @@ -8597,7 +8601,8 @@ static void disas_arm_insn(CPUARMState * env,
> DisasContext *s)
> rn = (insn >> 16) & 0xf;
> rd = (insn >> 12) & 0xf;
> tmp2 = load_reg(s, rn);
> - i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
> + i = (insn & 0x01200000) == 0x00200000 ?
> + MEM_INDEX_USER(s) : MEM_INDEX(s);
> if (insn & (1 << 24))
> gen_add_data_offset(s, insn, tmp2);
> if (insn & (1 << 20)) {
> @@ -8681,7 +8686,7 @@ static void disas_arm_insn(CPUARMState * env,
> DisasContext *s)
> if (insn & (1 << 20)) {
> /* load */
> tmp = tcg_temp_new_i32();
> - gen_aa32_ld32u(tmp, addr, IS_USER(s));
> + gen_aa32_ld32u(tmp, addr, MEM_INDEX(s));
> if (user) {
> tmp2 = tcg_const_i32(i);
> gen_helper_set_user_reg(cpu_env, tmp2, tmp);
> @@ -8708,7 +8713,7 @@ static void disas_arm_insn(CPUARMState * env,
> DisasContext *s)
> } else {
> tmp = load_reg(s, i);
> }
> - gen_aa32_st32(tmp, addr, IS_USER(s));
> + gen_aa32_st32(tmp, addr, MEM_INDEX(s));
> tcg_temp_free_i32(tmp);
> }
> j++;
> @@ -8974,20 +8979,20 @@ static int disas_thumb2_insn(CPUARMState *env,
> DisasContext *s, uint16_t insn_hw
> if (insn & (1 << 20)) {
> /* ldrd */
> tmp = tcg_temp_new_i32();
> - gen_aa32_ld32u(tmp, addr, IS_USER(s));
> + gen_aa32_ld32u(tmp, addr, MEM_INDEX(s));
> store_reg(s, rs, tmp);
> tcg_gen_addi_i32(addr, addr, 4);
> tmp = tcg_temp_new_i32();
> - gen_aa32_ld32u(tmp, addr, IS_USER(s));
> + gen_aa32_ld32u(tmp, addr, MEM_INDEX(s));
> store_reg(s, rd, tmp);
> } else {
> /* strd */
> tmp = load_reg(s, rs);
> - gen_aa32_st32(tmp, addr, IS_USER(s));
> + gen_aa32_st32(tmp, addr, MEM_INDEX(s));
> tcg_temp_free_i32(tmp);
> tcg_gen_addi_i32(addr, addr, 4);
> tmp = load_reg(s, rd);
> - gen_aa32_st32(tmp, addr, IS_USER(s));
> + gen_aa32_st32(tmp, addr, MEM_INDEX(s));
> tcg_temp_free_i32(tmp);
> }
> if (insn & (1 << 21)) {
> @@ -9025,11 +9030,11 @@ static int disas_thumb2_insn(CPUARMState *env,
> DisasContext *s, uint16_t insn_hw
> tcg_gen_add_i32(addr, addr, tmp);
> tcg_temp_free_i32(tmp);
> tmp = tcg_temp_new_i32();
> - gen_aa32_ld16u(tmp, addr, IS_USER(s));
> + gen_aa32_ld16u(tmp, addr, MEM_INDEX(s));
> } else { /* tbb */
> tcg_temp_free_i32(tmp);
> tmp = tcg_temp_new_i32();
> - gen_aa32_ld8u(tmp, addr, IS_USER(s));
> + gen_aa32_ld8u(tmp, addr, MEM_INDEX(s));
> }
> tcg_temp_free_i32(addr);
> tcg_gen_shli_i32(tmp, tmp, 1);
> @@ -9066,13 +9071,13 @@ static int disas_thumb2_insn(CPUARMState *env,
> DisasContext *s, uint16_t insn_hw
> tmp = tcg_temp_new_i32();
> switch (op) {
> case 0: /* ldab */
> - gen_aa32_ld8u(tmp, addr, IS_USER(s));
> + gen_aa32_ld8u(tmp, addr, MEM_INDEX(s));
> break;
> case 1: /* ldah */
> - gen_aa32_ld16u(tmp, addr, IS_USER(s));
> + gen_aa32_ld16u(tmp, addr, MEM_INDEX(s));
> break;
> case 2: /* lda */
> - gen_aa32_ld32u(tmp, addr, IS_USER(s));
> + gen_aa32_ld32u(tmp, addr, MEM_INDEX(s));
> break;
> default:
> abort();
> @@ -9082,13 +9087,13 @@ static int disas_thumb2_insn(CPUARMState *env,
> DisasContext *s, uint16_t insn_hw
> tmp = load_reg(s, rs);
> switch (op) {
> case 0: /* stlb */
> - gen_aa32_st8(tmp, addr, IS_USER(s));
> + gen_aa32_st8(tmp, addr, MEM_INDEX(s));
> break;
> case 1: /* stlh */
> - gen_aa32_st16(tmp, addr, IS_USER(s));
> + gen_aa32_st16(tmp, addr, MEM_INDEX(s));
> break;
> case 2: /* stl */
> - gen_aa32_st32(tmp, addr, IS_USER(s));
> + gen_aa32_st32(tmp, addr, MEM_INDEX(s));
> break;
> default:
> abort();
> @@ -9116,10 +9121,10 @@ static int disas_thumb2_insn(CPUARMState *env,
> DisasContext *s, uint16_t insn_hw
> tcg_gen_addi_i32(addr, addr, -8);
> /* Load PC into tmp and CPSR into tmp2. */
> tmp = tcg_temp_new_i32();
> - gen_aa32_ld32u(tmp, addr, 0);
> + gen_aa32_ld32u(tmp, addr, MEM_INDEX(s));
> tcg_gen_addi_i32(addr, addr, 4);
> tmp2 = tcg_temp_new_i32();
> - gen_aa32_ld32u(tmp2, addr, 0);
> + gen_aa32_ld32u(tmp2, addr, MEM_INDEX(s));
> if (insn & (1 << 21)) {
> /* Base writeback. */
> if (insn & (1 << 24)) {
> @@ -9158,7 +9163,7 @@ static int disas_thumb2_insn(CPUARMState *env,
> DisasContext *s, uint16_t insn_hw
> if (insn & (1 << 20)) {
> /* Load. */
> tmp = tcg_temp_new_i32();
> - gen_aa32_ld32u(tmp, addr, IS_USER(s));
> + gen_aa32_ld32u(tmp, addr, MEM_INDEX(s));
> if (i == 15) {
> gen_bx(s, tmp);
> } else if (i == rn) {
> @@ -9170,7 +9175,7 @@ static int disas_thumb2_insn(CPUARMState *env,
> DisasContext *s, uint16_t insn_hw
> } else {
> /* Store. */
> tmp = load_reg(s, i);
> - gen_aa32_st32(tmp, addr, IS_USER(s));
> + gen_aa32_st32(tmp, addr, MEM_INDEX(s));
> tcg_temp_free_i32(tmp);
> }
> tcg_gen_addi_i32(addr, addr, 4);
> @@ -9870,7 +9875,7 @@ static int disas_thumb2_insn(CPUARMState *env,
> DisasContext *s, uint16_t insn_hw
> {
> int postinc = 0;
> int writeback = 0;
> - int user;
> + int mem_idx;
> if ((insn & 0x01100000) == 0x01000000) {
> if (disas_neon_ls_insn(env, s, insn))
> goto illegal_op;
> @@ -9914,7 +9919,7 @@ static int disas_thumb2_insn(CPUARMState *env,
> DisasContext *s, uint16_t insn_hw
> return 1;
> }
> }
> - user = IS_USER(s);
> + mem_idx = MEM_INDEX(s);
> if (rn == 15) {
> addr = tcg_temp_new_i32();
> /* PC relative. */
> @@ -9951,7 +9956,7 @@ static int disas_thumb2_insn(CPUARMState *env,
> DisasContext *s, uint16_t insn_hw
> break;
> case 0xe: /* User privilege. */
> tcg_gen_addi_i32(addr, addr, imm);
> - user = 1;
> + mem_idx = MEM_INDEX_USER(s);
> break;
> case 0x9: /* Post-decrement. */
> imm = -imm;
> @@ -9978,19 +9983,19 @@ static int disas_thumb2_insn(CPUARMState *env,
> DisasContext *s, uint16_t insn_hw
> tmp = tcg_temp_new_i32();
> switch (op) {
> case 0:
> - gen_aa32_ld8u(tmp, addr, user);
> + gen_aa32_ld8u(tmp, addr, mem_idx);
> break;
> case 4:
> - gen_aa32_ld8s(tmp, addr, user);
> + gen_aa32_ld8s(tmp, addr, mem_idx);
> break;
> case 1:
> - gen_aa32_ld16u(tmp, addr, user);
> + gen_aa32_ld16u(tmp, addr, mem_idx);
> break;
> case 5:
> - gen_aa32_ld16s(tmp, addr, user);
> + gen_aa32_ld16s(tmp, addr, mem_idx);
> break;
> case 2:
> - gen_aa32_ld32u(tmp, addr, user);
> + gen_aa32_ld32u(tmp, addr, mem_idx);
> break;
> default:
> tcg_temp_free_i32(tmp);
> @@ -10007,13 +10012,13 @@ static int disas_thumb2_insn(CPUARMState *env,
> DisasContext *s, uint16_t insn_hw
> tmp = load_reg(s, rs);
> switch (op) {
> case 0:
> - gen_aa32_st8(tmp, addr, user);
> + gen_aa32_st8(tmp, addr, mem_idx);
> break;
> case 1:
> - gen_aa32_st16(tmp, addr, user);
> + gen_aa32_st16(tmp, addr, mem_idx);
> break;
> case 2:
> - gen_aa32_st32(tmp, addr, user);
> + gen_aa32_st32(tmp, addr, mem_idx);
> break;
> default:
> tcg_temp_free_i32(tmp);
> @@ -10150,7 +10155,7 @@ static void disas_thumb_insn(CPUARMState *env,
> DisasContext *s)
> addr = tcg_temp_new_i32();
> tcg_gen_movi_i32(addr, val);
> tmp = tcg_temp_new_i32();
> - gen_aa32_ld32u(tmp, addr, IS_USER(s));
> + gen_aa32_ld32u(tmp, addr, MEM_INDEX(s));
> tcg_temp_free_i32(addr);
> store_reg(s, rd, tmp);
> break;
> @@ -10353,28 +10358,28 @@ static void disas_thumb_insn(CPUARMState *env,
> DisasContext *s)
>
> switch (op) {
> case 0: /* str */
> - gen_aa32_st32(tmp, addr, IS_USER(s));
> + gen_aa32_st32(tmp, addr, MEM_INDEX(s));
> break;
> case 1: /* strh */
> - gen_aa32_st16(tmp, addr, IS_USER(s));
> + gen_aa32_st16(tmp, addr, MEM_INDEX(s));
> break;
> case 2: /* strb */
> - gen_aa32_st8(tmp, addr, IS_USER(s));
> + gen_aa32_st8(tmp, addr, MEM_INDEX(s));
> break;
> case 3: /* ldrsb */
> - gen_aa32_ld8s(tmp, addr, IS_USER(s));
> + gen_aa32_ld8s(tmp, addr, MEM_INDEX(s));
> break;
> case 4: /* ldr */
> - gen_aa32_ld32u(tmp, addr, IS_USER(s));
> + gen_aa32_ld32u(tmp, addr, MEM_INDEX(s));
> break;
> case 5: /* ldrh */
> - gen_aa32_ld16u(tmp, addr, IS_USER(s));
> + gen_aa32_ld16u(tmp, addr, MEM_INDEX(s));
> break;
> case 6: /* ldrb */
> - gen_aa32_ld8u(tmp, addr, IS_USER(s));
> + gen_aa32_ld8u(tmp, addr, MEM_INDEX(s));
> break;
> case 7: /* ldrsh */
> - gen_aa32_ld16s(tmp, addr, IS_USER(s));
> + gen_aa32_ld16s(tmp, addr, MEM_INDEX(s));
> break;
> }
> if (op >= 3) { /* load */
> @@ -10396,12 +10401,12 @@ static void disas_thumb_insn(CPUARMState *env,
> DisasContext *s)
> if (insn & (1 << 11)) {
> /* load */
> tmp = tcg_temp_new_i32();
> - gen_aa32_ld32u(tmp, addr, IS_USER(s));
> + gen_aa32_ld32u(tmp, addr, MEM_INDEX(s));
> store_reg(s, rd, tmp);
> } else {
> /* store */
> tmp = load_reg(s, rd);
> - gen_aa32_st32(tmp, addr, IS_USER(s));
> + gen_aa32_st32(tmp, addr, MEM_INDEX(s));
> tcg_temp_free_i32(tmp);
> }
> tcg_temp_free_i32(addr);
> @@ -10418,12 +10423,12 @@ static void disas_thumb_insn(CPUARMState *env,
> DisasContext *s)
> if (insn & (1 << 11)) {
> /* load */
> tmp = tcg_temp_new_i32();
> - gen_aa32_ld8u(tmp, addr, IS_USER(s));
> + gen_aa32_ld8u(tmp, addr, MEM_INDEX(s));
> store_reg(s, rd, tmp);
> } else {
> /* store */
> tmp = load_reg(s, rd);
> - gen_aa32_st8(tmp, addr, IS_USER(s));
> + gen_aa32_st8(tmp, addr, MEM_INDEX(s));
> tcg_temp_free_i32(tmp);
> }
> tcg_temp_free_i32(addr);
> @@ -10440,12 +10445,12 @@ static void disas_thumb_insn(CPUARMState *env,
> DisasContext *s)
> if (insn & (1 << 11)) {
> /* load */
> tmp = tcg_temp_new_i32();
> - gen_aa32_ld16u(tmp, addr, IS_USER(s));
> + gen_aa32_ld16u(tmp, addr, MEM_INDEX(s));
> store_reg(s, rd, tmp);
> } else {
> /* store */
> tmp = load_reg(s, rd);
> - gen_aa32_st16(tmp, addr, IS_USER(s));
> + gen_aa32_st16(tmp, addr, MEM_INDEX(s));
> tcg_temp_free_i32(tmp);
> }
> tcg_temp_free_i32(addr);
> @@ -10461,12 +10466,12 @@ static void disas_thumb_insn(CPUARMState *env,
> DisasContext *s)
> if (insn & (1 << 11)) {
> /* load */
> tmp = tcg_temp_new_i32();
> - gen_aa32_ld32u(tmp, addr, IS_USER(s));
> + gen_aa32_ld32u(tmp, addr, MEM_INDEX(s));
> store_reg(s, rd, tmp);
> } else {
> /* store */
> tmp = load_reg(s, rd);
> - gen_aa32_st32(tmp, addr, IS_USER(s));
> + gen_aa32_st32(tmp, addr, MEM_INDEX(s));
> tcg_temp_free_i32(tmp);
> }
> tcg_temp_free_i32(addr);
> @@ -10534,12 +10539,12 @@ static void disas_thumb_insn(CPUARMState *env,
> DisasContext *s)
> if (insn & (1 << 11)) {
> /* pop */
> tmp = tcg_temp_new_i32();
> - gen_aa32_ld32u(tmp, addr, IS_USER(s));
> + gen_aa32_ld32u(tmp, addr, MEM_INDEX(s));
> store_reg(s, i, tmp);
> } else {
> /* push */
> tmp = load_reg(s, i);
> - gen_aa32_st32(tmp, addr, IS_USER(s));
> + gen_aa32_st32(tmp, addr, MEM_INDEX(s));
> tcg_temp_free_i32(tmp);
> }
> /* advance to the next address. */
> @@ -10551,13 +10556,13 @@ static void disas_thumb_insn(CPUARMState *env,
> DisasContext *s)
> if (insn & (1 << 11)) {
> /* pop pc */
> tmp = tcg_temp_new_i32();
> - gen_aa32_ld32u(tmp, addr, IS_USER(s));
> + gen_aa32_ld32u(tmp, addr, MEM_INDEX(s));
> /* don't set the pc until the rest of the instruction
> has completed */
> } else {
> /* push lr */
> tmp = load_reg(s, 14);
> - gen_aa32_st32(tmp, addr, IS_USER(s));
> + gen_aa32_st32(tmp, addr, MEM_INDEX(s));
> tcg_temp_free_i32(tmp);
> }
> tcg_gen_addi_i32(addr, addr, 4);
> @@ -10686,7 +10691,7 @@ static void disas_thumb_insn(CPUARMState *env,
> DisasContext *s)
> if (insn & (1 << 11)) {
> /* load */
> tmp = tcg_temp_new_i32();
> - gen_aa32_ld32u(tmp, addr, IS_USER(s));
> + gen_aa32_ld32u(tmp, addr, MEM_INDEX(s));
> if (i == rn) {
> loaded_var = tmp;
> } else {
> @@ -10695,7 +10700,7 @@ static void disas_thumb_insn(CPUARMState *env,
> DisasContext *s)
> } else {
> /* store */
> tmp = load_reg(s, i);
> - gen_aa32_st32(tmp, addr, IS_USER(s));
> + gen_aa32_st32(tmp, addr, MEM_INDEX(s));
> tcg_temp_free_i32(tmp);
> }
> /* advance to the next address */
> @@ -10813,6 +10818,8 @@ static inline void
> gen_intermediate_code_internal(ARMCPU *cpu,
> #if !defined(CONFIG_USER_ONLY)
> dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
> dc->ns = ARM_TBFLAG_NS(tb->flags);
> + dc->mem_idx = (IS_USER(dc) ? MMU_USER_BIT : 0) |
> + (IS_NS(dc) ? 0 : MMU_SECURE_BIT);
> #endif
> dc->cpacr_fpen = ARM_TBFLAG_CPACR_FPEN(tb->flags);
> dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
> diff --git a/target-arm/translate.h b/target-arm/translate.h
> index 5732738..eeb77fb 100644
> --- a/target-arm/translate.h
> +++ b/target-arm/translate.h
> @@ -20,6 +20,7 @@ typedef struct DisasContext {
> #if !defined(CONFIG_USER_ONLY)
> int user;
> int ns;
> + int mem_idx;
> #endif
> bool cpacr_fpen; /* FP enabled via CPACR.FPEN */
> bool vfp_enabled; /* FP enabled via FPSCR.EN */
[Qemu-devel] [PATCH v2 20/23] target-arm: add MVBAR support, Fabian Aggeler, 2014/05/13
[Qemu-devel] [PATCH v2 13/23] target-arm: Split TLB for secure state and EL3 in Aarch64, Fabian Aggeler, 2014/05/13
- Re: [Qemu-devel] [PATCH v2 13/23] target-arm: Split TLB for secure state and EL3 in Aarch64,
Sergey Fedorov <=
[Qemu-devel] [PATCH v2 21/23] target-arm: implement SMC instruction, Fabian Aggeler, 2014/05/13
[Qemu-devel] [PATCH v2 15/23] target-arm: Restrict EL3 to Aarch32 state, Fabian Aggeler, 2014/05/13
[Qemu-devel] [PATCH v2 23/23] target-arm: Respect SCR.FW, SCR.AW and SCTLR.NMFI, Fabian Aggeler, 2014/05/13
[Qemu-devel] [PATCH v2 16/23] target-arm: Use arm_current_sctlr to access SCTLR, Fabian Aggeler, 2014/05/13
[Qemu-devel] [PATCH v2 19/23] target-arm: maintain common bits of banked CP registers, Fabian Aggeler, 2014/05/13