From: Aggeler Fabian
Subject: Re: [Qemu-devel] [PATCH v2 16/23] target-arm: Use arm_current_sctlr to access SCTLR
Date: Thu, 22 May 2014 14:56:22 +0000

On 22 May 2014, at 09:33, Edgar E. Iglesias <address@hidden> wrote:

> On Tue, May 13, 2014 at 06:16:01PM +0200, Fabian Aggeler wrote:
>> Add SCTLR_EL3 and introduce new function to access correct
>> instance of SCTLR in different modes/worlds.
> 
> Hi,
> 
> AArch64 has a couple of insn/regs that do address translation
> as seen by other ELs. E.g., from EL3 you can perform address
> translation as seen by EL0. See for example ATS12E0R.

Good point, I didn't know about them.

> AArch32 has similar features; it can also lower S to NS when in S mode.

Do you mean features other than the AT* operations? Or do you mean the possibility
of accessing both the secure and non-secure instances from monitor mode by setting
the SCR.NS bit? The latter is currently handled by the A32_MAPPED_EL3_REG_GET macro,
which only returns the secure instance when the SCR.NS bit is clear.
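For reference, a rough sketch of the idea behind such a banked-register getter (this is not the actual macro from the series; the _el1/_el3 field suffixes and the SCR.NS bit test are assumptions based on this patch):

/* Rough sketch only -- not the real A32_MAPPED_EL3_REG_GET from the series.
 * Idea: in AArch32 secure state with SCR.NS clear, return the secure (EL3)
 * instance of the banked cp15 register; otherwise return the non-secure
 * (EL1) instance.  arm_is_secure() and the _el1/_el3 field names follow
 * this patch series and are assumptions here.
 */
#define A32_MAPPED_EL3_REG_GET_SKETCH(env, regname)                     \
    ((arm_is_secure(env) && !((env)->cp15.c1_scr & 1 /* SCR.NS */))     \
         ? (env)->cp15.regname##_el3                                    \
         : (env)->cp15.regname##_el1)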

> 
> With regards to arm_current_sctlr() and the use in get_phys_addr, I
> don't think it is enough here.
> 
> I was planning to post patches that add new args to get_phys_addr()
> with the translation-EL and flags to control if stage-2 translation
> should be done or not. That way ats_write() can keep reusing
> get_phys_addr(). We would need to pass the security state as an argument
> as well to handle AArch32. Does that make sense?

From my view, that makes sense.
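For illustration, the extended interface could look roughly like this (a sketch only; the parameter names translation_el, is_secure and do_stage2 are invented here, and Edgar's actual patches may differ):

/* Sketch of the interface described above: the caller states which EL and
 * security state the translation is performed for and whether stage-2
 * translation applies, so ats_write() can keep reusing get_phys_addr()
 * for the AT* operations.
 */
static int get_phys_addr(CPUARMState *env, target_ulong address,
                         int access_type, int is_user,
                         int translation_el,  /* EL the translation targets */
                         bool is_secure,      /* security state (AArch32) */
                         bool do_stage2,      /* apply stage-2 translation? */
                         hwaddr *phys_ptr, int *prot,
                         target_ulong *page_size);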

I went through my changes again, and in most of the locations where I changed
the SCTLR access to use arm_current_sctlr() we should actually just access
SCTLR_EL1 directly (e.g. in aa64_*_access() or ctr_el0_access()).
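For example, ctr_el0_access() from the hunk quoted below would then just read the EL1 instance directly (a sketch; c1_sys_el1 is the field this patch introduces):

/* Sketch: read SCTLR_EL1 directly rather than via arm_current_sctlr(). */
static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Only accessible in EL0 if SCTLR_EL1.UCT is set (and only in AArch64,
     * but the AArch32 CTR has its own reginfo struct)
     */
    if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys_el1 & SCTLR_UCT)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}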

For the remaining places:

- In arm_cpu_do_interrupt() we still need to distinguish between EL3 using
   - AArch32 (get the secure or non-secure instance depending on arm_is_secure()) or
   - AArch64 (get SCTLR_EL1).
  See the sketch below.

- check_ap() is only used by get_phys_addr_v5/v6(), so it can just use the
  secure or non-secure instance.

- In arm_cpu_reset() in cpu.c we only need to check the SCTLR.V bit [13] for CPUs
  without AArch64, but we need to use the secure or non-secure instance depending
  on which world we are booting into.

If I didn't miss or misinterpret something in this list, I guess this does not
really need its own function, as we don't have much duplication.
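For the arm_cpu_do_interrupt() case, the open-coded selection could look roughly like this (a sketch; the ARM_FEATURE_AARCH64 check, arm_is_secure() and the c1_sys_el1/c1_sys_el3 fields are assumptions based on this thread, not the final patch):

/* Sketch of the open-coded SCTLR selection for arm_cpu_do_interrupt(),
 * following the cases listed above.
 */
uint64_t sctlr;

if (arm_feature(env, ARM_FEATURE_AARCH64)) {
    /* EL3 using AArch64: AArch32 exceptions use SCTLR_EL1 */
    sctlr = env->cp15.c1_sys_el1;
} else {
    /* AArch32-only: pick the banked instance by security state */
    sctlr = arm_is_secure(env) ? env->cp15.c1_sys_el3
                               : env->cp15.c1_sys_el1;
}

if (sctlr & SCTLR_V) {
    /* High vectors: base address cannot be remapped */
    addr += 0xffff0000;
}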

Best,
Fabian


> 
> Cheers,
> Edgar
> 
>> 
>> Signed-off-by: Fabian Aggeler <address@hidden>
>> ---
>> hw/arm/pxa2xx.c        |  2 +-
>> target-arm/cpu-qom.h   |  1 +
>> target-arm/cpu.c       |  4 +--
>> target-arm/cpu.h       | 14 ++++++++++-
>> target-arm/helper.c    | 67 ++++++++++++++++++++++++++++++++++++++------------
>> target-arm/op_helper.c |  2 +-
>> 6 files changed, 69 insertions(+), 21 deletions(-)
>> 
>> diff --git a/hw/arm/pxa2xx.c b/hw/arm/pxa2xx.c
>> index e0cd847..8b69e72 100644
>> --- a/hw/arm/pxa2xx.c
>> +++ b/hw/arm/pxa2xx.c
>> @@ -274,7 +274,7 @@ static void pxa2xx_pwrmode_write(CPUARMState *env, const ARMCPRegInfo *ri,
>>     case 3:
>>         s->cpu->env.uncached_cpsr = ARM_CPU_MODE_SVC;
>>         s->cpu->env.daif = PSTATE_A | PSTATE_F | PSTATE_I;
>> -        s->cpu->env.cp15.c1_sys = 0;
>> +        s->cpu->env.cp15.c1_sys_el1 = 0;
>>         s->cpu->env.cp15.c1_coproc = 0;
>>         s->cpu->env.cp15.ttbr0_el1 = 0;
>>         s->cpu->env.cp15.c3 = 0;
>> diff --git a/target-arm/cpu-qom.h b/target-arm/cpu-qom.h
>> index edc7f26..38cbd43 100644
>> --- a/target-arm/cpu-qom.h
>> +++ b/target-arm/cpu-qom.h
>> @@ -119,6 +119,7 @@ typedef struct ARMCPU {
>>     uint32_t mvfr2;
>>     uint32_t ctr;
>>     uint32_t reset_sctlr;
>> +    uint32_t reset_sctlr_el3;
>>     uint32_t id_pfr0;
>>     uint32_t id_pfr1;
>>     uint32_t id_dfr0;
>> diff --git a/target-arm/cpu.c b/target-arm/cpu.c
>> index b0d4a9b..bdbdbb1 100644
>> --- a/target-arm/cpu.c
>> +++ b/target-arm/cpu.c
>> @@ -100,7 +100,7 @@ static void arm_cpu_reset(CPUState *s)
>> #if defined(CONFIG_USER_ONLY)
>>         env->pstate = PSTATE_MODE_EL0t;
>>         /* Userspace expects access to CTL_EL0 and the cache ops */
>> -        env->cp15.c1_sys |= SCTLR_UCT | SCTLR_UCI;
>> +        env->cp15.c1_sys_el1 |= SCTLR_UCT | SCTLR_UCI;
>>         /* and to the FP/Neon instructions */
>>         env->cp15.c1_coproc = deposit64(env->cp15.c1_coproc, 20, 2, 3);
>> #else
>> @@ -146,7 +146,7 @@ static void arm_cpu_reset(CPUState *s)
>>         }
>>     }
>> 
>> -    if (env->cp15.c1_sys & SCTLR_V) {
>> +    if (arm_current_sctlr(env) & SCTLR_V) {
>>             env->regs[15] = 0xFFFF0000;
>>     }
>> 
>> diff --git a/target-arm/cpu.h b/target-arm/cpu.h
>> index a4bb6bd..780c1f5 100644
>> --- a/target-arm/cpu.h
>> +++ b/target-arm/cpu.h
>> @@ -180,7 +180,8 @@ typedef struct CPUARMState {
>>     struct {
>>         uint32_t c0_cpuid;
>>         uint64_t c0_cssel; /* Cache size selection.  */
>> -        uint64_t c1_sys; /* System control register.  */
>> +        uint64_t c1_sys_el1; /* System control register (EL1). */
>> +        uint64_t c1_sys_el3; /* System control register (EL3).  */
>>         uint64_t c1_coproc; /* Coprocessor access register.  */
>>         uint32_t c1_xscaleauxcr; /* XScale auxiliary control register.  */
>>         uint32_t c1_scr; /* secure config register.  */
>> @@ -971,6 +972,17 @@ static inline int arm_current_pl(CPUARMState *env)
>>     return 1;
>> }
>> 
>> +static inline uint64_t arm_current_sctlr(CPUARMState *env)
>> +{
>> +    if (is_a64(env) && arm_current_pl(env) == 3) {
>> +        /* EL3 has its own SCTLR */
>> +        return env->cp15.c1_sys_el3;
>> +    } else {
>> +        /* Only A32 with NS-bit clear accesses secure instance of SCTLR */
>> +        return A32_MAPPED_EL3_REG_GET(env, c1_sys);
>> +    }
>> +}
>> +
>> typedef struct ARMCPRegInfo ARMCPRegInfo;
>> 
>> typedef enum CPAccessResult {
>> diff --git a/target-arm/helper.c b/target-arm/helper.c
>> index 98c3dc9..ac8b15a 100644
>> --- a/target-arm/helper.c
>> +++ b/target-arm/helper.c
>> @@ -1767,7 +1767,7 @@ static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
>> 
>> static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri)
>> {
>> -    if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_UMA)) {
>> +    if (arm_current_pl(env) == 0 && !(arm_current_sctlr(env) & SCTLR_UMA)) {
>>         return CP_ACCESS_TRAP;
>>     }
>>     return CP_ACCESS_OK;
>> @@ -1785,7 +1785,7 @@ static CPAccessResult aa64_cacheop_access(CPUARMState *env,
>>     /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
>>      * SCTLR_EL1.UCI is set.
>>      */
>> -    if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_UCI)) {
>> +    if (arm_current_pl(env) == 0 && !(arm_current_sctlr(env) & SCTLR_UCI)) {
>>         return CP_ACCESS_TRAP;
>>     }
>>     return CP_ACCESS_OK;
>> @@ -1823,7 +1823,7 @@ static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri)
>>     /* We don't implement EL2, so the only control on DC ZVA is the
>>      * bit in the SCTLR which can prohibit access for EL0.
>>      */
>> -    if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_DZE)) {
>> +    if (arm_current_pl(env) == 0 && !(arm_current_sctlr(env) & SCTLR_DZE)) {
>>         return CP_ACCESS_TRAP;
>>     }
>>     return CP_ACCESS_OK;
>> @@ -2146,7 +2146,7 @@ static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri)
>>     /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64,
>>      * but the AArch32 CTR has its own reginfo struct)
>>      */
>> -    if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_UCT)) {
>> +    if (arm_current_pl(env) == 0 && !(arm_current_sctlr(env) & SCTLR_UCT)) {
>>         return CP_ACCESS_TRAP;
>>     }
>>     return CP_ACCESS_OK;
>> @@ -2573,10 +2573,11 @@ void register_cp_regs_for_features(ARMCPU *cpu)
>> 
>>     /* Generic registers whose values depend on the implementation */
>>     {
>> -        ARMCPRegInfo sctlr = {
>> -            .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
>> -            .opc0 = 3, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
>> -            .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_sys),
>> +        ARMCPRegInfo sctlr_el1 = {
>> +            .name = "SCTLR_EL1", .state = ARM_CP_STATE_BOTH,
>> +            .type = ARM_CP_NONSECURE, .opc0 = 3, .crn = 1, .crm = 0, .opc1 = 0,
>> +            .opc2 = 0, .access = PL1_RW,
>> +            .fieldoffset = offsetof(CPUARMState, cp15.c1_sys_el1),
>>             .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
>>             .raw_writefn = raw_write,
>>         };
>> @@ -2585,9 +2586,43 @@ void register_cp_regs_for_features(ARMCPU *cpu)
>>              * arch/arm/mach-pxa/sleep.S expects two instructions following
>>              * an MMU enable to execute from cache.  Imitate this behaviour.
>>              */
>> -            sctlr.type |= ARM_CP_SUPPRESS_TB_END;
>> +            sctlr_el1.type |= ARM_CP_SUPPRESS_TB_END;
>>         }
>> -        define_one_arm_cp_reg(cpu, &sctlr);
>> +        define_one_arm_cp_reg(cpu, &sctlr_el1);
>> +
>> +        ARMCPRegInfo sctlr_el1_s = {
>> +            .name = "SCTLR_EL1(S)", .type = ARM_CP_SECURE,
>> +            .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
>> +            .access = PL3_RW,
>> +            .fieldoffset = offsetof(CPUARMState, cp15.c1_sys_el3),
>> +            .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
>> +            .raw_writefn = raw_write,
>> +        };
>> +        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
>> +            /* Normally we would always end the TB on an SCTLR write, but Linux
>> +             * arch/arm/mach-pxa/sleep.S expects two instructions following
>> +             * an MMU enable to execute from cache.  Imitate this behaviour.
>> +             */
>> +            sctlr_el1_s.type |= ARM_CP_SUPPRESS_TB_END;
>> +        }
>> +        define_one_arm_cp_reg(cpu, &sctlr_el1_s);
>> +
>> +        ARMCPRegInfo sctlr_el3 = {
>> +             .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
>> +             .opc0 = 3, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
>> +             .access = PL3_RW, .type = ARM_CP_SECURE,
>> +             .fieldoffset = offsetof(CPUARMState, cp15.c1_sys_el3),
>> +             .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr_el3,
>> +             .raw_writefn = raw_write,
>> +        };
>> +        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
>> +            /* Normally we would always end the TB on an SCTLR write, but Linux
>> +             * arch/arm/mach-pxa/sleep.S expects two instructions following
>> +             * an MMU enable to execute from cache.  Imitate this behaviour.
>> +             */
>> +            sctlr_el3.type |= ARM_CP_SUPPRESS_TB_END;
>> +        }
>> +        define_one_arm_cp_reg(cpu, &sctlr_el3);
>>     }
>> }
>> 
>> @@ -3475,7 +3510,7 @@ void arm_cpu_do_interrupt(CPUState *cs)
>>         return; /* Never happens.  Keep compiler happy.  */
>>     }
>>     /* High vectors.  */
>> -    if (env->cp15.c1_sys & SCTLR_V) {
>> +    if (arm_current_sctlr(env) & SCTLR_V) {
>>         /* when enabled, base address cannot be remapped.  */
>>         addr += 0xffff0000;
>>     } else {
>> @@ -3498,7 +3533,7 @@ void arm_cpu_do_interrupt(CPUState *cs)
>>     /* this is a lie, as the was no c1_sys on V4T/V5, but who cares
>>      * and we should just guard the thumb mode on V4 */
>>     if (arm_feature(env, ARM_FEATURE_V4T)) {
>> -        env->thumb = (env->cp15.c1_sys & SCTLR_TE) != 0;
>> +        env->thumb = (arm_current_sctlr(env) & SCTLR_TE) != 0;
>>     }
>>     env->regs[14] = env->regs[15] + offset;
>>     env->regs[15] = addr;
>> @@ -3529,7 +3564,7 @@ static inline int check_ap(CPUARMState *env, int ap, int domain_prot,
>>       }
>>       if (access_type == 1)
>>           return 0;
>> -      switch (env->cp15.c1_sys & (SCTLR_S | SCTLR_R)) {
>> +      switch (arm_current_sctlr(env) & (SCTLR_S | SCTLR_R)) {
>>       case SCTLR_S:
>>           return is_user ? 0 : PAGE_READ;
>>       case SCTLR_R:
>> @@ -3763,7 +3798,7 @@ static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
>>             goto do_fault;
>> 
>>         /* The simplified model uses AP[0] as an access control bit.  */
>> -        if ((env->cp15.c1_sys & SCTLR_AFE) && (ap & 1) == 0) {
>> +        if ((arm_current_sctlr(env) & SCTLR_AFE) && (ap & 1) == 0) {
>>             /* Access flag fault.  */
>>             code = (code == 15) ? 6 : 3;
>>             goto do_fault;
>> @@ -4099,7 +4134,7 @@ static inline int get_phys_addr(CPUARMState *env, target_ulong address,
>>     if (address < 0x02000000)
>>         address += env->cp15.c13_fcse;
>> 
>> -    if ((env->cp15.c1_sys & SCTLR_M) == 0) {
>> +    if ((arm_current_sctlr(env) & SCTLR_M) == 0) {
>>         /* MMU/MPU disabled.  */
>>         *phys_ptr = address;
>>         *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
>> @@ -4112,7 +4147,7 @@ static inline int get_phys_addr(CPUARMState *env, target_ulong address,
>>     } else if (extended_addresses_enabled(env)) {
>>         return get_phys_addr_lpae(env, address, access_type, is_user, phys_ptr,
>>                                   prot, page_size);
>> -    } else if (env->cp15.c1_sys & SCTLR_XP) {
>> +    } else if (arm_current_sctlr(env) & SCTLR_XP) {
>>         return get_phys_addr_v6(env, address, access_type, is_user, phys_ptr,
>>                                 prot, page_size);
>>     } else {
>> diff --git a/target-arm/op_helper.c b/target-arm/op_helper.c
>> index fb90676..3eacea8 100644
>> --- a/target-arm/op_helper.c
>> +++ b/target-arm/op_helper.c
>> @@ -365,7 +365,7 @@ void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
>>      * Note that SPSel is never OK from EL0; we rely on handle_msr_i()
>>      * to catch that case at translate time.
>>      */
>> -    if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_UMA)) {
>> +    if (arm_current_pl(env) == 0 && !(arm_current_sctlr(env) & SCTLR_UMA)) {
>>         raise_exception(env, EXCP_UDEF);
>>     }
>> 
>> -- 
>> 1.8.3.2
>> 



