[Qemu-ppc] [RFC 3/9] target-ppc: Rework SLB page size lookup


From: David Gibson
Subject: [Qemu-ppc] [RFC 3/9] target-ppc: Rework SLB page size lookup
Date: Fri, 15 Jan 2016 18:04:34 +1100

Currently, the ppc_hash64_page_shift() function looks up a page size based
on information in an SLB entry.  It open-codes the bit translation for
existing CPUs; however, different CPU models can have different SLB
encodings.  We already store those encodings in the 'sps' table in
CPUPPCState, but we don't currently enforce that the table actually
matches the logic in ppc_hash64_page_shift().
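
For context, here is a minimal, self-contained sketch of what that table
lookup amounts to.  The constants mirror target-ppc/mmu-hash64.h, and the
table contents assume a POWER7-style configuration (4K/64K/16M base page
sizes); both are illustrative, not copied from a real CPU model:

    #include <stdint.h>
    #include <stdio.h>

    /* L||LP bits of an SLB VSID, as in target-ppc/mmu-hash64.h */
    #define SLB_VSID_LLP_MASK  0x0000000000000130ULL
    #define SLB_VSID_4K        0x0000000000000000ULL
    #define SLB_VSID_64K       0x0000000000000110ULL
    #define SLB_VSID_16M       0x0000000000000100ULL

    struct seg_page_size {
        uint32_t page_shift;  /* base page shift, 0 marks an unused slot */
        uint64_t slb_enc;     /* SLB L||LP encoding selecting this size */
    };

    /* Assumed POWER7-style sps table for this example */
    static const struct seg_page_size example_sps[] = {
        { 12, SLB_VSID_4K  },
        { 16, SLB_VSID_64K },
        { 24, SLB_VSID_16M },
        { 0, 0 },             /* terminator, as in the real sps table */
    };

    /* Same scan as the new slb_page_size(): match the SLBE's L||LP
     * bits against each entry; NULL means a bad encoding */
    static const struct seg_page_size *lookup(uint64_t slbv)
    {
        int i;

        for (i = 0; example_sps[i].page_shift; i++) {
            if ((slbv & SLB_VSID_LLP_MASK) == example_sps[i].slb_enc) {
                return &example_sps[i];
            }
        }
        return NULL;
    }

    int main(void)
    {
        const struct seg_page_size *sps = lookup(SLB_VSID_64K);

        printf("page_shift = %u\n", sps ? sps->page_shift : 0); /* 16 */
        return 0;
    }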

This patch reworks lookup of page size from SLB in several ways:
  * ppc_hash64_page_shift() is replaced by slb_page_size(), which
     - Uses the sps table, so it is correct for whatever encodings are
       stored there
     - Returns a pointer to a table entry, rather than the raw shift,
       which we'll want later on
  * ppc_store_slb() is adjusted to verify that the stored SLBE has a
    valid page size encoding; otherwise it fails, which will trigger an
    illegal instruction exception (the caller is sketched after this
    list)
  * ppc_hash64_htab_lookup() is extended to return the SLB's page size
    in addition to other information
  * ppc_hash64_pte_raddr() is adjusted to take a page shift directly
    instead of an SLB entry.  Both callers have just called
    ppc_hash64_htab_lookup(), which has already done the SLB -> page
    size lookup
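
For reference, the caller that turns the new failure into an exception is
the slbmte helper; from memory of target-ppc/mmu_helper.c (shown here as
a sketch for context, not as part of this diff), it looks roughly like:

    /* A failed ppc_store_slb() -- now including a bad page size
     * encoding -- is reported to the guest as an illegal instruction */
    void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
    {
        PowerPCCPU *cpu = ppc_env_get_cpu(env);

        if (ppc_store_slb(cpu, rb, rs) < 0) {
            helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_INVAL);
        }
    }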

Signed-off-by: David Gibson <address@hidden>
---
 target-ppc/mmu-hash64.c | 88 ++++++++++++++++++++++++++++---------------------
 1 file changed, 51 insertions(+), 37 deletions(-)

diff --git a/target-ppc/mmu-hash64.c b/target-ppc/mmu-hash64.c
index 03e25fd..678053b 100644
--- a/target-ppc/mmu-hash64.c
+++ b/target-ppc/mmu-hash64.c
@@ -90,6 +90,28 @@ void dump_slb(FILE *f, fprintf_function cpu_fprintf, PowerPCCPU *cpu)
     }
 }
 
+static const struct ppc_one_seg_page_size *slb_page_size(PowerPCCPU *cpu,
+                                                         uint64_t slbv)
+{
+    CPUPPCState *env = &cpu->env;
+    int i;
+
+    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
+        const struct ppc_one_seg_page_size *sps = &env->sps.sps[i];
+
+        if (!sps->page_shift) {
+            break;
+        }
+
+        if ((slbv & SLB_VSID_LLP_MASK) == sps->slb_enc) {
+            return sps;
+        }
+    }
+
+    /* Bad page size encoding */
+    return NULL;
+}
+
 void helper_slbia(CPUPPCState *env)
 {
     PowerPCCPU *cpu = ppc_env_get_cpu(env);
@@ -150,6 +172,9 @@ int ppc_store_slb(PowerPCCPU *cpu, target_ulong rb, target_ulong rs)
     if ((rs & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) {
         return -1; /* 1T segment on MMU that doesn't support it */
     }
+    if (!slb_page_size(cpu, rs)) {
+        return -1; /* Bad page size encoding for this CPU */
+    }
 
     /* Mask out the slot number as we store the entry */
     slb->esid = rb & (SLB_ESID_ESID | SLB_ESID_V);
@@ -392,46 +417,37 @@ static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
     return -1;
 }
 
-static uint64_t ppc_hash64_page_shift(ppc_slb_t *slb)
-{
-    uint64_t epnshift;
-
-    /* Page size according to the SLB, which we use to generate the
-     * EPN for hash table lookup..  When we implement more recent MMU
-     * extensions this might be different from the actual page size
-     * encoded in the PTE */
-    if ((slb->vsid & SLB_VSID_LLP_MASK) == SLB_VSID_4K) {
-        epnshift = TARGET_PAGE_BITS;
-    } else if ((slb->vsid & SLB_VSID_LLP_MASK) == SLB_VSID_64K) {
-        epnshift = TARGET_PAGE_BITS_64K;
-    } else {
-        epnshift = TARGET_PAGE_BITS_16M;
-    }
-    return epnshift;
-}
-
 static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
                                      ppc_slb_t *slb, target_ulong eaddr,
-                                     ppc_hash_pte64_t *pte)
+                                     ppc_hash_pte64_t *pte,
+                                     const struct ppc_one_seg_page_size **psps)
 {
     CPUPPCState *env = &cpu->env;
     hwaddr pte_offset;
     hwaddr hash;
-    uint64_t vsid, epnshift, epnmask, epn, ptem;
+    const struct ppc_one_seg_page_size *sps;
+    uint64_t vsid, epnmask, epn, ptem;
+
+    sps = slb_page_size(cpu, slb->vsid);
+    /* The SLB store path should prevent any bad page size encodings
+     * getting in there, so: */
+    assert(sps);
+    if (psps) {
+        *psps = sps;
+    }
 
-    epnshift = ppc_hash64_page_shift(slb);
-    epnmask = ~((1ULL << epnshift) - 1);
+    epnmask = ~((1ULL << sps->page_shift) - 1);
 
     if (slb->vsid & SLB_VSID_B) {
         /* 1TB segment */
         vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
         epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
-        hash = vsid ^ (vsid << 25) ^ (epn >> epnshift);
+        hash = vsid ^ (vsid << 25) ^ (epn >> sps->page_shift);
     } else {
         /* 256M segment */
         vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
         epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
-        hash = vsid ^ (epn >> epnshift);
+        hash = vsid ^ (epn >> sps->page_shift);
     }
     ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);
 
@@ -463,17 +479,12 @@ static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
     return pte_offset;
 }
 
-static hwaddr ppc_hash64_pte_raddr(ppc_slb_t *slb, ppc_hash_pte64_t pte,
-                                   target_ulong eaddr)
+static hwaddr ppc_hash64_pte_raddr(PowerPCCPU *cpu, unsigned page_shift,
+                                   ppc_hash_pte64_t pte, target_ulong eaddr)
 {
-    hwaddr mask;
-    int target_page_bits;
+    hwaddr mask = (1ULL << page_shift) - 1;
     hwaddr rpn = pte.pte1 & HPTE64_R_RPN;
-    /*
-     * We support 4K, 64K and 16M now
-     */
-    target_page_bits = ppc_hash64_page_shift(slb);
-    mask = (1ULL << target_page_bits) - 1;
+
     return (rpn & ~mask) | (eaddr & mask);
 }
 
@@ -483,6 +494,7 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
     CPUState *cs = CPU(cpu);
     CPUPPCState *env = &cpu->env;
     ppc_slb_t *slb;
+    const struct ppc_one_seg_page_size *sps;
     hwaddr pte_offset;
     ppc_hash_pte64_t pte;
     int pp_prot, amr_prot, prot;
@@ -526,7 +538,7 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
     }
 
     /* 4. Locate the PTE in the hash table */
-    pte_offset = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte);
+    pte_offset = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte, &sps);
     if (pte_offset == -1) {
         if (rwx == 2) {
             cs->exception_index = POWERPC_EXCP_ISI;
@@ -598,7 +610,7 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
 
     /* 7. Determine the real address from the PTE */
 
-    raddr = ppc_hash64_pte_raddr(slb, pte, eaddr);
+    raddr = ppc_hash64_pte_raddr(cpu, sps->page_shift, pte, eaddr);
 
     tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                  prot, mmu_idx, TARGET_PAGE_SIZE);
@@ -610,6 +622,7 @@ hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
 {
     CPUPPCState *env = &cpu->env;
     ppc_slb_t *slb;
+    const struct ppc_one_seg_page_size *sps;
     hwaddr pte_offset;
     ppc_hash_pte64_t pte;
 
@@ -623,12 +636,13 @@ hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
         return -1;
     }
 
-    pte_offset = ppc_hash64_htab_lookup(cpu, slb, addr, &pte);
+    pte_offset = ppc_hash64_htab_lookup(cpu, slb, addr, &pte, &sps);
     if (pte_offset == -1) {
         return -1;
     }
 
-    return ppc_hash64_pte_raddr(slb, pte, addr) & TARGET_PAGE_MASK;
+    return ppc_hash64_pte_raddr(cpu, sps->page_shift, pte, addr)
+        & TARGET_PAGE_MASK;
 }
 
 void ppc_hash64_store_hpte(PowerPCCPU *cpu,
-- 
2.5.0