qemu-ppc
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Qemu-ppc] [PATCH 30/32] mmu-hash64: Correctly mask RPN from hash PTE


From: David Gibson
Subject: [Qemu-ppc] [PATCH 30/32] mmu-hash64: Correctly mask RPN from hash PTE
Date: Fri, 15 Feb 2013 19:01:20 +1100

At present, to generate the real address from the PTE contents, we simply
copy the second PTE word as is.  This is incorrect.  First, it leaves
the flags from the low bits of PTE word 1 in place in the real address.  We
get away with that only because all the callers mask the result with
TARGET_PAGE_MASK.

However, more recent CPUs also have a small number of flag bits (PP0 and
KEY) in the top bits of PTE word 1.  Any guest which used those bits would
fail with the current code.

This patch fixes the problem by correctly masking out the RPN field of
PTE word 1.  This is safe, even for older CPUs which didn't have PP0 and
KEY, because although the RPN notionally extended to the very top of PTE
word 1, none of those CPUs actually implemented that many real address
bits.

Signed-off-by: David Gibson <address@hidden>
---
 target-ppc/mmu-hash64.c |   30 +++++++++++++++++++-----------
 1 file changed, 19 insertions(+), 11 deletions(-)

diff --git a/target-ppc/mmu-hash64.c b/target-ppc/mmu-hash64.c
index 77b3455..848440c 100644
--- a/target-ppc/mmu-hash64.c
+++ b/target-ppc/mmu-hash64.c
@@ -361,6 +361,23 @@ static hwaddr ppc_hash64_htab_lookup(CPUPPCState *env,
     return pte_offset;
 }
 
+static hwaddr ppc_hash64_pte_real_address(ppc_slb_t *slb,
+                                          ppc_hash_pte64_t pte,
+                                          target_ulong eaddr)
+{
+    int pbits;
+    target_ulong pageoff;
+
+    /* Compute the full real address by combining the RPN from the PTE
+     * with the remaining address bits from the effective address */
+
+    /* FIXME: Should extend this to handle LLP page size extensions */
+    pbits = (slb->vsid & SLB_VSID_L) ? TARGET_PAGE_BITS_16M : TARGET_PAGE_BITS;
+    pageoff = eaddr & ((1ULL << pbits) - 1);
+
+    return (pte.pte1 & HPTE_R_RPN) | pageoff;
+}
+
 static int ppc_hash64_translate(CPUPPCState *env, struct mmu_ctx_hash64 *ctx,
                                 target_ulong eaddr, int rwx)
 {
@@ -368,7 +385,6 @@ static int ppc_hash64_translate(CPUPPCState *env, struct mmu_ctx_hash64 *ctx,
     hwaddr pte_offset;
     ppc_hash_pte64_t pte;
     uint64_t new_pte1;
-    int target_page_bits;
     const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC};
 
     assert((rwx == 0) || (rwx == 1) || (rwx == 2));
@@ -430,16 +446,8 @@ static int ppc_hash64_translate(CPUPPCState *env, struct mmu_ctx_hash64 *ctx,
         ppc_hash64_store_hpte1(env, pte_offset, new_pte1);
     }
 
-    ctx->raddr = pte.pte1;
-
-    /* We have a TLB that saves 4K pages, so let's
-     * split a huge page to 4k chunks */
-    target_page_bits = (slb->vsid & SLB_VSID_L)
-        ? TARGET_PAGE_BITS_16M : TARGET_PAGE_BITS;
-    if (target_page_bits != TARGET_PAGE_BITS) {
-        ctx->raddr |= (eaddr & ((1 << target_page_bits) - 1))
-                      & TARGET_PAGE_MASK;
-    }
+    /* 7. Compute the final address from PTE and offset in page */
+    ctx->raddr = ppc_hash64_pte_real_address(slb, pte, eaddr);
 
     return 0;
 }
-- 
1.7.10.4




reply via email to

[Prev in Thread] Current Thread [Next in Thread]