From: Richard Henderson
Subject: [PATCH 05/15] accel/tcg: Handle page span access before i/o access
Date: Sat, 19 Jun 2021 10:26:16 -0700

Move the check for an access that spans two pages ahead of the i/o
access path in load_helper and store_helper.  At present this is a
distinction without much effect, but it will enable further
improvements.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cputlb.c | 36 ++++++++++++++++++------------------
 1 file changed, 18 insertions(+), 18 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 23a97849be..6209e00c9b 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1916,6 +1916,14 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
         tlb_addr &= ~TLB_INVALID_MASK;
     }
 
+    /* Handle access that spans two pages. */
+    if (size > 1
+        && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
+                    >= TARGET_PAGE_SIZE)) {
+        return load_helper_unaligned(env, addr, oi, retaddr, op,
+                                     code_read, byte_load);
+    }
+
     /* Handle anything that isn't just a straight memory access.  */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
         CPUIOTLBEntry *iotlbentry;
@@ -1957,14 +1965,6 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
         return load_memop(haddr, op);
     }
 
-    /* Handle slow unaligned access (it spans two pages or IO).  */
-    if (size > 1
-        && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
-                    >= TARGET_PAGE_SIZE)) {
-        return load_helper_unaligned(env, addr, oi, retaddr, op,
-                                     code_read, byte_load);
-    }
-
     haddr = (void *)((uintptr_t)addr + entry->addend);
     return load_memop(haddr, op);
 }
@@ -2421,6 +2421,16 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
         tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
     }
 
+    /* Handle access that spans two pages. */
+    if (size > 1
+        && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
+                     >= TARGET_PAGE_SIZE)) {
+    do_unaligned_access:
+        store_helper_unaligned(env, addr, val, retaddr, size,
+                               mmu_idx, memop_big_endian(op));
+        return;
+    }
+
     /* Handle anything that isn't just a straight memory access.  */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
         CPUIOTLBEntry *iotlbentry;
@@ -2474,16 +2484,6 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
         return;
     }
 
-    /* Handle slow unaligned access (it spans two pages or IO).  */
-    if (size > 1
-        && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
-                     >= TARGET_PAGE_SIZE)) {
-    do_unaligned_access:
-        store_helper_unaligned(env, addr, val, retaddr, size,
-                               mmu_idx, memop_big_endian(op));
-        return;
-    }
-
     haddr = (void *)((uintptr_t)addr + entry->addend);
     store_memop(haddr, val, op);
 }
-- 
2.25.1
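
For reference, the span test in the hunks above computes the offset of
addr within its page (addr & ~TARGET_PAGE_MASK) and checks whether the
last byte of the access (offset + size - 1) falls past the end of the
page.  Below is a minimal standalone sketch of that test, assuming a
hypothetical 4 KiB target page; QEMU defines TARGET_PAGE_SIZE and
TARGET_PAGE_MASK per target, so the constants here are illustrative
only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for QEMU's per-target definitions. */
#define TARGET_PAGE_SIZE 4096ULL
#define TARGET_PAGE_MASK (~(TARGET_PAGE_SIZE - 1))

/* True if a 'size'-byte access at 'addr' crosses a page boundary. */
static bool spans_two_pages(uint64_t addr, unsigned size)
{
    return size > 1
           && (addr & ~TARGET_PAGE_MASK) + size - 1 >= TARGET_PAGE_SIZE;
}

int main(void)
{
    printf("%d\n", spans_two_pages(0x0ffe, 4)); /* 1: bytes 0xffe..0x1001 cross */
    printf("%d\n", spans_two_pages(0x0ffc, 4)); /* 0: bytes 0xffc..0xfff fit in one page */
    printf("%d\n", spans_two_pages(0x1000, 1)); /* 0: a single byte never spans */
    return 0;
}

The "+ size - 1" makes the test exact: an access whose last byte is the
last byte of the page (e.g. 4 bytes at offset 0xffc) is not treated as
spanning, so only genuinely cross-page accesses take the slow path.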