From: Emilio G. Cota
Subject: [Qemu-devel] [RFC 09/30] softmmu: add atomic helpers
Date: Mon, 27 Jun 2016 15:01:55 -0400

Signed-off-by: Emilio G. Cota <address@hidden>
---
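Notes: the helpers are generated with GEN_ATOMIC_HELPER, and the TLB
lookup/refill logic mirrors the existing softmmu store path. Two
limitations in this RFC are visible below: I/O accesses are not
supported yet (the helper hits an XXX placeholder and aborts), and an
access that spans two pages only raises an exception when MO_ALIGN is
requested; without MO_ALIGN it falls through to a single host access.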
 softmmu_template.h | 75 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 tcg/tcg.h          | 30 ++++++++++++++++++++++
 2 files changed, 105 insertions(+)
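
For illustration, this is roughly what one instantiation expands to,
assuming the usual 32-bit instantiation of softmmu_template.h
(DATA_TYPE = uint32_t, SUFFIX = l, MMUSUFFIX = _mmu). The alignment
checks and TLB refill are elided, so treat this as a sketch of the
fast path rather than the exact preprocessor output:

    uint32_t
    helper_atomic_fetch_addl_mmu(CPUArchState *env, target_ulong addr,
                                 uint32_t val, TCGMemOpIdx oi,
                                 uintptr_t retaddr)
    {
        unsigned mmu_idx = get_mmuidx(oi);
        int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        uintptr_t haddr;

        /* ... alignment checks and TLB refill as in the macro body ... */

        /* Translate the guest address to its host backing and do the
         * atomic read-modify-write there.  */
        haddr = addr + env->tlb_table[mmu_idx][index].addend;
        return atomic_fetch_add((uint32_t *)haddr, val);
    }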

diff --git a/softmmu_template.h b/softmmu_template.h
index 7b519dc..fda92ff 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -606,6 +606,81 @@ glue(glue(helper_cmpxchg, SUFFIX),
     return atomic_cmpxchg((DATA_TYPE *)haddr, old, new);
 }
 
+#define GEN_ATOMIC_HELPER(NAME)                                         \
+DATA_TYPE                                                               \
+glue(glue(glue(helper_atomic_, NAME), SUFFIX),                          \
+     MMUSUFFIX)(CPUArchState *env, target_ulong addr, DATA_TYPE val,    \
+                TCGMemOpIdx oi, uintptr_t retaddr)                      \
+{                                                                       \
+    unsigned mmu_idx = get_mmuidx(oi);                                  \
+    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);        \
+    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;  \
+    uintptr_t haddr;                                                    \
+                                                                        \
+    /* Adjust the given return address.  */                             \
+    retaddr -= GETPC_ADJ;                                               \
+                                                                        \
+    /* If the TLB entry is for a different page, reload and try again */\
+    if ((addr & TARGET_PAGE_MASK)                                       \
+        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {        \
+        if (unlikely((addr & (DATA_SIZE - 1)) != 0                      \
+                     && (get_memop(oi) & MO_AMASK) == MO_ALIGN)) {      \
+            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,\
+                                 mmu_idx, retaddr);                     \
+        }                                                               \
+        if (!VICTIM_TLB_HIT(addr_write)) {                              \
+            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx,   \
+                     retaddr);                                          \
+        }                                                               \
+        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;           \
+    }                                                                   \
+                                                                        \
+    /* Handle an IO access.  */                                         \
+    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {                       \
+        /* XXX */                                                       \
+        abort();                                                        \
+    }                                                                   \
+                                                                        \
+    /* Handle slow unaligned access (it spans two pages or IO).  */     \
+    if (DATA_SIZE > 1                                                   \
+        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1          \
+                    >= TARGET_PAGE_SIZE)) {                             \
+        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {                   \
+            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE, \
+                                 mmu_idx, retaddr);                     \
+        }                                                               \
+    }                                                                   \
+                                                                        \
+    /* Handle aligned access or unaligned access in the same page.  */  \
+    if (unlikely((addr & (DATA_SIZE - 1)) != 0                          \
+                 && (get_memop(oi) & MO_AMASK) == MO_ALIGN)) {          \
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,    \
+                             mmu_idx, retaddr);                         \
+    }                                                                   \
+    /*                                                                  \
+     * If the host allows unaligned accesses, then let the compiler     \
+     * do its thing when performing the access on the host.             \
+     */                                                                 \
+    haddr = addr + env->tlb_table[mmu_idx][index].addend;               \
+    return glue(atomic_, NAME)((DATA_TYPE *)haddr, val);                \
+}                                                                       \
+
+GEN_ATOMIC_HELPER(fetch_add)
+GEN_ATOMIC_HELPER(fetch_sub)
+GEN_ATOMIC_HELPER(fetch_and)
+GEN_ATOMIC_HELPER(fetch_or)
+GEN_ATOMIC_HELPER(fetch_xor)
+
+GEN_ATOMIC_HELPER(add_fetch)
+GEN_ATOMIC_HELPER(sub_fetch)
+GEN_ATOMIC_HELPER(and_fetch)
+GEN_ATOMIC_HELPER(or_fetch)
+GEN_ATOMIC_HELPER(xor_fetch)
+
+GEN_ATOMIC_HELPER(xchg)
+
+#undef GEN_ATOMIC_HELPER
+
 #endif /* !defined(SOFTMMU_CODE_ACCESS) */
 
 #undef READ_ACCESS_TYPE
diff --git a/tcg/tcg.h b/tcg/tcg.h
index 1c9c8bc..09aab4e 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -1122,6 +1122,36 @@ uint64_t helper_cmpxchgq_mmu(CPUArchState *env, target_ulong addr,
                              uint64_t old, uint64_t new,
                              TCGMemOpIdx oi, uintptr_t retaddr);
 
+#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX)                           \
+TYPE glue(glue(glue(helper_atomic_,                                     \
+                    NAME),                                              \
+               SUFFIX),                                                 \
+          _mmu)(CPUArchState *env, target_ulong addr, TYPE val,         \
+                TCGMemOpIdx oi, uintptr_t retaddr);
+
+#define GEN_ATOMIC_HELPER_ALL(NAME) \
+    GEN_ATOMIC_HELPER(NAME, uint8_t, b)   \
+    GEN_ATOMIC_HELPER(NAME, uint16_t, w)  \
+    GEN_ATOMIC_HELPER(NAME, uint32_t, l)  \
+    GEN_ATOMIC_HELPER(NAME, uint64_t, q)
+
+GEN_ATOMIC_HELPER_ALL(fetch_add)
+GEN_ATOMIC_HELPER_ALL(fetch_sub)
+GEN_ATOMIC_HELPER_ALL(fetch_and)
+GEN_ATOMIC_HELPER_ALL(fetch_or)
+GEN_ATOMIC_HELPER_ALL(fetch_xor)
+
+GEN_ATOMIC_HELPER_ALL(add_fetch)
+GEN_ATOMIC_HELPER_ALL(sub_fetch)
+GEN_ATOMIC_HELPER_ALL(and_fetch)
+GEN_ATOMIC_HELPER_ALL(or_fetch)
+GEN_ATOMIC_HELPER_ALL(xor_fetch)
+
+GEN_ATOMIC_HELPER_ALL(xchg)
+
+#undef GEN_ATOMIC_HELPER_ALL
+#undef GEN_ATOMIC_HELPER
+
 #endif /* CONFIG_SOFTMMU */
 
 #endif /* TCG_H */
-- 
2.5.0



