qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Qemu-devel] [RFC 3/5] softmmu: Add helpers for a new slow-path


From: Alvise Rigo
Subject: [Qemu-devel] [RFC 3/5] softmmu: Add helpers for a new slow-path
Date: Wed, 6 May 2015 17:38:05 +0200

The new helpers rely on the legacy ones to perform the actual read/write.

The StoreConditional helper (helper_le_stcond_name) returns 1 if the
store has to fail due to a concurrent access to the same page made by
another vCPU.  A 'concurrent access' can be a store made by *any* vCPU
(although some implementations allow stores made by the CPU that issued
the LoadLink).

These helpers also update the TLB entry of the page involved in the
LL/SC, so that all the following accesses made by any vCPU will follow
the slow path.
In real multi-threading, these helpers will require to temporarily pause
the execution of the other vCPUs in order to update accordingly (flush)
the TLB cache.

Some corner cases still have to be properly handled, like when a vCPU doesn't
pair a LoadLink with a StoreConditional.

Suggested-by: Jani Kokkonen <address@hidden>
Suggested-by: Claudio Fontana <address@hidden>
Signed-off-by: Alvise Rigo <address@hidden>
---
 cputlb.c                |   4 +
 softmmu_llsc_template.h | 233 ++++++++++++++++++++++++++++++++++++++++++++++++
 softmmu_template.h      |   4 +
 tcg/tcg-be-ldst.h       |   2 +
 tcg/tcg.h               |  20 +++++
 5 files changed, 263 insertions(+)
 create mode 100644 softmmu_llsc_template.h

diff --git a/cputlb.c b/cputlb.c
index 3e4ccba..415196d 100644
--- a/cputlb.c
+++ b/cputlb.c
@@ -378,8 +378,12 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, 
target_ulong addr)
 #define SHIFT 1
 #include "softmmu_template.h"
 
+/* Generates LoadLink/StoreConditional helpers (for the time being only 4 
bytes) */
+#define GEN_EXCLUSIVE_HELPERS
 #define SHIFT 2
 #include "softmmu_template.h"
+#include "softmmu_llsc_template.h"
+#undef GEN_EXCLUSIVE_HELPERS
 
 #define SHIFT 3
 #include "softmmu_template.h"
diff --git a/softmmu_llsc_template.h b/softmmu_llsc_template.h
new file mode 100644
index 0000000..9f25db4
--- /dev/null
+++ b/softmmu_llsc_template.h
@@ -0,0 +1,233 @@
+/*
+ *  Software MMU support (exclusive load/store operations)
+ *
+ * Generate helpers used by TCG for qemu_ldlink/stcond ops and code load
+ * functions.
+ *
+ * Included from target op helpers and exec.c.
+ *
+ *  Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DATA_SIZE (1 << SHIFT)
+
+#if DATA_SIZE == 8
+#define SUFFIX q
+#define LSUFFIX q
+#define SDATA_TYPE  int64_t
+#define DATA_TYPE  uint64_t
+#elif DATA_SIZE == 4
+#define SUFFIX l
+#define LSUFFIX l
+#define SDATA_TYPE  int32_t
+#define DATA_TYPE  uint32_t
+#elif DATA_SIZE == 2
+#define SUFFIX w
+#define LSUFFIX uw
+#define SDATA_TYPE  int16_t
+#define DATA_TYPE  uint16_t
+#elif DATA_SIZE == 1
+#define SUFFIX b
+#define LSUFFIX ub
+#define SDATA_TYPE  int8_t
+#define DATA_TYPE  uint8_t
+#else
+#error unsupported data size
+#endif
+
+/* For the benefit of TCG generated code, we want to avoid the complication
+   of ABI-specific return type promotion and always return a value extended
+   to the register size of the host.  This is tcg_target_long, except in the
+   case of a 32-bit host and 64-bit data, and for that we always have
+   uint64_t.  Don't bother with this widened value for SOFTMMU_CODE_ACCESS.  */
+#if DATA_SIZE == 8
+# define WORD_TYPE  DATA_TYPE
+# define USUFFIX    SUFFIX
+#else
+# define WORD_TYPE  tcg_target_ulong
+# define USUFFIX    glue(u, SUFFIX)
+# define SSUFFIX    glue(s, SUFFIX)
+#endif
+
+
+
+#if DATA_SIZE == 1
+# define helper_le_ldlink_name  glue(glue(helper_ret_ldlink, USUFFIX), 
MMUSUFFIX)
+# define helper_be_ldlink_name  helper_le_ldex_name
+# define helper_le_ldlinks_name glue(glue(helper_ret_ldlink, SSUFFIX), 
MMUSUFFIX)
+# define helper_be_ldlinks_name helper_le_ldexs_name
+# define helper_le_stcond_name  glue(glue(helper_ret_stcond, SUFFIX), 
MMUSUFFIX)
+# define helper_be_stcond_name  helper_le_stex_name
+#else
+# define helper_le_ldlink_name  glue(glue(helper_le_ldlink, USUFFIX), 
MMUSUFFIX)
+# define helper_be_ldlink_name  glue(glue(helper_be_ldlink, USUFFIX), 
MMUSUFFIX)
+# define helper_le_ldlinks_name glue(glue(helper_le_ldlink, SSUFFIX), 
MMUSUFFIX)
+# define helper_be_ldlinks_name glue(glue(helper_be_ldlink, SSUFFIX), 
MMUSUFFIX)
+# define helper_le_stcond_name  glue(glue(helper_le_stcond, SUFFIX), MMUSUFFIX)
+# define helper_be_stcond_name  glue(glue(helper_be_stcond, SUFFIX), MMUSUFFIX)
+#endif
+
+#ifdef TARGET_WORDS_BIGENDIAN
+# define helper_te_ldlink_name  helper_be_ldlink_name
+# define helper_te_stcond_name  helper_be_stcond_name
+#else
+# define helper_te_ldlink_name  helper_le_ldlink_name
+# define helper_te_stcond_name  helper_le_stcond_name
+#endif
+
+/* helpers from cpu_ldst.h, byte-order independent versions */
+#if DATA_SIZE == 1
+#define helper_ld_legacy glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
+#define helper_st_legacy glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)
+#else
+#define helper_ld_legacy glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX)
+#define helper_st_legacy glue(glue(helper_le_st, SUFFIX), MMUSUFFIX)
+#endif
+
+#define is_write_tlb_entry_set(env, page, index)                             \
+({                                                                           \
+    (addr & TARGET_PAGE_MASK)                                                \
+         == ((env->tlb_table[mmu_idx][index].addr_write) &                   \
+                 (TARGET_PAGE_MASK | TLB_INVALID_MASK));                     \
+})                                                                           \
+
+WORD_TYPE helper_le_ldlink_name(CPUArchState *env, target_ulong addr, int 
mmu_idx,
+                                uintptr_t retaddr)
+{
+    WORD_TYPE ret;
+    int index;
+    CPUState *cpu;
+    target_ulong tlb_addr;
+    hwaddr xlat, sz;
+    MemoryRegionSection *section;
+
+    env->ll_sc_context = 1;
+
+    /* Use the proper load helper from cpu_ldst.h */
+    ret = helper_ld_legacy(env, addr, mmu_idx, retaddr);
+
+    /* The last legacy access ensures that the TLB entry for 'addr' has been
+     * created. */
+    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+    tlb_addr = env->tlb_table[mmu_idx][index].addr_read;
+
+    cpu = ENV_GET_CPU(env);
+    section = address_space_translate_for_iotlb(cpu, tlb_addr, &xlat, &sz);
+    cpu_physical_memory_clear_excl_dirty(section->mr->ram_addr + xlat);
+
+    /* Flush the TLB entry to force slow-path for following accesses. */
+    tlb_flush_page(cpu, tlb_addr);
+
+    /* Invalidate the TLB entry for the other processors. The next TLB entries
+     * for this page will have the TLB_EXCL flag set. */
+    CPU_FOREACH(cpu) {
+        if (cpu != current_cpu) {
+            tlb_flush(cpu, 1);
+        }
+    }
+
+    return ret;
+}
+
+uint32_t helper_le_stcond_name(CPUArchState *env, target_ulong addr,
+                               DATA_TYPE val, int mmu_idx, uintptr_t retaddr)
+{
+    CPUState *cpu;
+    uint32_t ret;
+    int index;
+    target_ulong tlb_addr;
+    hwaddr xlat, sz;
+    MemoryRegionSection *section;
+    ram_addr_t ram_addr;
+
+    /* If the TLB entry is not the right one, create it. */
+    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+    if (!is_write_tlb_entry_set(env, addr, index)) {
+        tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
+    }
+
+    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+
+    cpu = ENV_GET_CPU(env);
+    section = address_space_translate_for_iotlb(cpu, tlb_addr, &xlat, &sz);
+    ram_addr = section->mr->ram_addr + xlat;
+    if (cpu_physical_memory_excl_is_dirty(ram_addr) || !env->ll_sc_context) {
+        /* Another vCPU has accessed the memory after the LoadLink, or no
+         * link has been previously set. */
+        ret = 1;
+
+        goto out;
+    }
+
+    helper_st_legacy(env, addr, val, mmu_idx, retaddr);
+
+    /* Set the page as dirty to avoid the creation of TLB entries with the
+     * TLB_EXCL bit set. */
+    cpu_physical_memory_set_excl_dirty(ram_addr);
+
+    /* The StoreConditional succeeded */
+    ret = 0;
+
+out:
+    /* Flush the page since it is no more protected. */
+    tlb_flush_page(cpu, tlb_addr);
+
+    CPU_FOREACH(cpu) {
+        if (cpu != current_cpu) {
+            tlb_flush(cpu, 1);
+        }
+    }
+    env->ll_sc_context = 0;
+
+    return ret;
+}
+
+#undef helper_le_ldlink_name
+#undef helper_be_ldlink_name
+#undef helper_le_ldlinks_name
+#undef helper_be_ldlinks_name
+#undef helper_le_stcond_name
+#undef helper_be_stcond_name
+#undef helper_te_ldlink_name
+#undef helper_te_stcond_name
+#undef helper_ld_legacy
+#undef helper_st_legacy
+
+/* undef softmmu_template macros */
+#undef READ_ACCESS_TYPE
+#undef SHIFT
+#undef DATA_TYPE
+#undef SUFFIX
+#undef LSUFFIX
+#undef DATA_SIZE
+#undef ADDR_READ
+#undef WORD_TYPE
+#undef SDATA_TYPE
+#undef USUFFIX
+#undef SSUFFIX
+#undef BSWAP
+#undef TGT_BE
+#undef TGT_LE
+#undef CPU_BE
+#undef CPU_LE
+#undef helper_le_ld_name
+#undef helper_be_ld_name
+#undef helper_le_lds_name
+#undef helper_be_lds_name
+#undef helper_le_st_name
+#undef helper_be_st_name
+#undef helper_te_ld_name
+#undef helper_te_st_name
diff --git a/softmmu_template.h b/softmmu_template.h
index 1ac99da..b15b2c1 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -594,6 +594,9 @@ glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, 
target_ulong addr,
 
 #endif /* !defined(SOFTMMU_CODE_ACCESS) */
 
+#ifdef GEN_EXCLUSIVE_HELPERS
+/* All the defined macros will be undeffed in softmmu_llsc_template.h */
+#else
 #undef READ_ACCESS_TYPE
 #undef SHIFT
 #undef DATA_TYPE
@@ -618,3 +621,4 @@ glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, 
target_ulong addr,
 #undef helper_be_st_name
 #undef helper_te_ld_name
 #undef helper_te_st_name
+#endif
diff --git a/tcg/tcg-be-ldst.h b/tcg/tcg-be-ldst.h
index 4a45102..7cba87b 100644
--- a/tcg/tcg-be-ldst.h
+++ b/tcg/tcg-be-ldst.h
@@ -24,6 +24,8 @@
 
 typedef struct TCGLabelQemuLdst {
     bool is_ld;             /* qemu_ld: true, qemu_st: false */
+    bool is_llsc;           /* true if LoadLink/StoreConditional exclusive */
+    TCGReg llsc_success;    /* true if the StoreConditional succeeded */
     TCGMemOp opc;
     TCGType type;           /* result type of a load */
     TCGReg addrlo_reg;      /* reg index for low word of guest virtual addr */
diff --git a/tcg/tcg.h b/tcg/tcg.h
index add7f75..0607d07 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -898,6 +898,15 @@ tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, 
target_ulong addr,
                                     int mmu_idx, uintptr_t retaddr);
 uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
                            int mmu_idx, uintptr_t retaddr);
+/* Exclusive variants */
+tcg_target_ulong helper_ret_ldlinkub_mmu(CPUArchState *env, target_ulong addr,
+                                       int mmu_idx, uintptr_t retaddr);
+tcg_target_ulong helper_le_ldlinkuw_mmu(CPUArchState *env, target_ulong addr,
+                                      int mmu_idx, uintptr_t retaddr);
+tcg_target_ulong helper_le_ldlinkul_mmu(CPUArchState *env, target_ulong addr,
+                                      int mmu_idx, uintptr_t retaddr);
+uint64_t helper_le_ldlinkq_mmu(CPUArchState *env, target_ulong addr,
+                             int mmu_idx, uintptr_t retaddr);
 
 /* Value sign-extended to tcg register size.  */
 tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
@@ -925,6 +934,17 @@ void helper_be_stl_mmu(CPUArchState *env, target_ulong 
addr, uint32_t val,
                        int mmu_idx, uintptr_t retaddr);
 void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                        int mmu_idx, uintptr_t retaddr);
+/* Exclusive variants */
+uint32_t helper_ret_stcondb_mmu(CPUArchState *env, target_ulong addr, uint8_t 
val,
+                              int mmu_idx, uintptr_t retaddr);
+uint32_t helper_le_stcondw_mmu(CPUArchState *env, target_ulong addr, uint16_t 
val,
+                             int mmu_idx, uintptr_t retaddr);
+uint32_t helper_le_stcondl_mmu(CPUArchState *env, target_ulong addr, uint32_t 
val,
+                             int mmu_idx, uintptr_t retaddr);
+uint32_t helper_le_stcondq_mmu(CPUArchState *env, target_ulong addr, uint64_t 
val,
+                             int mmu_idx, uintptr_t retaddr);
+
+
 
 /* Temporary aliases until backends are converted.  */
 #ifdef TARGET_WORDS_BIGENDIAN
-- 
2.4.0




reply via email to

[Prev in Thread] Current Thread [Next in Thread]