[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [RFC v8 08/14] softmmu: Add history of excl accesses
From: |
Alvise Rigo |
Subject: |
[Qemu-devel] [RFC v8 08/14] softmmu: Add history of excl accesses |
Date: |
Tue, 19 Apr 2016 15:39:25 +0200 |
Add a circular buffer to store the hw addresses used in the last
EXCLUSIVE_HISTORY_LEN exclusive accesses.
When an address is popped from the buffer, its page is marked as
non-exclusive. In this way we avoid frequently setting and unsetting a
page's exclusive state (which would cause frequent flushes as well).
Suggested-by: Jani Kokkonen <address@hidden>
Suggested-by: Claudio Fontana <address@hidden>
Signed-off-by: Alvise Rigo <address@hidden>
---
cputlb.c | 21 +++++++++++++++++++++
exec.c | 19 +++++++++++++++++++
include/qom/cpu.h | 8 ++++++++
softmmu_llsc_template.h | 1 +
vl.c | 3 +++
5 files changed, 52 insertions(+)
diff --git a/cputlb.c b/cputlb.c
index 58d6f03..02b0d14 100644
--- a/cputlb.c
+++ b/cputlb.c
@@ -475,6 +475,27 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1,
target_ulong addr)
return qemu_ram_addr_from_host_nofail(p);
}
+/* Keep a circular array with the last excl_history.length addresses used for
+ * exclusive accesses. The exiting addresses are marked as non-exclusive. */
+extern CPUExclusiveHistory excl_history;
+/* Record @addr as the most recent exclusive access in the circular
+ * history. Only the page portion (addr & TARGET_PAGE_MASK) is stored.
+ *
+ * Advancing the index evicts the oldest tracked address: that page's
+ * EXCL bit is cleared, so pages age out of the exclusive set instead
+ * of being set/unset (and flushed) on every access.
+ * NOTE(review): no locking is visible here -- presumably the caller
+ * serializes access to the global excl_history; confirm against the
+ * ldlink helper's locking before calling from concurrent paths. */
+static inline void excl_history_put_addr(hwaddr addr)
+{
+ hwaddr last;
+
+ /* Calculate the index of the next exclusive address */
+ excl_history.last_idx = (excl_history.last_idx + 1) % excl_history.length;
+
+ /* Current occupant of the slot: the oldest address in the history,
+  * or EXCLUSIVE_RESET_ADDR if the slot was never written (the array
+  * is filled with 0xff bytes by cpu_exclusive_history_init). */
+ last = excl_history.c_array[excl_history.last_idx];
+
+ /* Unset EXCL bit of the oldest entry */
+ if (last != EXCLUSIVE_RESET_ADDR) {
+ cpu_physical_memory_unset_excl(last);
+ }
+
+ /* Add a new address, overwriting the oldest one */
+ excl_history.c_array[excl_history.last_idx] = addr & TARGET_PAGE_MASK;
+}
+
#define MMUSUFFIX _mmu
/* Generates LoadLink/StoreConditional helpers in softmmu_template.h */
diff --git a/exec.c b/exec.c
index cefee1b..3c54b92 100644
--- a/exec.c
+++ b/exec.c
@@ -177,6 +177,25 @@ struct CPUAddressSpace {
MemoryListener tcg_as_listener;
};
+/* Exclusive memory support */
+CPUExclusiveHistory excl_history;
+/* Allocate and reset the global exclusive-access history.
+ *
+ * The circular array gets EXCLUSIVE_HISTORY_CPU_LEN entries per
+ * possible CPU; the g_assert guards the uint16_t 'length' field
+ * against overflow. Only allocated when TCG is in use -- KVM and
+ * other accelerators do not take the LL/SC slow path.
+ * Filling the array with 0xff bytes makes every entry compare equal
+ * to EXCLUSIVE_RESET_ADDR (ULLONG_MAX) -- assumes hwaddr is an
+ * unsigned 64-bit type; TODO confirm. */
+void cpu_exclusive_history_init(void)
+{
+ /* Initialize exclusive history for atomic instruction handling. */
+ if (tcg_enabled()) {
+ g_assert(EXCLUSIVE_HISTORY_CPU_LEN * max_cpus <= UINT16_MAX);
+ excl_history.length = EXCLUSIVE_HISTORY_CPU_LEN * max_cpus;
+ excl_history.c_array = g_malloc(excl_history.length * sizeof(hwaddr));
+ memset(excl_history.c_array, -1, excl_history.length * sizeof(hwaddr));
+ }
+}
+
+/* Release the history array allocated by cpu_exclusive_history_init().
+ * If init never allocated (non-TCG start), c_array is NULL from static
+ * zero-initialization and g_free(NULL) is a no-op.
+ * NOTE(review): c_array is not reset to NULL after freeing, so a
+ * second call would double-free -- presumably only called once from
+ * res_free(); confirm. */
+void cpu_exclusive_history_free(void)
+{
+ if (tcg_enabled()) {
+ g_free(excl_history.c_array);
+ }
+}
#endif
#if !defined(CONFIG_USER_ONLY)
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index 014851e..de144f6 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -232,7 +232,15 @@ struct kvm_run;
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
/* Atomic insn translation TLB support. */
+/* Circular history of the most recent exclusive (LL/SC) accesses,
+ * storing page-masked physical addresses. A single global instance
+ * is defined in exec.c and sized at startup. */
+typedef struct CPUExclusiveHistory {
+ uint16_t last_idx; /* index of last insertion */
+ uint16_t length; /* total entries: EXCLUSIVE_HISTORY_CPU_LEN * max_cpus */
+ hwaddr *c_array; /* history's circular array */
+} CPUExclusiveHistory;
#define EXCLUSIVE_RESET_ADDR ULLONG_MAX
+/* History slots reserved per possible CPU (see cpu_exclusive_history_init) */
+#define EXCLUSIVE_HISTORY_CPU_LEN 256
+void cpu_exclusive_history_init(void);
+void cpu_exclusive_history_free(void);
/**
* CPUState:
diff --git a/softmmu_llsc_template.h b/softmmu_llsc_template.h
index ca2ac95..1e24fec 100644
--- a/softmmu_llsc_template.h
+++ b/softmmu_llsc_template.h
@@ -77,6 +77,7 @@ WORD_TYPE helper_ldlink_name(CPUArchState *env, target_ulong
addr,
CPUState *cpu;
cpu_physical_memory_set_excl(hw_addr);
+ excl_history_put_addr(hw_addr);
CPU_FOREACH(cpu) {
if (this_cpu != cpu) {
tlb_flush(cpu, 1);
diff --git a/vl.c b/vl.c
index f043009..b22d99b 100644
--- a/vl.c
+++ b/vl.c
@@ -547,6 +547,7 @@ static void res_free(void)
{
g_free(boot_splash_filedata);
boot_splash_filedata = NULL;
+ cpu_exclusive_history_free();
}
static int default_driver_check(void *opaque, QemuOpts *opts, Error **errp)
@@ -4322,6 +4323,8 @@ int main(int argc, char **argv, char **envp)
configure_accelerator(current_machine);
+ cpu_exclusive_history_init();
+
if (qtest_chrdev) {
qtest_init(qtest_chrdev, qtest_log, &error_fatal);
}
--
2.8.0
- [Qemu-devel] [RFC v8 00/14] Slow-path for atomic instruction translation, Alvise Rigo, 2016/04/19
- [Qemu-devel] [RFC v8 03/14] softmmu: Simplify helper_*_st_name, wrap MMIO code, Alvise Rigo, 2016/04/19
- [Qemu-devel] [RFC v8 05/14] softmmu: Add new TLB_EXCL flag, Alvise Rigo, 2016/04/19
- [Qemu-devel] [RFC v8 04/14] softmmu: Simplify helper_*_st_name, wrap RAM code, Alvise Rigo, 2016/04/19
- [Qemu-devel] [RFC v8 01/14] exec.c: Add new exclusive bitmap to ram_list, Alvise Rigo, 2016/04/19
- [Qemu-devel] [RFC v8 06/14] qom: cpu: Add CPUClass hooks for exclusive range, Alvise Rigo, 2016/04/19
- [Qemu-devel] [RFC v8 09/14] softmmu: Honor the new exclusive bitmap, Alvise Rigo, 2016/04/19
- [Qemu-devel] [RFC v8 11/14] tcg: Create new runtime helpers for excl accesses, Alvise Rigo, 2016/04/19
- [Qemu-devel] [RFC v8 10/14] softmmu: Support MMIO exclusive accesses, Alvise Rigo, 2016/04/19
- [Qemu-devel] [RFC v8 08/14] softmmu: Add history of excl accesses,
Alvise Rigo <=
- [Qemu-devel] [RFC v8 12/14] target-arm: translate: Use ld/st excl for atomic insns, Alvise Rigo, 2016/04/19
- [Qemu-devel] [RFC v8 02/14] softmmu: Simplify helper_*_st_name, wrap unaligned code, Alvise Rigo, 2016/04/19
- [Qemu-devel] [RFC v8 07/14] softmmu: Add helpers for a new slowpath, Alvise Rigo, 2016/04/19
- [Qemu-devel] [RFC v8 14/14] target-arm: aarch64: Use ls/st exclusive for atomic insns, Alvise Rigo, 2016/04/19
- [Qemu-devel] [RFC v8 13/14] target-arm: cpu64: use custom set_excl hook, Alvise Rigo, 2016/04/19