[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [RFC v7 06/16] qom: cpu: Add CPUClass hooks for exclusive range
From: |
Alvise Rigo |
Subject: |
[Qemu-devel] [RFC v7 06/16] qom: cpu: Add CPUClass hooks for exclusive range |
Date: |
Fri, 29 Jan 2016 10:32:35 +0100 |
The excl_protected_range is a hwaddr range set by the VCPU at the
execution of a LoadLink instruction. If a normal access writes to this
range, the corresponding StoreCond will fail.
Each architecture can set the exclusive range when issuing the LoadLink
operation through a CPUClass hook. This comes in handy to emulate, for
instance, the exclusive monitor implemented in some ARM architectures
(more precisely, the Exclusive Reservation Granule).
In addition, add another CPUClass hook called to decide whether a
StoreCond has to fail or not.
Suggested-by: Jani Kokkonen <address@hidden>
Suggested-by: Claudio Fontana <address@hidden>
Signed-off-by: Alvise Rigo <address@hidden>
---
include/qom/cpu.h | 15 +++++++++++++++
qom/cpu.c | 20 ++++++++++++++++++++
2 files changed, 35 insertions(+)
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index 2e5229d..682c81d 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -29,6 +29,7 @@
#include "qemu/queue.h"
#include "qemu/thread.h"
#include "qemu/typedefs.h"
+#include "qemu/range.h"
typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size,
void *opaque);
@@ -183,6 +184,12 @@ typedef struct CPUClass {
void (*cpu_exec_exit)(CPUState *cpu);
bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
+ /* Atomic instruction handling */
+ void (*cpu_set_excl_protected_range)(CPUState *cpu, hwaddr addr,
+ hwaddr size);
+ int (*cpu_valid_excl_access)(CPUState *cpu, hwaddr addr,
+ hwaddr size);
+
void (*disas_set_info)(CPUState *cpu, disassemble_info *info);
} CPUClass;
@@ -219,6 +226,9 @@ struct kvm_run;
#define TB_JMP_CACHE_BITS 12
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
+/* Atomic insn translation TLB support. */
+#define EXCLUSIVE_RESET_ADDR ULLONG_MAX
+
/**
* CPUState:
* @cpu_index: CPU index (informative).
@@ -341,6 +351,11 @@ struct CPUState {
*/
bool throttle_thread_scheduled;
+ /* vCPU's exclusive address range.
+ * The address is set to EXCLUSIVE_RESET_ADDR if the vCPU is not
+ * in the middle of a LL/SC. */
+ struct Range excl_protected_range;
+
/* Note that this is accessed at the start of every TB via a negative
offset from AREG0. Leave this field at the end so as to make the
(absolute value) offset as small as possible. This reduces code
diff --git a/qom/cpu.c b/qom/cpu.c
index 8f537a4..a5d360c 100644
--- a/qom/cpu.c
+++ b/qom/cpu.c
@@ -203,6 +203,24 @@ static bool cpu_common_exec_interrupt(CPUState *cpu, int
int_req)
return false;
}
+static void cpu_common_set_excl_range(CPUState *cpu, hwaddr addr, hwaddr size)
+{
+ cpu->excl_protected_range.begin = addr;
+ cpu->excl_protected_range.end = addr + size;
+}
+
+static int cpu_common_valid_excl_access(CPUState *cpu, hwaddr addr, hwaddr
size)
+{
+ /* Check if the excl range completely covers the access */
+ if (cpu->excl_protected_range.begin <= addr &&
+ cpu->excl_protected_range.end >= addr + size) {
+
+ return 1;
+ }
+
+ return 0;
+}
+
void cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
int flags)
{
@@ -355,6 +373,8 @@ static void cpu_class_init(ObjectClass *klass, void *data)
k->cpu_exec_enter = cpu_common_noop;
k->cpu_exec_exit = cpu_common_noop;
k->cpu_exec_interrupt = cpu_common_exec_interrupt;
+ k->cpu_set_excl_protected_range = cpu_common_set_excl_range;
+ k->cpu_valid_excl_access = cpu_common_valid_excl_access;
dc->realize = cpu_common_realizefn;
/*
* Reason: CPUs still need special care by board code: wiring up
--
2.7.0
- [Qemu-devel] [RFC v7 00/16] Slow-path for atomic instruction translation, Alvise Rigo, 2016/01/29
- [Qemu-devel] [RFC v7 05/16] softmmu: Add new TLB_EXCL flag, Alvise Rigo, 2016/01/29
- [Qemu-devel] [RFC v7 02/16] softmmu: Simplify helper_*_st_name, wrap unaligned code, Alvise Rigo, 2016/01/29
- [Qemu-devel] [RFC v7 03/16] softmmu: Simplify helper_*_st_name, wrap MMIO code, Alvise Rigo, 2016/01/29
- [Qemu-devel] [RFC v7 06/16] qom: cpu: Add CPUClass hooks for exclusive range,
Alvise Rigo <=
- [Qemu-devel] [RFC v7 08/16] softmmu: Honor the new exclusive bitmap, Alvise Rigo, 2016/01/29
- [Qemu-devel] [RFC v7 01/16] exec.c: Add new exclusive bitmap to ram_list, Alvise Rigo, 2016/01/29
- [Qemu-devel] [RFC v7 04/16] softmmu: Simplify helper_*_st_name, wrap RAM code, Alvise Rigo, 2016/01/29
- [Qemu-devel] [RFC v7 09/16] softmmu: Include MMIO/invalid exclusive accesses, Alvise Rigo, 2016/01/29
- [Qemu-devel] [RFC v7 15/16] target-arm: cpu64: use custom set_excl hook, Alvise Rigo, 2016/01/29
- [Qemu-devel] [RFC v7 10/16] softmmu: Protect MMIO exclusive range, Alvise Rigo, 2016/01/29
- [Qemu-devel] [RFC v7 14/16] target-arm: translate: Use ld/st excl for atomic insns, Alvise Rigo, 2016/01/29
- [Qemu-devel] [RFC v7 16/16] target-arm: aarch64: add atomic instructions, Alvise Rigo, 2016/01/29
- [Qemu-devel] [RFC v7 07/16] softmmu: Add helpers for a new slowpath, Alvise Rigo, 2016/01/29
- [Qemu-devel] [RFC v7 12/16] configure: Use slow-path for atomic only when the softmmu is enabled, Alvise Rigo, 2016/01/29