[Qemu-devel] [PULL 10/28] cpus: pass CPUState to run_on_cpu helpers
From: Paolo Bonzini
Subject: [Qemu-devel] [PULL 10/28] cpus: pass CPUState to run_on_cpu helpers
Date: Mon, 26 Sep 2016 15:40:40 +0200
From: Alex Bennée <address@hidden>

CPUState is a fairly common pointer to pass to these helpers. This means
that if you need any other argument for the async_run_on_cpu case you end
up having to g_malloc a structure just to carry the additional data into
the routine. For the current users this isn't a big deal, but for MTTCG it
gets cumbersome when the only other parameter is often an address.

This adds the typedef run_on_cpu_func for helper functions, which take an
explicit CPUState * as their first parameter. All users of run_on_cpu and
async_run_on_cpu have had their helpers updated to use CPUState where
available.
Signed-off-by: Alex Bennée <address@hidden>
[Sergey Fedorov:
- eliminate more CPUState in user data;
- remove unnecessary user data passing;
- fix target-s390x/kvm.c and target-s390x/misc_helper.c]
Signed-off-by: Sergey Fedorov <address@hidden>
Acked-by: David Gibson <address@hidden> (ppc parts)
Reviewed-by: Christian Borntraeger <address@hidden> (s390 parts)
Signed-off-by: Alex Bennée <address@hidden>
Message-Id: <address@hidden>
Reviewed-by: Richard Henderson <address@hidden>
Signed-off-by: Paolo Bonzini <address@hidden>
---
cpus.c | 15 ++++---
hw/i386/kvm/apic.c | 5 +--
hw/i386/kvmvapic.c | 6 +--
hw/ppc/ppce500_spin.c | 31 +++++----------
hw/ppc/spapr.c | 6 +--
hw/ppc/spapr_hcall.c | 17 ++++----
include/qom/cpu.h | 8 ++--
kvm-all.c | 21 ++++------
target-i386/helper.c | 19 ++++-----
target-i386/kvm.c | 6 +--
target-s390x/cpu.c | 4 +-
target-s390x/cpu.h | 7 +---
target-s390x/kvm.c | 98 +++++++++++++++++++++++-----------------------
target-s390x/misc_helper.c | 4 +-
14 files changed, 109 insertions(+), 138 deletions(-)
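
For illustration only (not part of the patch): a minimal sketch of the MTTCG-style
case described in the commit message. The helper names and the tlb_flush_page()
call are assumptions for the example, not code from this series. With the old
void (*)(void *) signature both the CPUState and the extra argument have to travel
through *data, forcing a per-request allocation; with run_on_cpu_func the vCPU
arrives as the first argument and a single scalar fits in the data pointer itself.

/* old signature: a wrapper struct has to be allocated for each request */
struct FlushData {
    CPUState *cpu;
    target_ulong addr;
};

static void do_flush_old(void *data)
{
    struct FlushData *fd = data;

    tlb_flush_page(fd->cpu, fd->addr);
    g_free(fd);
}

static void request_flush_old(CPUState *cpu, target_ulong addr)
{
    struct FlushData *fd = g_new(struct FlushData, 1);

    fd->cpu = cpu;
    fd->addr = addr;
    async_run_on_cpu(cpu, do_flush_old, fd);
}

/* new signature: the vCPU is passed explicitly, the address rides in *data */
static void do_flush_new(CPUState *cpu, void *data)
{
    tlb_flush_page(cpu, (target_ulong)(uintptr_t)data);
}

static void request_flush_new(CPUState *cpu, target_ulong addr)
{
    async_run_on_cpu(cpu, do_flush_new, (void *)(uintptr_t)addr);
}
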
diff --git a/cpus.c b/cpus.c
index e39ccb7..1a2a9b0 100644
--- a/cpus.c
+++ b/cpus.c
@@ -557,9 +557,8 @@ static const VMStateDescription vmstate_timers = {
}
};
-static void cpu_throttle_thread(void *opaque)
+static void cpu_throttle_thread(CPUState *cpu, void *opaque)
{
- CPUState *cpu = opaque;
double pct;
double throttle_ratio;
long sleeptime_ns;
@@ -589,7 +588,7 @@ static void cpu_throttle_timer_tick(void *opaque)
}
CPU_FOREACH(cpu) {
if (!atomic_xchg(&cpu->throttle_thread_scheduled, 1)) {
- async_run_on_cpu(cpu, cpu_throttle_thread, cpu);
+ async_run_on_cpu(cpu, cpu_throttle_thread, NULL);
}
}
@@ -917,12 +916,12 @@ void qemu_init_cpu_loop(void)
qemu_thread_get_self(&io_thread);
}
-void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
+void run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
{
struct qemu_work_item wi;
if (qemu_cpu_is_self(cpu)) {
- func(data);
+ func(cpu, data);
return;
}
@@ -950,12 +949,12 @@ void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
}
}
-void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
+void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
{
struct qemu_work_item *wi;
if (qemu_cpu_is_self(cpu)) {
- func(data);
+ func(cpu, data);
return;
}
@@ -1006,7 +1005,7 @@ static void flush_queued_work(CPUState *cpu)
cpu->queued_work_last = NULL;
}
qemu_mutex_unlock(&cpu->work_mutex);
- wi->func(wi->data);
+ wi->func(cpu, wi->data);
qemu_mutex_lock(&cpu->work_mutex);
if (wi->free) {
g_free(wi);
diff --git a/hw/i386/kvm/apic.c b/hw/i386/kvm/apic.c
index f57fed1..c016e63 100644
--- a/hw/i386/kvm/apic.c
+++ b/hw/i386/kvm/apic.c
@@ -125,7 +125,7 @@ static void kvm_apic_vapic_base_update(APICCommonState *s)
}
}
-static void kvm_apic_put(void *data)
+static void kvm_apic_put(CPUState *cs, void *data)
{
APICCommonState *s = data;
struct kvm_lapic_state kapic;
@@ -146,10 +146,9 @@ static void kvm_apic_post_load(APICCommonState *s)
run_on_cpu(CPU(s->cpu), kvm_apic_put, s);
}
-static void do_inject_external_nmi(void *data)
+static void do_inject_external_nmi(CPUState *cpu, void *data)
{
APICCommonState *s = data;
- CPUState *cpu = CPU(s->cpu);
uint32_t lvt;
int ret;
diff --git a/hw/i386/kvmvapic.c b/hw/i386/kvmvapic.c
index a1cd9b5..74a549b 100644
--- a/hw/i386/kvmvapic.c
+++ b/hw/i386/kvmvapic.c
@@ -483,7 +483,7 @@ typedef struct VAPICEnableTPRReporting {
bool enable;
} VAPICEnableTPRReporting;
-static void vapic_do_enable_tpr_reporting(void *data)
+static void vapic_do_enable_tpr_reporting(CPUState *cpu, void *data)
{
VAPICEnableTPRReporting *info = data;
@@ -734,10 +734,10 @@ static void vapic_realize(DeviceState *dev, Error **errp)
nb_option_roms++;
}
-static void do_vapic_enable(void *data)
+static void do_vapic_enable(CPUState *cs, void *data)
{
VAPICROMState *s = data;
- X86CPU *cpu = X86_CPU(first_cpu);
+ X86CPU *cpu = X86_CPU(cs);
static const uint8_t enabled = 1;
cpu_physical_memory_write(s->vapic_paddr + offsetof(VAPICState, enabled),
diff --git a/hw/ppc/ppce500_spin.c b/hw/ppc/ppce500_spin.c
index 22c584e..8e16f65 100644
--- a/hw/ppc/ppce500_spin.c
+++ b/hw/ppc/ppce500_spin.c
@@ -54,11 +54,6 @@ typedef struct SpinState {
SpinInfo spin[MAX_CPUS];
} SpinState;
-typedef struct spin_kick {
- PowerPCCPU *cpu;
- SpinInfo *spin;
-} SpinKick;
-
static void spin_reset(void *opaque)
{
SpinState *s = opaque;
@@ -89,16 +84,15 @@ static void mmubooke_create_initial_mapping(CPUPPCState *env,
env->tlb_dirty = true;
}
-static void spin_kick(void *data)
+static void spin_kick(CPUState *cs, void *data)
{
- SpinKick *kick = data;
- CPUState *cpu = CPU(kick->cpu);
- CPUPPCState *env = &kick->cpu->env;
- SpinInfo *curspin = kick->spin;
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+ SpinInfo *curspin = data;
hwaddr map_size = 64 * 1024 * 1024;
hwaddr map_start;
- cpu_synchronize_state(cpu);
+ cpu_synchronize_state(cs);
stl_p(&curspin->pir, env->spr[SPR_BOOKE_PIR]);
env->nip = ldq_p(&curspin->addr) & (map_size - 1);
env->gpr[3] = ldq_p(&curspin->r3);
@@ -112,10 +106,10 @@ static void spin_kick(void *data)
map_start = ldq_p(&curspin->addr) & ~(map_size - 1);
mmubooke_create_initial_mapping(env, 0, map_start, map_size);
- cpu->halted = 0;
- cpu->exception_index = -1;
- cpu->stopped = false;
- qemu_cpu_kick(cpu);
+ cs->halted = 0;
+ cs->exception_index = -1;
+ cs->stopped = false;
+ qemu_cpu_kick(cs);
}
static void spin_write(void *opaque, hwaddr addr, uint64_t value,
@@ -153,12 +147,7 @@ static void spin_write(void *opaque, hwaddr addr, uint64_t value,
if (!(ldq_p(&curspin->addr) & 1)) {
/* run CPU */
- SpinKick kick = {
- .cpu = POWERPC_CPU(cpu),
- .spin = curspin,
- };
-
- run_on_cpu(cpu, spin_kick, &kick);
+ run_on_cpu(cpu, spin_kick, curspin);
}
}
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 9b506d5..aa067ae 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -2134,10 +2134,8 @@ static void spapr_machine_finalizefn(Object *obj)
g_free(spapr->kvm_type);
}
-static void ppc_cpu_do_nmi_on_cpu(void *arg)
+static void ppc_cpu_do_nmi_on_cpu(CPUState *cs, void *arg)
{
- CPUState *cs = arg;
-
cpu_synchronize_state(cs);
ppc_cpu_do_system_reset(cs);
}
@@ -2147,7 +2145,7 @@ static void spapr_nmi(NMIState *n, int cpu_index, Error **errp)
CPUState *cs;
CPU_FOREACH(cs) {
- async_run_on_cpu(cs, ppc_cpu_do_nmi_on_cpu, cs);
+ async_run_on_cpu(cs, ppc_cpu_do_nmi_on_cpu, NULL);
}
}
diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c
index 290a712..c5e7e8c 100644
--- a/hw/ppc/spapr_hcall.c
+++ b/hw/ppc/spapr_hcall.c
@@ -13,19 +13,18 @@
#include "kvm_ppc.h"
struct SPRSyncState {
- CPUState *cs;
int spr;
target_ulong value;
target_ulong mask;
};
-static void do_spr_sync(void *arg)
+static void do_spr_sync(CPUState *cs, void *arg)
{
struct SPRSyncState *s = arg;
- PowerPCCPU *cpu = POWERPC_CPU(s->cs);
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
CPUPPCState *env = &cpu->env;
- cpu_synchronize_state(s->cs);
+ cpu_synchronize_state(cs);
env->spr[s->spr] &= ~s->mask;
env->spr[s->spr] |= s->value;
}
@@ -34,7 +33,6 @@ static void set_spr(CPUState *cs, int spr, target_ulong value,
target_ulong mask)
{
struct SPRSyncState s = {
- .cs = cs,
.spr = spr,
.value = value,
.mask = mask
@@ -909,17 +907,17 @@ static target_ulong cas_get_option_vector(int vector, target_ulong table)
}
typedef struct {
- PowerPCCPU *cpu;
uint32_t cpu_version;
Error *err;
} SetCompatState;
-static void do_set_compat(void *arg)
+static void do_set_compat(CPUState *cs, void *arg)
{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
SetCompatState *s = arg;
- cpu_synchronize_state(CPU(s->cpu));
- ppc_set_compat(s->cpu, s->cpu_version, &s->err);
+ cpu_synchronize_state(cs);
+ ppc_set_compat(cpu, s->cpu_version, &s->err);
}
#define get_compat_level(cpuver) ( \
@@ -1015,7 +1013,6 @@ static target_ulong h_client_architecture_support(PowerPCCPU *cpu_,
if (old_cpu_version != cpu_version) {
CPU_FOREACH(cs) {
SetCompatState s = {
- .cpu = POWERPC_CPU(cs),
.cpu_version = cpu_version,
.err = NULL,
};
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index ce0c406..4aa9e61 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -232,9 +232,11 @@ struct kvm_run;
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
/* work queue */
+typedef void (*run_on_cpu_func)(CPUState *cpu, void *data);
+
struct qemu_work_item {
struct qemu_work_item *next;
- void (*func)(void *data);
+ run_on_cpu_func func;
void *data;
int done;
bool free;
@@ -623,7 +625,7 @@ bool cpu_is_stopped(CPUState *cpu);
*
* Schedules the function @func for execution on the vCPU @cpu.
*/
-void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);
+void run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data);
/**
* async_run_on_cpu:
@@ -633,7 +635,7 @@ void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);
*
* Schedules the function @func for execution on the vCPU @cpu asynchronously.
*/
-void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);
+void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data);
/**
* qemu_get_cpu:
diff --git a/kvm-all.c b/kvm-all.c
index 8a4382e..fc2898a 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -1847,10 +1847,8 @@ void kvm_flush_coalesced_mmio_buffer(void)
s->coalesced_flush_in_progress = false;
}
-static void do_kvm_cpu_synchronize_state(void *arg)
+static void do_kvm_cpu_synchronize_state(CPUState *cpu, void *arg)
{
- CPUState *cpu = arg;
-
if (!cpu->kvm_vcpu_dirty) {
kvm_arch_get_registers(cpu);
cpu->kvm_vcpu_dirty = true;
@@ -1860,34 +1858,30 @@ static void do_kvm_cpu_synchronize_state(void *arg)
void kvm_cpu_synchronize_state(CPUState *cpu)
{
if (!cpu->kvm_vcpu_dirty) {
- run_on_cpu(cpu, do_kvm_cpu_synchronize_state, cpu);
+ run_on_cpu(cpu, do_kvm_cpu_synchronize_state, NULL);
}
}
-static void do_kvm_cpu_synchronize_post_reset(void *arg)
+static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, void *arg)
{
- CPUState *cpu = arg;
-
kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
cpu->kvm_vcpu_dirty = false;
}
void kvm_cpu_synchronize_post_reset(CPUState *cpu)
{
- run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, cpu);
+ run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, NULL);
}
-static void do_kvm_cpu_synchronize_post_init(void *arg)
+static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, void *arg)
{
- CPUState *cpu = arg;
-
kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
cpu->kvm_vcpu_dirty = false;
}
void kvm_cpu_synchronize_post_init(CPUState *cpu)
{
- run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, cpu);
+ run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, NULL);
}
int kvm_cpu_exec(CPUState *cpu)
@@ -2216,7 +2210,7 @@ struct kvm_set_guest_debug_data {
int err;
};
-static void kvm_invoke_set_guest_debug(void *data)
+static void kvm_invoke_set_guest_debug(CPUState *unused_cpu, void *data)
{
struct kvm_set_guest_debug_data *dbg_data = data;
@@ -2234,7 +2228,6 @@ int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
}
kvm_arch_update_guest_debug(cpu, &data.dbg);
- data.cpu = cpu;
run_on_cpu(cpu, kvm_invoke_set_guest_debug, &data);
return data.err;
diff --git a/target-i386/helper.c b/target-i386/helper.c
index 1c250b8..9bc961b 100644
--- a/target-i386/helper.c
+++ b/target-i386/helper.c
@@ -1113,7 +1113,6 @@ out:
typedef struct MCEInjectionParams {
Monitor *mon;
- X86CPU *cpu;
int bank;
uint64_t status;
uint64_t mcg_status;
@@ -1122,14 +1121,14 @@ typedef struct MCEInjectionParams {
int flags;
} MCEInjectionParams;
-static void do_inject_x86_mce(void *data)
+static void do_inject_x86_mce(CPUState *cs, void *data)
{
MCEInjectionParams *params = data;
- CPUX86State *cenv = &params->cpu->env;
- CPUState *cpu = CPU(params->cpu);
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *cenv = &cpu->env;
uint64_t *banks = cenv->mce_banks + 4 * params->bank;
- cpu_synchronize_state(cpu);
+ cpu_synchronize_state(cs);
/*
* If there is an MCE exception being processed, ignore this SRAO MCE
@@ -1149,7 +1148,7 @@ static void do_inject_x86_mce(void *data)
if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
monitor_printf(params->mon,
"CPU %d: Uncorrected error reporting disabled\n",
- cpu->cpu_index);
+ cs->cpu_index);
return;
}
@@ -1161,7 +1160,7 @@ static void do_inject_x86_mce(void *data)
monitor_printf(params->mon,
"CPU %d: Uncorrected error reporting disabled for"
" bank %d\n",
- cpu->cpu_index, params->bank);
+ cs->cpu_index, params->bank);
return;
}
@@ -1170,7 +1169,7 @@ static void do_inject_x86_mce(void *data)
monitor_printf(params->mon,
"CPU %d: Previous MCE still in progress, raising"
" triple fault\n",
- cpu->cpu_index);
+ cs->cpu_index);
qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
qemu_system_reset_request();
return;
@@ -1182,7 +1181,7 @@ static void do_inject_x86_mce(void *data)
banks[3] = params->misc;
cenv->mcg_status = params->mcg_status;
banks[1] = params->status;
- cpu_interrupt(cpu, CPU_INTERRUPT_MCE);
+ cpu_interrupt(cs, CPU_INTERRUPT_MCE);
} else if (!(banks[1] & MCI_STATUS_VAL)
|| !(banks[1] & MCI_STATUS_UC)) {
if (banks[1] & MCI_STATUS_VAL) {
@@ -1204,7 +1203,6 @@ void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
CPUX86State *cenv = &cpu->env;
MCEInjectionParams params = {
.mon = mon,
- .cpu = cpu,
.bank = bank,
.status = status,
.mcg_status = mcg_status,
@@ -1245,7 +1243,6 @@ void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
if (other_cs == cs) {
continue;
}
- params.cpu = X86_CPU(other_cs);
run_on_cpu(other_cs, do_inject_x86_mce, &params);
}
}
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index a0e42b2..1955a6b 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -156,10 +156,8 @@ static int kvm_get_tsc(CPUState *cs)
return 0;
}
-static inline void do_kvm_synchronize_tsc(void *arg)
+static inline void do_kvm_synchronize_tsc(CPUState *cpu, void *arg)
{
- CPUState *cpu = arg;
-
kvm_get_tsc(cpu);
}
@@ -169,7 +167,7 @@ void kvm_synchronize_all_tsc(void)
if (kvm_enabled()) {
CPU_FOREACH(cpu) {
- run_on_cpu(cpu, do_kvm_synchronize_tsc, cpu);
+ run_on_cpu(cpu, do_kvm_synchronize_tsc, NULL);
}
}
}
diff --git a/target-s390x/cpu.c b/target-s390x/cpu.c
index 2f3c8e2..35ae2ce 100644
--- a/target-s390x/cpu.c
+++ b/target-s390x/cpu.c
@@ -164,7 +164,7 @@ static void s390_cpu_machine_reset_cb(void *opaque)
{
S390CPU *cpu = opaque;
- run_on_cpu(CPU(cpu), s390_do_cpu_full_reset, CPU(cpu));
+ run_on_cpu(CPU(cpu), s390_do_cpu_full_reset, NULL);
}
#endif
@@ -220,7 +220,7 @@ static void s390_cpu_realizefn(DeviceState *dev, Error **errp)
s390_cpu_gdb_init(cs);
qemu_init_vcpu(cs);
#if !defined(CONFIG_USER_ONLY)
- run_on_cpu(cs, s390_do_cpu_full_reset, cs);
+ run_on_cpu(cs, s390_do_cpu_full_reset, NULL);
#else
cpu_reset(cs);
#endif
diff --git a/target-s390x/cpu.h b/target-s390x/cpu.h
index 5645e06..4fb34b5 100644
--- a/target-s390x/cpu.h
+++ b/target-s390x/cpu.h
@@ -502,17 +502,14 @@ static inline hwaddr decode_basedisp_s(CPUS390XState *env, uint32_t ipb,
#define decode_basedisp_rs decode_basedisp_s
/* helper functions for run_on_cpu() */
-static inline void s390_do_cpu_reset(void *arg)
+static inline void s390_do_cpu_reset(CPUState *cs, void *arg)
{
- CPUState *cs = arg;
S390CPUClass *scc = S390_CPU_GET_CLASS(cs);
scc->cpu_reset(cs);
}
-static inline void s390_do_cpu_full_reset(void *arg)
+static inline void s390_do_cpu_full_reset(CPUState *cs, void *arg)
{
- CPUState *cs = arg;
-
cpu_reset(cs);
}
diff --git a/target-s390x/kvm.c b/target-s390x/kvm.c
index 4b847a3..fd929e8 100644
--- a/target-s390x/kvm.c
+++ b/target-s390x/kvm.c
@@ -1385,7 +1385,6 @@ static int handle_diag(S390CPU *cpu, struct kvm_run *run, uint32_t ipb)
}
typedef struct SigpInfo {
- S390CPU *cpu;
uint64_t param;
int cc;
uint64_t *status_reg;
@@ -1398,38 +1397,40 @@ static void set_sigp_status(SigpInfo *si, uint64_t status)
si->cc = SIGP_CC_STATUS_STORED;
}
-static void sigp_start(void *arg)
+static void sigp_start(CPUState *cs, void *arg)
{
+ S390CPU *cpu = S390_CPU(cs);
SigpInfo *si = arg;
- if (s390_cpu_get_state(si->cpu) != CPU_STATE_STOPPED) {
+ if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) {
si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
return;
}
- s390_cpu_set_state(CPU_STATE_OPERATING, si->cpu);
+ s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
-static void sigp_stop(void *arg)
+static void sigp_stop(CPUState *cs, void *arg)
{
+ S390CPU *cpu = S390_CPU(cs);
SigpInfo *si = arg;
struct kvm_s390_irq irq = {
.type = KVM_S390_SIGP_STOP,
};
- if (s390_cpu_get_state(si->cpu) != CPU_STATE_OPERATING) {
+ if (s390_cpu_get_state(cpu) != CPU_STATE_OPERATING) {
si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
return;
}
/* disabled wait - sleeping in user space */
- if (CPU(si->cpu)->halted) {
- s390_cpu_set_state(CPU_STATE_STOPPED, si->cpu);
+ if (cs->halted) {
+ s390_cpu_set_state(CPU_STATE_STOPPED, cpu);
} else {
/* execute the stop function */
- si->cpu->env.sigp_order = SIGP_STOP;
- kvm_s390_vcpu_interrupt(si->cpu, &irq);
+ cpu->env.sigp_order = SIGP_STOP;
+ kvm_s390_vcpu_interrupt(cpu, &irq);
}
si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
@@ -1496,56 +1497,58 @@ static int kvm_s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch)
return 0;
}
-static void sigp_stop_and_store_status(void *arg)
+static void sigp_stop_and_store_status(CPUState *cs, void *arg)
{
+ S390CPU *cpu = S390_CPU(cs);
SigpInfo *si = arg;
struct kvm_s390_irq irq = {
.type = KVM_S390_SIGP_STOP,
};
/* disabled wait - sleeping in user space */
- if (s390_cpu_get_state(si->cpu) == CPU_STATE_OPERATING &&
- CPU(si->cpu)->halted) {
- s390_cpu_set_state(CPU_STATE_STOPPED, si->cpu);
+ if (s390_cpu_get_state(cpu) == CPU_STATE_OPERATING && cs->halted) {
+ s390_cpu_set_state(CPU_STATE_STOPPED, cpu);
}
- switch (s390_cpu_get_state(si->cpu)) {
+ switch (s390_cpu_get_state(cpu)) {
case CPU_STATE_OPERATING:
- si->cpu->env.sigp_order = SIGP_STOP_STORE_STATUS;
- kvm_s390_vcpu_interrupt(si->cpu, &irq);
+ cpu->env.sigp_order = SIGP_STOP_STORE_STATUS;
+ kvm_s390_vcpu_interrupt(cpu, &irq);
/* store will be performed when handling the stop intercept */
break;
case CPU_STATE_STOPPED:
/* already stopped, just store the status */
- cpu_synchronize_state(CPU(si->cpu));
- kvm_s390_store_status(si->cpu, KVM_S390_STORE_STATUS_DEF_ADDR, true);
+ cpu_synchronize_state(cs);
+ kvm_s390_store_status(cpu, KVM_S390_STORE_STATUS_DEF_ADDR, true);
break;
}
si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
-static void sigp_store_status_at_address(void *arg)
+static void sigp_store_status_at_address(CPUState *cs, void *arg)
{
+ S390CPU *cpu = S390_CPU(cs);
SigpInfo *si = arg;
uint32_t address = si->param & 0x7ffffe00u;
/* cpu has to be stopped */
- if (s390_cpu_get_state(si->cpu) != CPU_STATE_STOPPED) {
+ if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) {
set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
return;
}
- cpu_synchronize_state(CPU(si->cpu));
+ cpu_synchronize_state(cs);
- if (kvm_s390_store_status(si->cpu, address, false)) {
+ if (kvm_s390_store_status(cpu, address, false)) {
set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
return;
}
si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
-static void sigp_store_adtl_status(void *arg)
+static void sigp_store_adtl_status(CPUState *cs, void *arg)
{
+ S390CPU *cpu = S390_CPU(cs);
SigpInfo *si = arg;
if (!s390_has_feat(S390_FEAT_VECTOR)) {
@@ -1554,7 +1557,7 @@ static void sigp_store_adtl_status(void *arg)
}
/* cpu has to be stopped */
- if (s390_cpu_get_state(si->cpu) != CPU_STATE_STOPPED) {
+ if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) {
set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
return;
}
@@ -1565,31 +1568,32 @@ static void sigp_store_adtl_status(void *arg)
return;
}
- cpu_synchronize_state(CPU(si->cpu));
+ cpu_synchronize_state(cs);
- if (kvm_s390_store_adtl_status(si->cpu, si->param)) {
+ if (kvm_s390_store_adtl_status(cpu, si->param)) {
set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
return;
}
si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
-static void sigp_restart(void *arg)
+static void sigp_restart(CPUState *cs, void *arg)
{
+ S390CPU *cpu = S390_CPU(cs);
SigpInfo *si = arg;
struct kvm_s390_irq irq = {
.type = KVM_S390_RESTART,
};
- switch (s390_cpu_get_state(si->cpu)) {
+ switch (s390_cpu_get_state(cpu)) {
case CPU_STATE_STOPPED:
/* the restart irq has to be delivered prior to any other pending irq */
- cpu_synchronize_state(CPU(si->cpu));
- do_restart_interrupt(&si->cpu->env);
- s390_cpu_set_state(CPU_STATE_OPERATING, si->cpu);
+ cpu_synchronize_state(cs);
+ do_restart_interrupt(&cpu->env);
+ s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
break;
case CPU_STATE_OPERATING:
- kvm_s390_vcpu_interrupt(si->cpu, &irq);
+ kvm_s390_vcpu_interrupt(cpu, &irq);
break;
}
si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
@@ -1597,20 +1601,18 @@ static void sigp_restart(void *arg)
int kvm_s390_cpu_restart(S390CPU *cpu)
{
- SigpInfo si = {
- .cpu = cpu,
- };
+ SigpInfo si = {};
run_on_cpu(CPU(cpu), sigp_restart, &si);
DPRINTF("DONE: KVM cpu restart: %p\n", &cpu->env);
return 0;
}
-static void sigp_initial_cpu_reset(void *arg)
+static void sigp_initial_cpu_reset(CPUState *cs, void *arg)
{
+ S390CPU *cpu = S390_CPU(cs);
+ S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
SigpInfo *si = arg;
- CPUState *cs = CPU(si->cpu);
- S390CPUClass *scc = S390_CPU_GET_CLASS(si->cpu);
cpu_synchronize_state(cs);
scc->initial_cpu_reset(cs);
@@ -1618,11 +1620,11 @@ static void sigp_initial_cpu_reset(void *arg)
si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
-static void sigp_cpu_reset(void *arg)
+static void sigp_cpu_reset(CPUState *cs, void *arg)
{
+ S390CPU *cpu = S390_CPU(cs);
+ S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
SigpInfo *si = arg;
- CPUState *cs = CPU(si->cpu);
- S390CPUClass *scc = S390_CPU_GET_CLASS(si->cpu);
cpu_synchronize_state(cs);
scc->cpu_reset(cs);
@@ -1630,12 +1632,13 @@ static void sigp_cpu_reset(void *arg)
si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
-static void sigp_set_prefix(void *arg)
+static void sigp_set_prefix(CPUState *cs, void *arg)
{
+ S390CPU *cpu = S390_CPU(cs);
SigpInfo *si = arg;
uint32_t addr = si->param & 0x7fffe000u;
- cpu_synchronize_state(CPU(si->cpu));
+ cpu_synchronize_state(cs);
if (!address_space_access_valid(&address_space_memory, addr,
sizeof(struct LowCore), false)) {
@@ -1644,13 +1647,13 @@ static void sigp_set_prefix(void *arg)
}
/* cpu has to be stopped */
- if (s390_cpu_get_state(si->cpu) != CPU_STATE_STOPPED) {
+ if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) {
set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
return;
}
- si->cpu->env.psa = addr;
- cpu_synchronize_post_init(CPU(si->cpu));
+ cpu->env.psa = addr;
+ cpu_synchronize_post_init(cs);
si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
@@ -1658,7 +1661,6 @@ static int handle_sigp_single_dst(S390CPU *dst_cpu, uint8_t order,
uint64_t param, uint64_t *status_reg)
{
SigpInfo si = {
- .cpu = dst_cpu,
.param = param,
.status_reg = status_reg,
};
diff --git a/target-s390x/misc_helper.c b/target-s390x/misc_helper.c
index 86da194..4df2ec6 100644
--- a/target-s390x/misc_helper.c
+++ b/target-s390x/misc_helper.c
@@ -126,7 +126,7 @@ static int modified_clear_reset(S390CPU *cpu)
pause_all_vcpus();
cpu_synchronize_all_states();
CPU_FOREACH(t) {
- run_on_cpu(t, s390_do_cpu_full_reset, t);
+ run_on_cpu(t, s390_do_cpu_full_reset, NULL);
}
s390_cmma_reset();
subsystem_reset();
@@ -145,7 +145,7 @@ static int load_normal_reset(S390CPU *cpu)
pause_all_vcpus();
cpu_synchronize_all_states();
CPU_FOREACH(t) {
- run_on_cpu(t, s390_do_cpu_reset, t);
+ run_on_cpu(t, s390_do_cpu_reset, NULL);
}
s390_cmma_reset();
subsystem_reset();
--
2.7.4
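
As a standalone illustration of the dispatch pattern the cpus.c hunks implement
(plain C with invented names, not QEMU code): each queued work item stores a
callback together with its data pointer, and the flush loop invokes the callback
with the owning context as an explicit first argument, which is why the helpers
above no longer need to smuggle the CPUState through *data.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct Ctx Ctx;                          /* stand-in for CPUState */
typedef void (*work_func)(Ctx *ctx, void *data);

typedef struct WorkItem {
    struct WorkItem *next;
    work_func func;
    void *data;
} WorkItem;

struct Ctx {
    int id;
    WorkItem *queue;
};

/* queue a callback; only the extra payload goes through the data pointer */
static void queue_work(Ctx *ctx, work_func func, void *data)
{
    WorkItem *wi = malloc(sizeof(*wi));

    wi->func = func;
    wi->data = data;
    wi->next = ctx->queue;       /* push onto a simple LIFO list */
    ctx->queue = wi;
}

/* drain the list, handing each callback the owning context explicitly */
static void flush_queued_work(Ctx *ctx)
{
    while (ctx->queue) {
        WorkItem *wi = ctx->queue;

        ctx->queue = wi->next;
        wi->func(ctx, wi->data);
        free(wi);
    }
}

/* a helper in the new style: the context arrives as the first argument */
static void print_addr(Ctx *ctx, void *data)
{
    printf("ctx %d: addr 0x%lx\n", ctx->id, (unsigned long)(uintptr_t)data);
}

int main(void)
{
    Ctx ctx = { .id = 0, .queue = NULL };

    queue_work(&ctx, print_addr, (void *)(uintptr_t)0x1000);
    flush_queued_work(&ctx);
    return 0;
}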