From: Marcelo Tosatti
Subject: [Qemu-devel] [PATCH 2/5] kvm: x86: Avoid runtime allocation of xsave buffer
Date: Thu, 22 Dec 2011 18:13:47 -0200

From: Jan Kiszka <address@hidden>

Keep a per-VCPU xsave buffer for kvm_put/get_xsave instead of
continuously allocating and freeing it on state sync.

Signed-off-by: Jan Kiszka <address@hidden>
Signed-off-by: Marcelo Tosatti <address@hidden>
---
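For illustration, a minimal standalone sketch of the pattern this patch moves to: one aligned per-VCPU scratch buffer set up at init time and reused on every state sync, instead of a qemu_memalign/g_free pair per call. The names and the posix_memalign call below are illustrative only, not the actual QEMU/KVM symbols; see the diff for the real change.

    #include <stdlib.h>
    #include <string.h>

    #define XSAVE_BUF_SIZE 4096   /* stand-in for sizeof(struct kvm_xsave) */

    struct vcpu {
        void *xsave_buf;          /* allocated once at init, reused for every sync */
    };

    static int vcpu_init(struct vcpu *v)
    {
        /* one aligned allocation per VCPU, kept for the VCPU's lifetime */
        return posix_memalign(&v->xsave_buf, 4096, XSAVE_BUF_SIZE) ? -1 : 0;
    }

    static int vcpu_sync_state(struct vcpu *v)
    {
        /* hot path: clear and refill the preallocated buffer, no malloc/free here */
        memset(v->xsave_buf, 0, XSAVE_BUF_SIZE);
        /* ... fill the buffer and hand it to the kernel via ioctl ... */
        return 0;
    }
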
 target-i386/cpu.h |    3 ++-
 target-i386/kvm.c |   15 +++++++--------
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index a08ce9d..37dde79 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -751,7 +751,8 @@ typedef struct CPUX86State {
     uint32_t cpuid_svm_features;
     bool tsc_valid;
     int tsc_khz;
-    
+    void *kvm_xsave_buf;
+
     /* in order to simplify APIC support, we leave this pointer to the
        user */
     struct DeviceState *apic_state;
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index d2f70f9..06f4401 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -516,6 +516,10 @@ int kvm_arch_init_vcpu(CPUState *env)
         }
     }
 
+    if (kvm_has_xsave()) {
+        env->kvm_xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave));
+    }
+
     return 0;
 }
 
@@ -771,15 +775,14 @@ static int kvm_put_fpu(CPUState *env)
 
 static int kvm_put_xsave(CPUState *env)
 {
-    int i, r;
-    struct kvm_xsave* xsave;
+    struct kvm_xsave* xsave = env->kvm_xsave_buf;
     uint16_t cwd, swd, twd;
+    int i, r;
 
     if (!kvm_has_xsave()) {
         return kvm_put_fpu(env);
     }
 
-    xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
     memset(xsave, 0, sizeof(struct kvm_xsave));
     twd = 0;
     swd = env->fpus & ~(7 << 11);
@@ -801,7 +804,6 @@ static int kvm_put_xsave(CPUState *env)
     memcpy(&xsave->region[XSAVE_YMMH_SPACE], env->ymmh_regs,
             sizeof env->ymmh_regs);
     r = kvm_vcpu_ioctl(env, KVM_SET_XSAVE, xsave);
-    g_free(xsave);
     return r;
 }
 
@@ -978,7 +980,7 @@ static int kvm_get_fpu(CPUState *env)
 
 static int kvm_get_xsave(CPUState *env)
 {
-    struct kvm_xsave* xsave;
+    struct kvm_xsave* xsave = env->kvm_xsave_buf;
     int ret, i;
     uint16_t cwd, swd, twd;
 
@@ -986,10 +988,8 @@ static int kvm_get_xsave(CPUState *env)
         return kvm_get_fpu(env);
     }
 
-    xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
     ret = kvm_vcpu_ioctl(env, KVM_GET_XSAVE, xsave);
     if (ret < 0) {
-        g_free(xsave);
         return ret;
     }
 
@@ -1013,7 +1013,6 @@ static int kvm_get_xsave(CPUState *env)
     env->xstate_bv = *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV];
     memcpy(env->ymmh_regs, &xsave->region[XSAVE_YMMH_SPACE],
             sizeof env->ymmh_regs);
-    g_free(xsave);
     return 0;
 }
 
-- 
1.7.6.4