
[Qemu-devel] [PATCH] ivshmem: use irqfd to interrupt among VMs


From: Liu Ping Fan
Subject: [Qemu-devel] [PATCH] ivshmem: use irqfd to interrupt among VMs
Date: Wed, 21 Nov 2012 14:02:31 +0800

From: Liu Ping Fan <address@hidden>

Using irqfd, we can avoid switching between kernel and user space when
VMs interrupt each other.

Signed-off-by: Liu Ping Fan <address@hidden>
---
 hw/ivshmem.c |   48 +++++++++++++++++++++++++++++++++++++++++++++++-
 1 files changed, 47 insertions(+), 1 deletions(-)

diff --git a/hw/ivshmem.c b/hw/ivshmem.c
index f6dbb21..81c7354 100644
--- a/hw/ivshmem.c
+++ b/hw/ivshmem.c
@@ -19,6 +19,7 @@
 #include "hw.h"
 #include "pc.h"
 #include "pci.h"
+#include "msi.h"
 #include "msix.h"
 #include "kvm.h"
 #include "migration.h"
@@ -54,6 +55,11 @@ typedef struct EventfdEntry {
     int vector;
 } EventfdEntry;
 
+typedef struct IrqfdEntry {
+    int virq;
+    bool used;
+} IrqfdEntry;
+
 typedef struct IVShmemState {
     PCIDevice dev;
     uint32_t intrmask;
@@ -83,6 +89,8 @@ typedef struct IVShmemState {
     uint32_t vectors;
     uint32_t features;
     EventfdEntry *eventfd_table;
+    IrqfdEntry *vector_irqfd;
+    bool irqfd_enable;
 
     Error *migration_blocker;
 
@@ -632,6 +640,38 @@ static void ivshmem_write_config(PCIDevice *pci_dev, uint32_t address,
     msix_write_config(pci_dev, address, val, len);
 }
 
+static int ivshmem_vector_use(PCIDevice *dev, unsigned vector,
+                                     MSIMessage msg)
+{
+    IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
+    int virq;
+    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
+
+    virq = kvm_irqchip_add_msi_route(kvm_state, msg);
+    if (virq >= 0 && kvm_irqchip_add_irqfd_notifier(kvm_state, n, virq) >= 0) {
+        s->vector_irqfd[vector].virq = virq;
+        s->vector_irqfd[vector].used = true;
+        qemu_chr_add_handlers(s->eventfd_chr[vector], NULL, NULL, NULL, NULL);
+    } else if (virq >= 0) {
+        kvm_irqchip_release_virq(kvm_state, virq);
+    }
+    return 0;
+}
+
+static void ivshmem_vector_release(PCIDevice *dev, unsigned vector)
+{
+    IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
+    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
+    int virq = s->vector_irqfd[vector].virq;
+
+    if (s->vector_irqfd[vector].used) {
+        kvm_irqchip_remove_irqfd_notifier(kvm_state, n, virq);
+        kvm_irqchip_release_virq(kvm_state, virq);
+        s->vector_irqfd[vector].virq = -1;
+        s->vector_irqfd[vector].used = false;
+    }
+}
+
 static int pci_ivshmem_init(PCIDevice *dev)
 {
     IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
@@ -759,7 +799,13 @@ static int pci_ivshmem_init(PCIDevice *dev)
     }
 
     s->dev.config_write = ivshmem_write_config;
-
+    if (kvm_gsi_routing_enabled()) {
+        s->irqfd_enable = msix_set_vector_notifiers(dev, ivshmem_vector_use,
+                    ivshmem_vector_release) >= 0 ? true : false;
+        if (s->irqfd_enable) {
+            s->vector_irqfd = g_new0(IrqfdEntry, s->vectors);
+        }
+    }
     return 0;
 }
 
-- 
1.7.4.4
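
[Editor's note] For readers unfamiliar with irqfd, below is a minimal sketch, not part
of the patch, of the binding that kvm_irqchip_add_irqfd_notifier() performs under the
hood: an eventfd is attached to a guest interrupt (GSI) via the KVM_IRQFD ioctl, so that
signalling the eventfd injects the interrupt entirely inside the kernel, with no exit to
the QEMU process. The vm_fd and gsi parameters are assumed to come from the usual KVM VM
setup and from an MSI route such as the one added by kvm_irqchip_add_msi_route() above.

/*
 * Minimal sketch (assumption, not the patch's code): bind an eventfd to a
 * guest GSI with KVM_IRQFD.  vm_fd is assumed to be an already-created KVM
 * VM file descriptor and gsi the virq returned by an MSI route.
 */
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int bind_irqfd(int vm_fd, int gsi)
{
    struct kvm_irqfd irqfd = { .gsi = gsi };
    int efd = eventfd(0, EFD_CLOEXEC);    /* the fd a peer will signal */

    if (efd < 0) {
        return -1;
    }
    irqfd.fd = efd;

    /*
     * After this, a write to efd raises the interrupt in the kernel,
     * without bouncing through the user-space VMM.
     */
    if (ioctl(vm_fd, KVM_IRQFD, &irqfd) < 0) {
        close(efd);
        return -1;
    }
    return efd;
}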



