qemu-devel

[Qemu-devel] [RFC PATCH 2/4] vfio-pci: Add support for MSI affinity


From: Alex Williamson
Subject: [Qemu-devel] [RFC PATCH 2/4] vfio-pci: Add support for MSI affinity
Date: Tue, 27 Nov 2012 15:00:41 -0700
User-agent: StGit/0.16

When MSI is accelerated through KVM the vectors are only programmed
when the guest first enables MSI support.  Subsequent writes to the
vector address or data fields are ignored.  Unfortunately that means
we ignore updates made to adjust the SMP affinity of the vectors.
MSI SMP affinity already works in non-KVM mode because the address
and data fields are read from their backing store on each interrupt.

This patch stores the MSIMessage programmed into KVM so that we can
determine when changes are made and update the routes.  The message
is stored for both MSI and MSI-X for consistency, but we only make
use of it in MSI mode.

Signed-off-by: Alex Williamson <address@hidden>
---
 hw/vfio_pci.c |   31 ++++++++++++++++++++++++++++---
 1 file changed, 28 insertions(+), 3 deletions(-)

diff --git a/hw/vfio_pci.c b/hw/vfio_pci.c
index 7c27834..49b9550 100644
--- a/hw/vfio_pci.c
+++ b/hw/vfio_pci.c
@@ -75,6 +75,7 @@ struct VFIODevice;
 typedef struct VFIOMSIVector {
     EventNotifier interrupt; /* eventfd triggered on interrupt */
     struct VFIODevice *vdev; /* back pointer to device */
+    MSIMessage msg; /* cache the MSI message so we know when it changes */
     int virq; /* KVM irqchip route for QEMU bypass */
     bool use;
 } VFIOMSIVector;
@@ -574,6 +575,7 @@ static int vfio_msix_vector_use(PCIDevice *pdev,
 
     vector = &vdev->msi_vectors[nr];
     vector->vdev = vdev;
+    vector->msg = msg;
     vector->use = true;
 
     msix_vector_use(pdev, nr);
@@ -716,7 +718,6 @@ retry:
     vdev->msi_vectors = g_malloc0(vdev->nr_vectors * sizeof(VFIOMSIVector));
 
     for (i = 0; i < vdev->nr_vectors; i++) {
-        MSIMessage msg;
         VFIOMSIVector *vector = &vdev->msi_vectors[i];
 
         vector->vdev = vdev;
@@ -726,13 +727,13 @@ retry:
             error_report("vfio: Error: event_notifier_init failed\n");
         }
 
-        msg = msi_get_message(&vdev->pdev, i);
+        vector->msg = msi_get_message(&vdev->pdev, i);
 
         /*
          * Attempt to enable route through KVM irqchip,
          * default to userspace handling if unavailable.
          */
-        vector->virq = kvm_irqchip_add_msi_route(kvm_state, msg);
+        vector->virq = kvm_irqchip_add_msi_route(kvm_state, vector->msg);
         if (vector->virq < 0 ||
             kvm_irqchip_add_irqfd_notifier(kvm_state, &vector->interrupt,
                                            vector->virq) < 0) {
@@ -1022,6 +1023,30 @@ static void vfio_pci_write_config(PCIDevice *pdev, uint32_t addr,
             vfio_enable_msi(vdev);
         } else if (was_enabled && !is_enabled) {
             vfio_disable_msi(vdev);
+        } else if (was_enabled && is_enabled) {
+            int i;
+
+            for (i = 0; i < vdev->nr_vectors; i++) {
+                VFIOMSIVector *vector = &vdev->msi_vectors[i];
+                MSIMessage msg;
+
+                if (!vector->use || vector->virq < 0) {
+                    continue;
+                }
+
+                msg = msi_get_message(pdev, i);
+
+                if (msg.address != vector->msg.address ||
+                    msg.data != vector->msg.data) {
+
+                    DPRINTF("%s(%04x:%02x:%02x.%x) MSI vector %d changed\n",
+                            __func__, vdev->host.domain, vdev->host.bus,
+                            vdev->host.slot, vdev->host.function, i);
+
+                    kvm_irqchip_update_msi_route(kvm_state, vector->virq, msg);
+                    vector->msg = msg;
+                }
+            }
         }
     }
 
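For anyone skimming the thread, the flow the patch implements boils down to the
sketch below.  This is illustrative only, not code from the patch: VFIOMSIVector,
MSIMessage, msi_get_message(), kvm_irqchip_add_msi_route() and
kvm_irqchip_update_msi_route() are the QEMU symbols used in the hunks above,
while the two example_* helpers are hypothetical wrappers.

    /* Illustrative sketch only -- not part of the patch. */

    /* At MSI enable time: program KVM once and remember what was programmed. */
    static void example_vector_enable(VFIODevice *vdev, int nr)
    {
        VFIOMSIVector *vector = &vdev->msi_vectors[nr];

        vector->msg = msi_get_message(&vdev->pdev, nr);   /* cache the message */
        vector->virq = kvm_irqchip_add_msi_route(kvm_state, vector->msg);
    }

    /* On a config space write while MSI stays enabled: re-read the message and
     * only touch the KVM route if the guest actually changed it, e.g. to move
     * the vector to another CPU for SMP affinity. */
    static void example_vector_update(VFIODevice *vdev, int nr)
    {
        VFIOMSIVector *vector = &vdev->msi_vectors[nr];
        MSIMessage msg = msi_get_message(&vdev->pdev, nr);

        if (msg.address != vector->msg.address || msg.data != vector->msg.data) {
            kvm_irqchip_update_msi_route(kvm_state, vector->virq, msg);
            vector->msg = msg;
        }
    }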



