
From: Jason Wang
Subject: Re: [PATCH v7 09/10] virtio-pci: add support for configure interrupt
Date: Thu, 3 Jun 2021 14:45:21 +0800
User-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:78.0) Gecko/20100101 Thunderbird/78.10.2


On 2021/6/2 11:47 AM, Cindy Lu wrote:
Add support for the configure interrupt: use kvm_irqfd_assign to set the
gsi to the kernel. When the host kernel signals the config notifier via
eventfd_signal, this finally injects an MSI-X interrupt into the guest.

Signed-off-by: Cindy Lu <lulu@redhat.com>
---
  hw/virtio/virtio-pci.c | 63 ++++++++++++++++++++++++++++++++++++++++--
  1 file changed, 60 insertions(+), 3 deletions(-)
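
For reference, the mechanism the commit message describes mirrors what the existing vq vector path in hw/virtio/virtio-pci.c already does: allocate an MSI route for the vector, then attach the notifier's eventfd to that gsi so the host kernel can inject the MSI-X interrupt directly. Roughly (a sketch built on the existing QEMU helpers, not this patch's exact code):

    /* Sketch: route the vector to a gsi and attach the notifier's
     * eventfd to it, so interrupt injection bypasses userspace. */
    int virq = kvm_irqchip_add_msi_route(kvm_state, vector, &proxy->pci_dev);
    if (virq < 0) {
        return virq;
    }
    kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, notifier, NULL, virq);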

diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index f863c89de6..1e03f11a85 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -717,7 +717,8 @@ static int virtio_pci_get_notifier(VirtIOPCIProxy *proxy, int queue_no,
      VirtQueue *vq;
if (queue_no == VIRTIO_CONFIG_IRQ_IDX) {
-        return -1;
+        *n = virtio_get_config_notifier(vdev);
+        *vector = vdev->config_vector;
      } else {
          if (!virtio_queue_get_num(vdev, queue_no)) {
              return -1;
@@ -764,6 +765,10 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
      return ret;
  }
+static int kvm_virtio_pci_vector_config_use(VirtIOPCIProxy *proxy)
+{
+    return kvm_virtio_pci_vector_use_one(proxy, VIRTIO_CONFIG_IRQ_IDX);
+}
static void kvm_virtio_pci_vector_release_one(VirtIOPCIProxy *proxy,
                          int queue_no)
@@ -792,6 +797,28 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
      }
  }
+static void kvm_virtio_pci_vector_config_release(VirtIOPCIProxy *proxy)
+{
+    kvm_virtio_pci_vector_release_one(proxy, VIRTIO_CONFIG_IRQ_IDX);
+}


Newline please. Also, can this survive checkpatch.pl?
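
(For reference, the style checker can be run locally on the patch file, e.g. scripts/checkpatch.pl 0009-virtio-pci-add-support-for-configure-interrupt.patch; the filename here is just an example.)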


+static int virtio_pci_set_config_notifier(DeviceState *d, bool assign)
+{
+    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
+    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+    EventNotifier *notifier = virtio_get_config_notifier(vdev);
+    int r = 0;
+    if (assign) {
+        r = event_notifier_init(notifier, 0);
+        virtio_set_notifier_fd_handler(vdev, VIRTIO_CONFIG_IRQ_IDX, true, true);
+        kvm_virtio_pci_vector_config_use(proxy);
+    } else {
+        virtio_set_notifier_fd_handler(vdev, VIRTIO_CONFIG_IRQ_IDX,
+                                             false, true);
+        kvm_virtio_pci_vector_config_release(proxy);
+        event_notifier_cleanup(notifier);
+    }
+    return r;
+}
  static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
                                         unsigned int queue_no,
                                         unsigned int vector,
@@ -873,9 +900,17 @@ static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
          }
          vq = virtio_vector_next_queue(vq);
      }
-
+    n = virtio_get_config_notifier(vdev);
+    ret = virtio_pci_vq_vector_unmask(proxy, VIRTIO_CONFIG_IRQ_IDX,
+                        vector, msg, n);
+    if (ret < 0) {
+        goto config_undo;
+    }


I'd do this before the loop, but we'd need to validate whether the vector is the one actually used by the config interrupt.
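
Something like this, before the loop (a sketch only, reusing the names from this patch):

    /* Sketch: only touch the config notifier when this vector is the
     * one actually assigned to the config interrupt. */
    if (vector == vdev->config_vector) {
        n = virtio_get_config_notifier(vdev);
        ret = virtio_pci_vq_vector_unmask(proxy, VIRTIO_CONFIG_IRQ_IDX,
                                          vector, msg, n);
        if (ret < 0) {
            return ret; /* nothing unmasked yet, nothing to undo */
        }
    }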


      return 0;
+config_undo:
+    n = virtio_get_config_notifier(vdev);
+    virtio_pci_vq_vector_mask(proxy, VIRTIO_CONFIG_IRQ_IDX, vector, n);


If the unmask fails, the vector is still masked, so we don't need to mask it again.
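
That is, the error path can jump straight to undoing the vq vectors (sketch):

    ret = virtio_pci_vq_vector_unmask(proxy, VIRTIO_CONFIG_IRQ_IDX,
                                      vector, msg, n);
    if (ret < 0) {
        goto undo; /* the config vector is still masked; no extra
                    * virtio_pci_vq_vector_mask() call is needed */
    }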


  undo:
      vq = virtio_vector_first_queue(vdev, vector);
      while (vq && unmasked >= 0) {
@@ -909,6 +944,8 @@ static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
          }
          vq = virtio_vector_next_queue(vq);
      }
+    n = virtio_get_config_notifier(vdev);
+    virtio_pci_vq_vector_mask(proxy, VIRTIO_CONFIG_IRQ_IDX, vector, n);
  }
static void virtio_pci_vector_poll(PCIDevice *dev,
@@ -942,6 +979,20 @@ static void virtio_pci_vector_poll(PCIDevice *dev,
              msix_set_pending(dev, vector);
          }
      }
+   /*check for config interrupt*/
+   vector = vdev->config_vector;
+   notifier = virtio_get_config_notifier(vdev);
+   if (vector < vector_start || vector >= vector_end ||
+            !msix_is_masked(dev, vector)) {
+        return;
+   }
+   if (k->guest_notifier_pending) {
+        if (k->guest_notifier_pending(vdev, VIRTIO_CONFIG_IRQ_IDX)) {
+            msix_set_pending(dev, vector);
+        }
+   } else if (event_notifier_test_and_clear(notifier)) {
+        msix_set_pending(dev, vector);
+   }


Let's consider unifying this code with the vq vector handling here.
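
For example, a common helper along these lines (the name and shape are hypothetical):

    /* Hypothetical helper: the pending check is identical for vq
     * vectors and the config vector; only queue_no/notifier differ. */
    static void virtio_pci_one_vector_poll(VirtIOPCIProxy *proxy,
                                           VirtIODevice *vdev,
                                           int queue_no, unsigned vector,
                                           EventNotifier *notifier,
                                           unsigned vector_start,
                                           unsigned vector_end)
    {
        VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
        PCIDevice *dev = &proxy->pci_dev;

        if (vector < vector_start || vector >= vector_end ||
            !msix_is_masked(dev, vector)) {
            return;
        }
        if (k->guest_notifier_pending) {
            if (k->guest_notifier_pending(vdev, queue_no)) {
                msix_set_pending(dev, vector);
            }
        } else if (event_notifier_test_and_clear(notifier)) {
            msix_set_pending(dev, vector);
        }
    }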


  }
static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
@@ -1002,6 +1053,7 @@ static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
          msix_unset_vector_notifiers(&proxy->pci_dev);
          if (proxy->vector_irqfd) {
              kvm_virtio_pci_vector_release(proxy, nvqs);
+            kvm_virtio_pci_vector_config_release(proxy);
              g_free(proxy->vector_irqfd);
              proxy->vector_irqfd = NULL;
          }
@@ -1029,6 +1081,10 @@ static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
                  goto assign_error;
              }
          }
+        r = virtio_pci_set_config_notifier(d, assign);
+        if (r < 0) {
+            goto config_error;
+        }
          r = msix_set_vector_notifiers(&proxy->pci_dev,
                                        virtio_pci_vector_unmask,
                                        virtio_pci_vector_mask,
@@ -1045,7 +1101,8 @@ notifiers_error:
          assert(assign);
          kvm_virtio_pci_vector_release(proxy, nvqs);
      }
-


The blank line should be kept here.

Thanks


+config_error:
+    kvm_virtio_pci_vector_config_release(proxy);
  assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
      assert(assign);



