qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [PATCH] igb: fix VFs traffic with IOMMU


From: Akihiko Odaki
Subject: Re: [PATCH] igb: fix VFs traffic with IOMMU
Date: Thu, 6 Apr 2023 00:39:43 +0900
User-agent: Mozilla/5.0 (X11; Linux aarch64; rv:102.0) Gecko/20100101 Thunderbird/102.9.0

Hi,

I believe this problem is already addressed with commit f4fdaf009cc85e95a00aba47a6b5b9df920d51c4.

Regards,
Akihiko Odaki

On 2023/04/05 23:23, Tomasz Dzieciol wrote:
Use proper PCI device for net_tx_pkt provided to
net_tx_pkt_add_raw_fragment.

Signed-off-by: Tomasz Dzieciol <t.dzieciol@partner.samsung.com>
---
  hw/net/igb.c      |  8 ++++++++
  hw/net/igb_core.c | 47 +++++++++++++++++++++++++++++++++++++++++------
  hw/net/igb_core.h |  3 +++
  3 files changed, 52 insertions(+), 6 deletions(-)

diff --git a/hw/net/igb.c b/hw/net/igb.c
index c6d753df87..a67497ff48 100644
--- a/hw/net/igb.c
+++ b/hw/net/igb.c
@@ -97,11 +97,19 @@ struct IGBState {
  static void igb_write_config(PCIDevice *dev, uint32_t addr,
      uint32_t val, int len)
  {
+    uint16_t num_vfs_curr;
      IGBState *s = IGB(dev);
+    PCIESriovPF *pf = &dev->exp.sriov_pf;
+    uint16_t num_vfs_prev = pf->num_vfs;
      trace_igb_write_config(addr, val, len);
      pci_default_write_config(dev, addr, val, len);
+    num_vfs_curr = pf->num_vfs;
+    if (num_vfs_curr != num_vfs_prev) {
+        igb_core_num_vfs_change_handle(&s->core);
+    }
+
      if (range_covers_byte(addr, len, PCI_COMMAND) &&
          (dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
          igb_start_recv(&s->core);
diff --git a/hw/net/igb_core.c b/hw/net/igb_core.c
index a7c7bfdc75..e1b99f312b 100644
--- a/hw/net/igb_core.c
+++ b/hw/net/igb_core.c
@@ -3810,24 +3810,59 @@ igb_vm_state_change(void *opaque, bool running, RunState state)
      }
  }
+static void
+igb_core_init_queues_tx_packet(IGBCore *core)
+{
+    PCIDevice *dev;
+    int i;
+
+    for (i = 0; i < IGB_NUM_QUEUES; i++) {
+        dev = pcie_sriov_get_vf_at_index(core->owner, i % 8);
+        if (!dev) {
+            dev = core->owner;
+        }
+
+        net_tx_pkt_init(&core->tx[i].tx_pkt, dev, E1000E_MAX_TX_FRAGS);
+    }
+}
+
+static void
+igb_core_uninit_queues_tx_packet(IGBCore *core)
+{
+    int i;
+
+    for (i = 0; i < IGB_NUM_QUEUES; i++) {
+        net_tx_pkt_reset(core->tx[i].tx_pkt);
+        net_tx_pkt_uninit(core->tx[i].tx_pkt);
+    }
+}
+
+static void
+igb_core_reinit_queues_tx_packet(IGBCore *core)
+{
+    igb_core_uninit_queues_tx_packet(core);
+    igb_core_init_queues_tx_packet(core);
+}
+
+void
+igb_core_num_vfs_change_handle(IGBCore *core)
+{
+    igb_core_reinit_queues_tx_packet(core);
+}
+
  void
  igb_core_pci_realize(IGBCore        *core,
                       const uint16_t *eeprom_templ,
                       uint32_t        eeprom_size,
                       const uint8_t  *macaddr)
  {
-    int i;
-
      core->autoneg_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                         igb_autoneg_timer, core);
      igb_intrmgr_pci_realize(core);
      core->vmstate = qemu_add_vm_change_state_handler(igb_vm_state_change, core);
  
-    for (i = 0; i < IGB_NUM_QUEUES; i++) {
-        net_tx_pkt_init(&core->tx[i].tx_pkt, core->owner, E1000E_MAX_TX_FRAGS);
-    }
-
+    igb_core_init_queues_tx_packet(core);
      net_rx_pkt_init(&core->rx_pkt);
      e1000x_core_prepare_eeprom(core->eeprom,
diff --git a/hw/net/igb_core.h b/hw/net/igb_core.h
index 814c1e264b..8a32195d74 100644
--- a/hw/net/igb_core.h
+++ b/hw/net/igb_core.h
@@ -143,4 +143,7 @@ igb_receive_iov(IGBCore *core, const struct iovec *iov, int iovcnt);
  void
  igb_start_recv(IGBCore *core);
+void
+igb_core_num_vfs_change_handle(IGBCore *core);
+
  #endif



reply via email to

[Prev in Thread] Current Thread [Next in Thread]