From: wexu
Subject: [Qemu-devel] [RFC Patch v2 05/10] virtio-net rsc: Create timer to drain the packets from the cache pool
Date: Mon, 1 Feb 2016 02:13:24 +0800

From: Wei Xu <address@hidden>

The timer is armed only when the packet pool is not empty; when it
fires, it drains all of the cached packets. This keeps the extra
delay introduced for the upper layer protocol stack bounded.
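
For readers unfamiliar with the QEMU timer API, the arming pattern this
patch relies on is the standard one-shot virtual-clock timer. A minimal
sketch follows (illustrative only, not part of the patch; my_cb and
my_state are placeholder names, and 500000 ns matches the interval
chosen below):

#include "qemu/osdep.h"
#include "qemu/timer.h"

/* Callback invoked when the timer expires. */
static void my_cb(void *opaque)
{
    /* Drain the cached packets here; re-arm only if work remains. */
}

static void my_arm_timer(void *my_state)
{
    QEMUTimer *t = timer_new_ns(QEMU_CLOCK_VIRTUAL, my_cb, my_state);

    /* One-shot: fire 500000 ns (0.5 ms) from now. */
    timer_mod(t, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500000);
}

The timer does not repeat by itself; the callback re-arms it with
timer_mod() only while there is still work queued, which is exactly
what virtio_net_rsc_purge() does below.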

Signed-off-by: Wei Xu <address@hidden>
---
 hw/net/virtio-net.c | 38 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 38 insertions(+)

diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 4f77fbe..93df0d5 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -48,12 +48,17 @@
 
 #define MAX_VIRTIO_IP_PAYLOAD  (65535 + IP_OFFSET)
 
+/* Purge coalesced packets timer interval (ns) */
+#define RSC_TIMER_INTERVAL  500000
+
 /* Global statistics */
 static uint32_t rsc_chain_no_mem;
 
 /* Switcher to enable/disable rsc */
 static bool virtio_net_rsc_bypass;
 
+static uint32_t rsc_timeout = RSC_TIMER_INTERVAL;
+
 /* Coalesce callback for ipv4/6 */
 typedef int32_t (VirtioNetCoalesce) (NetRscChain *chain, NetRscSeg *seg,
                                      const uint8_t *buf, size_t size);
@@ -1625,6 +1630,35 @@ static int virtio_net_load_device(VirtIODevice *vdev, QEMUFile *f,
     return 0;
 }
 
+static void virtio_net_rsc_purge(void *opq)
+{
+    int ret = 0;
+    NetRscChain *chain = (NetRscChain *)opq;
+    NetRscSeg *seg, *rn;
+
+    QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, rn) {
+        if (!qemu_can_send_packet(seg->nc)) {
+            /* It is unclear whether one or more queues can fail to
+             * send here; continue with the remaining segments. */
+            continue;
+        }
+
+        ret = virtio_net_do_receive(seg->nc, seg->buf, seg->size);
+        QTAILQ_REMOVE(&chain->buffers, seg, next);
+        g_free(seg->buf);
+        g_free(seg);
+
+        if (ret == 0) {
+            /* Try next queue */
+            continue;
+        }
+    }
+
+    if (!QTAILQ_EMPTY(&chain->buffers)) {
+        timer_mod(chain->drain_timer,
+                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + rsc_timeout);
+    }
+}
 
 static void virtio_net_rsc_cleanup(VirtIONet *n)
 {
@@ -1810,6 +1844,8 @@ static size_t virtio_net_rsc_callback(NetRscChain *chain, NetClientState *nc,
         if (!virtio_net_rsc_cache_buf(chain, nc, buf, size)) {
             return 0;
         } else {
+            timer_mod(chain->drain_timer,
+                      qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + rsc_timeout);
             return size;
         }
     }
@@ -1877,6 +1913,8 @@ static NetRscChain *virtio_net_rsc_lookup_chain(NetClientState *nc,
     }
 
     chain->proto = proto;
+    chain->drain_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+                                      virtio_net_rsc_purge, chain);
     chain->do_receive = virtio_net_rsc_receive4;
 
     QTAILQ_INIT(&chain->buffers);
-- 
2.4.0
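
Not shown in this hunk: the timer created in
virtio_net_rsc_lookup_chain() also needs to be torn down when the
chain goes away. A minimal sketch of the matching teardown, assuming
it belongs in virtio_net_rsc_cleanup() and using the drain_timer field
from this series:

/* Sketch only: the usual QEMUTimer teardown pair. */
timer_del(chain->drain_timer);
timer_free(chain->drain_timer);

timer_del() removes the timer from the active list so the purge
callback can no longer fire before timer_free() releases it.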



