qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Qemu-devel] [PATCH v3 2/7] net: distinguish & defer nested call to BH


From: Liu Ping Fan
Subject: [Qemu-devel] [PATCH v3 2/7] net: distinguish & defer nested call to BH
Date: Thu, 27 Jun 2013 11:38:44 +0800

When the network layer can run on multiple threads, a nested call
caused by nc->receive() can raise issues such as deadlock. So postpone
it to a BH (bottom half). We distinguish a nested call "A->B->A" from
a parallel call "A->B, B->A" by using the TLS variable net_enter.
Here A and B each stand for a NetClientState.

Signed-off-by: Liu Ping Fan <address@hidden>
---
 net/queue.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 46 insertions(+), 2 deletions(-)

diff --git a/net/queue.c b/net/queue.c
index 1937345..d508b7a 100644
--- a/net/queue.c
+++ b/net/queue.c
@@ -24,6 +24,9 @@
 #include "net/queue.h"
 #include "qemu/queue.h"
 #include "net/net.h"
+#include "block/aio.h"
+#include "qemu/main-loop.h"
+#include "qemu/tls.h"
 
 /* The delivery handler may only return zero if it will call
  * qemu_net_queue_flush() when it determines that it is once again able
@@ -58,6 +61,23 @@ struct NetQueue {
     unsigned delivering : 1;
 };
 
+typedef struct NetQueueBH {
+    QEMUBH *bh;
+    NetClientState *nc;
+} NetQueueBH;
+
+static void qemu_net_queue_send_bh(void *opaque)
+{
+    NetQueueBH *q_bh = opaque;
+    NetQueue *queue = q_bh->nc->send_queue;
+
+    qemu_net_queue_flush(queue);
+    qemu_bh_delete(q_bh->bh);
+    g_slice_free(NetQueueBH, q_bh);
+}
+
+DEFINE_TLS(int, net_enter) = 0;
+
 NetQueue *qemu_new_net_queue(NetClientState *nc)
 {
     NetQueue *queue;
@@ -183,9 +203,20 @@ ssize_t qemu_net_queue_send(NetQueue *queue,
                             NetPacketSent *sent_cb)
 {
     ssize_t ret;
+    int reenter;
 
-    if (queue->delivering || !qemu_can_send_packet(sender)) {
+    reenter = ++tls_var(net_enter);
+    assert(reenter <= 2);
+    if (queue->delivering || !qemu_can_send_packet(sender) ||
+        reenter == 2) {
         qemu_net_queue_append(queue, sender, flags, data, size, sent_cb);
+        if (reenter == 2) {
+            NetQueueBH *que_bh = g_slice_new(NetQueueBH);
+            que_bh->bh = qemu_bh_new(qemu_net_queue_send_bh, que_bh);
+            que_bh->nc = queue->nc;
+            qemu_bh_schedule(que_bh->bh);
+            --tls_var(net_enter);
+        }
         return 0;
     }
 
@@ -196,6 +227,7 @@ ssize_t qemu_net_queue_send(NetQueue *queue,
     }
 
     qemu_net_queue_flush(queue);
+    --tls_var(net_enter);
 
     return ret;
 }
@@ -208,9 +240,20 @@ ssize_t qemu_net_queue_send_iov(NetQueue *queue,
                                 NetPacketSent *sent_cb)
 {
     ssize_t ret;
+    int reenter;
 
-    if (queue->delivering || !qemu_can_send_packet(sender)) {
+    reenter = ++tls_var(net_enter);
+    assert(reenter <= 2);
+    if (queue->delivering || !qemu_can_send_packet(sender) ||
+        reenter == 2) {
         qemu_net_queue_append_iov(queue, sender, flags, iov, iovcnt, sent_cb);
+        if (reenter == 2) {
+            NetQueueBH *que_bh = g_slice_new(NetQueueBH);
+            que_bh->bh = qemu_bh_new(qemu_net_queue_send_bh, que_bh);
+            que_bh->nc = queue->nc;
+            qemu_bh_schedule(que_bh->bh);
+            --tls_var(net_enter);
+        }
         return 0;
     }
 
@@ -221,6 +264,7 @@ ssize_t qemu_net_queue_send_iov(NetQueue *queue,
     }
 
     qemu_net_queue_flush(queue);
+    --tls_var(net_enter);
 
     return ret;
 }
-- 
1.8.1.4




reply via email to

[Prev in Thread] Current Thread [Next in Thread]