From: Ilya Maximets
Subject: [Qemu-devel] [PATCH 1/4] vhost-user: fix crash on socket disconnect.
Date: Wed, 30 Mar 2016 18:14:06 +0300

After a disconnect of the vhost-user socket (e.g. a crash of the host
application), QEMU crashes with a segmentation fault while the virtio
driver is being unbound inside the guest.

This happens because vhost_net_cleanup() is called while virtqueue #0
is being stopped, triggered by the CHR_EVENT_CLOSED event that arrives
on disconnect. Stopping virtqueue #1 then crashes because the device
memory has already been cleaned up and freed:

[-------------------------------- cut -----------------------------------]
[guest]# echo -n '0000:00:01.0' > /sys/bus/pci/drivers/virtio-pci/unbind
qemu: Failed to read msg header. Read 0 instead of 12. Original request 11.
qemu: Failed to read msg header. Read 0 instead of 12. Original request 11.
qemu: Failed to read msg header. Read 0 instead of 12. Original request 11.

Child terminated with signal = 0xb (SIGSEGV)
GDBserver exiting
[-------------------------------- cut -----------------------------------]
[host]# gdb
Breakpoint 1 vhost_user_cleanup(dev) at hw/virtio/vhost-user.c
(gdb) bt
#0  vhost_user_cleanup # executes 'dev->opaque = 0;'
#1  vhost_dev_cleanup
#2  vhost_net_cleanup  # After return from #1: g_free(net)
#3  vhost_user_stop
#4  net_vhost_user_event (<...>, event=5) /* CHR_EVENT_CLOSED */
#5  qemu_chr_be_event (<...>, address@hidden)
#6  tcp_chr_disconnect
#7  tcp_chr_sync_read
#8  qemu_chr_fe_read_all
#9  vhost_user_read
#10 vhost_user_get_vring_base
#11 vhost_virtqueue_stop (vq=0xffe190, idx=0)
#12 vhost_dev_stop
#13 vhost_net_stop_one
#14 vhost_net_stop
#15 virtio_net_vhost_status
#16 virtio_net_set_status
#17 virtio_set_status
<...>
(gdb) c
Continuing.

Program received signal SIGSEGV, Segmentation fault.
qemu_chr_fe_write_all (address@hidden, <...>) at qemu-char.c:302
302         if (s->replay && replay_mode == REPLAY_MODE_PLAY) {
(gdb) bt
#0  qemu_chr_fe_write_all (address@hidden, <...>)
#1  vhost_user_write
#2  vhost_user_get_vring_base
#3  vhost_virtqueue_stop (vq=0xffe190, idx=1) # device already freed here
#4  vhost_dev_stop
#5  vhost_net_stop_one
#6  vhost_net_stop
#7  virtio_net_vhost_status
#8  virtio_net_set_status
#9  virtio_set_status
<...>
[-------------------------------- cut -----------------------------------]
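
In short: stopping virtqueue #0 re-enters the chardev layer, the
disconnect event frees the device, and the loop then touches freed
memory when stopping virtqueue #1. A toy standalone reproduction of
that pattern (an illustrative sketch only, names condensed from the
backtraces above; not the actual QEMU code):

#include <stdio.h>
#include <stdlib.h>

struct vhost_net { int vq_count; };
static struct vhost_net *global_net;    /* owned by the net client */

static void chr_closed_event(void)      /* CHR_EVENT_CLOSED handler */
{
    /* vhost_user_stop() -> vhost_net_cleanup() + g_free(net) */
    free(global_net);
    global_net = NULL;                  /* callers' local copies still dangle */
}

static void virtqueue_stop(struct vhost_net *net, int idx)
{
    if (idx == 0) {
        chr_closed_event();   /* GET_VRING_BASE read fails, disconnect fires */
    }
    printf("vq %d of %d\n", idx, net->vq_count);  /* idx == 1: use-after-free */
}

int main(void)
{
    struct vhost_net *net = calloc(1, sizeof(*net));
    net->vq_count = 2;
    global_net = net;
    for (int idx = 0; idx < 2; idx++) {
        virtqueue_stop(net, idx);       /* stale 'net' on the second pass */
    }
    return 0;
}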

Fix this by introducing a reference counter for the vhost_net device
and freeing its memory only after the last reference is dropped.
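
The contract mirrored throughout the hunks below: every successful
get_vhost_net() is paired with a put_vhost_net() on the same peer, and
the underlying memory is released only when the counter reaches zero.
A minimal standalone sketch of that pattern (simplified types and
names; the real implementation is in net/vhost-user.c below):

#include <stdlib.h>

typedef struct {
    void *vhost_net;                /* device state, freed on last unref */
    int refcnt;
} VhostUserStateSketch;

static void *sketch_get(VhostUserStateSketch *s)    /* get_vhost_net() */
{
    if (s->vhost_net) {
        s->refcnt++;                /* caller now holds a reference */
    }
    return s->vhost_net;
}

static void sketch_put(VhostUserStateSketch *s)     /* put_vhost_net() */
{
    if (s->vhost_net && --s->refcnt == 0) {
        free(s->vhost_net);         /* stands in for vhost_net_cleanup() */
        s->vhost_net = NULL;
    }
}

With this, a CHR_EVENT_CLOSED handler that drops its own reference can
no longer free the device while the virtqueue stop loop still holds
one; the loop's final put performs the actual cleanup.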

Signed-off-by: Ilya Maximets <address@hidden>
---
 hw/net/vhost_net.c       | 22 +++++++++++++++++++---
 hw/net/virtio-net.c      | 32 ++++++++++++++++++++++++--------
 include/net/vhost-user.h |  1 +
 include/net/vhost_net.h  |  1 +
 net/filter.c             |  1 +
 net/vhost-user.c         | 43 ++++++++++++++++++++++++++++++++++---------
 6 files changed, 80 insertions(+), 20 deletions(-)

diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index 6e1032f..55d5456 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -296,6 +296,7 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
         if (net->nc->info->type == NET_CLIENT_OPTIONS_KIND_VHOST_USER) {
                 dev->use_guest_notifier_mask = false;
         }
+        put_vhost_net(ncs[i].peer);
      }
 
     r = k->set_guest_notifiers(qbus->parent, total_queues * 2, true);
@@ -306,7 +307,7 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
 
     for (i = 0; i < total_queues; i++) {
         r = vhost_net_start_one(get_vhost_net(ncs[i].peer), dev);
-
+        put_vhost_net(ncs[i].peer);
         if (r < 0) {
             goto err_start;
         }
@@ -317,6 +318,7 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
 err_start:
     while (--i >= 0) {
         vhost_net_stop_one(get_vhost_net(ncs[i].peer), dev);
+        put_vhost_net(ncs[i].peer);
     }
     e = k->set_guest_notifiers(qbus->parent, total_queues * 2, false);
     if (e < 0) {
@@ -337,6 +339,7 @@ void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
 
     for (i = 0; i < total_queues; i++) {
         vhost_net_stop_one(get_vhost_net(ncs[i].peer), dev);
+        put_vhost_net(ncs[i].peer);
     }
 
     r = k->set_guest_notifiers(qbus->parent, total_queues * 2, false);
@@ -398,16 +401,25 @@ VHostNetState *get_vhost_net(NetClientState *nc)
     return vhost_net;
 }
 
+void put_vhost_net(NetClientState *nc)
+{
+    if (nc && nc->info->type == NET_CLIENT_OPTIONS_KIND_VHOST_USER) {
+        vhost_user_put_vhost_net(nc);
+    }
+}
+
 int vhost_set_vring_enable(NetClientState *nc, int enable)
 {
     VHostNetState *net = get_vhost_net(nc);
     const VhostOps *vhost_ops = net->dev.vhost_ops;
+    int res = 0;
 
     if (vhost_ops->vhost_set_vring_enable) {
-        return vhost_ops->vhost_set_vring_enable(&net->dev, enable);
+        res = vhost_ops->vhost_set_vring_enable(&net->dev, enable);
     }
 
-    return 0;
+    put_vhost_net(nc);
+    return res;
 }
 
 #else
@@ -466,6 +478,10 @@ VHostNetState *get_vhost_net(NetClientState *nc)
     return 0;
 }
 
+void put_vhost_net(NetClientState *nc)
+{
+}
+
 int vhost_set_vring_enable(NetClientState *nc, int enable)
 {
     return 0;
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 5798f87..e5456ef 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -121,6 +121,7 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
     if (!get_vhost_net(nc->peer)) {
         return;
     }
+    put_vhost_net(nc->peer);
 
     if ((virtio_net_started(n, status) && !nc->peer->link_down) ==
         !!n->vhost_started) {
@@ -521,6 +522,8 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
 {
     VirtIONet *n = VIRTIO_NET(vdev);
     NetClientState *nc = qemu_get_queue(n->nic);
+    VHostNetState *net;
+    uint64_t res;
 
     /* Firstly sync all virtio-net possible supported features */
     features |= n->host_features;
@@ -544,10 +547,15 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
         virtio_clear_feature(&features, VIRTIO_NET_F_HOST_UFO);
     }
 
-    if (!get_vhost_net(nc->peer)) {
-        return features;
+    net = get_vhost_net(nc->peer);
+    if (!net) {
+        res = features;
+    } else {
+        res = vhost_net_get_features(net, features);
+        put_vhost_net(nc->peer);
     }
-    return vhost_net_get_features(get_vhost_net(nc->peer), features);
+
+    return res;
 }
 
 static uint64_t virtio_net_bad_features(VirtIODevice *vdev)
@@ -615,11 +623,13 @@ static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
 
     for (i = 0;  i < n->max_queues; i++) {
         NetClientState *nc = qemu_get_subqueue(n->nic, i);
+        VHostNetState *net = get_vhost_net(nc->peer);
 
-        if (!get_vhost_net(nc->peer)) {
+        if (!net) {
             continue;
         }
-        vhost_net_ack_features(get_vhost_net(nc->peer), features);
+        vhost_net_ack_features(net, features);
+        put_vhost_net(nc->peer);
     }
 
     if (virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) {
@@ -1698,8 +1708,13 @@ static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
 {
     VirtIONet *n = VIRTIO_NET(vdev);
     NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
+    VHostNetState *net = get_vhost_net(nc->peer);
+    bool res;
+
     assert(n->vhost_started);
-    return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
+    res = vhost_net_virtqueue_pending(net, idx);
+    put_vhost_net(nc->peer);
+    return res;
 }
 
 static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
@@ -1707,9 +1722,10 @@ static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
 {
     VirtIONet *n = VIRTIO_NET(vdev);
     NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
+    VHostNetState *net = get_vhost_net(nc->peer);
     assert(n->vhost_started);
-    vhost_net_virtqueue_mask(get_vhost_net(nc->peer),
-                             vdev, idx, mask);
+    vhost_net_virtqueue_mask(net, vdev, idx, mask);
+    put_vhost_net(nc->peer);
 }
 
 static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
diff --git a/include/net/vhost-user.h b/include/net/vhost-user.h
index 85109f6..6bdaa1a 100644
--- a/include/net/vhost-user.h
+++ b/include/net/vhost-user.h
@@ -13,5 +13,6 @@
 
 struct vhost_net;
 struct vhost_net *vhost_user_get_vhost_net(NetClientState *nc);
+void vhost_user_put_vhost_net(NetClientState *nc);
 
 #endif /* VHOST_USER_H_ */
diff --git a/include/net/vhost_net.h b/include/net/vhost_net.h
index 3389b41..577bfa8 100644
--- a/include/net/vhost_net.h
+++ b/include/net/vhost_net.h
@@ -29,6 +29,7 @@ void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev,
                               int idx, bool mask);
 int vhost_net_notify_migration_done(VHostNetState *net, char* mac_addr);
 VHostNetState *get_vhost_net(NetClientState *nc);
+void put_vhost_net(NetClientState *nc);
 
 int vhost_set_vring_enable(NetClientState * nc, int enable);
 #endif
diff --git a/net/filter.c b/net/filter.c
index 1c4fc5a..28403aa 100644
--- a/net/filter.c
+++ b/net/filter.c
@@ -214,6 +214,7 @@ static void netfilter_complete(UserCreatable *uc, Error **errp)
 
     if (get_vhost_net(ncs[0])) {
         error_setg(errp, "Vhost is not supported");
+        put_vhost_net(ncs[0]);
         return;
     }
 
diff --git a/net/vhost-user.c b/net/vhost-user.c
index 1b9e73a..9026df3 100644
--- a/net/vhost-user.c
+++ b/net/vhost-user.c
@@ -22,6 +22,7 @@ typedef struct VhostUserState {
     NetClientState nc;
     CharDriverState *chr;
     VHostNetState *vhost_net;
+    int refcnt;
 } VhostUserState;
 
 typedef struct VhostUserChardevProps {
@@ -29,13 +30,41 @@ typedef struct VhostUserChardevProps {
     bool is_unix;
 } VhostUserChardevProps;
 
+static void vhost_user_net_ref(VhostUserState *s)
+{
+    if (!s->vhost_net) {
+        return;
+    }
+    s->refcnt++;
+}
+
+static void vhost_user_net_unref(VhostUserState *s)
+{
+    if (!s->vhost_net) {
+        return;
+    }
+
+    if (--s->refcnt == 0) {
+        vhost_net_cleanup(s->vhost_net);
+        s->vhost_net = NULL;
+    }
+}
+
 VHostNetState *vhost_user_get_vhost_net(NetClientState *nc)
 {
     VhostUserState *s = DO_UPCAST(VhostUserState, nc, nc);
     assert(nc->info->type == NET_CLIENT_OPTIONS_KIND_VHOST_USER);
+    vhost_user_net_ref(s);
     return s->vhost_net;
 }
 
+void vhost_user_put_vhost_net(NetClientState *nc)
+{
+    VhostUserState *s = DO_UPCAST(VhostUserState, nc, nc);
+    assert(nc->info->type == NET_CLIENT_OPTIONS_KIND_VHOST_USER);
+    vhost_user_net_unref(s);
+}
+
 static int vhost_user_running(VhostUserState *s)
 {
     return (s->vhost_net) ? 1 : 0;
@@ -54,10 +83,7 @@ static void vhost_user_stop(int queues, NetClientState *ncs[])
             continue;
         }
 
-        if (s->vhost_net) {
-            vhost_net_cleanup(s->vhost_net);
-            s->vhost_net = NULL;
-        }
+        vhost_user_net_unref(s);
     }
 }
 
@@ -81,6 +107,7 @@ static int vhost_user_start(int queues, NetClientState *ncs[])
         options.net_backend = ncs[i];
         options.opaque      = s->chr;
         s->vhost_net = vhost_net_init(&options);
+        vhost_user_net_ref(s);
         if (!s->vhost_net) {
             error_report("failed to init vhost_net for queue %d", i);
             goto err;
@@ -119,7 +146,9 @@ static ssize_t vhost_user_receive(NetClientState *nc, const uint8_t *buf,
         /* extract guest mac address from the RARP message */
         memcpy(mac_addr, &buf[6], 6);
 
+        vhost_user_net_ref(s);
         r = vhost_net_notify_migration_done(s->vhost_net, mac_addr);
+        vhost_user_net_unref(s);
 
         if ((r != 0) && (display_rarp_failure)) {
             fprintf(stderr,
@@ -136,11 +165,7 @@ static void vhost_user_cleanup(NetClientState *nc)
 {
     VhostUserState *s = DO_UPCAST(VhostUserState, nc, nc);
 
-    if (s->vhost_net) {
-        vhost_net_cleanup(s->vhost_net);
-        s->vhost_net = NULL;
-    }
-
+    vhost_user_net_unref(s);
     qemu_purge_queued_packets(nc);
 }
 
-- 
2.5.0



