qemu-devel

Re: [Qemu-devel] [PATCH] net: vhost-user reconnect


From: Michael S. Tsirkin
Subject: Re: [Qemu-devel] [PATCH] net: vhost-user reconnect
Date: Mon, 13 Jul 2015 18:18:51 +0300

On Mon, Jul 13, 2015 at 02:44:37PM +0000, Matus Fabian -X (matfabia - Pantheon Technologies SRO at Cisco) wrote:
> If the userspace switch restarts, it will reconnect to the unix socket, but
> QEMU will not resend any vhost-user information, which basically means that
> a userspace switch restart requires a restart of the VM.
> The fix detects that the userspace switch has disconnected and notifies the
> VM that the link status is down. After the userspace switch reconnects, QEMU
> resends the vhost-user information to it and notifies the VM that the link
> is up.
> 
> Signed-off-by: Matus Fabian <address@hidden>
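
A rough model of the recovery path the patch adds, for orientation before
the diff (a toy sketch, not QEMU code; the event names mirror the patch's
CHR_EVENT_HUP/CHR_EVENT_RECON, and the "replay" step stands in for the
VHOST_SET_OWNER, VHOST_SET_MEM_TABLE and VHOST_SET_VRING_* calls reissued
below):

    #include <stdbool.h>
    #include <stdio.h>

    enum { EV_HUP, EV_RECON };

    struct vu_state {
        bool backend_up;
        bool guest_link_up;
    };

    static void handle_event(struct vu_state *s, int ev)
    {
        switch (ev) {
        case EV_HUP:                  /* backend socket hung up */
            s->backend_up = false;
            s->guest_link_up = false; /* VIRTIO_NET_S_LINK_UP cleared */
            printf("link down, waiting for backend\n");
            break;
        case EV_RECON:                /* new backend accepted */
            s->backend_up = true;
            /* the patch replays owner, memory table and vring setup
             * here before raising the link again */
            s->guest_link_up = true;
            printf("state replayed, link up\n");
            break;
        }
    }

    int main(void)
    {
        struct vu_state s = { true, true };
        handle_event(&s, EV_HUP);
        handle_event(&s, EV_RECON);
        return 0;
    }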

This only works most of the time.  Reconnect can't be implemented
without guest changes: once the backend is gone, QEMU alone has no way
to tell which in-flight buffers the old backend already processed, so
blindly replaying the device state can drop or duplicate packets.
See http://mid.gmane.org/address@hidden
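
To make the failure mode concrete, here is a toy illustration (plain C,
not QEMU code) of the state that dies with the backend: the guest's avail
index is visible in guest memory, but the dead backend's private
last-avail counter is not, so nobody knows which of the already-fetched
buffers were actually processed.

    #include <stdint.h>
    #include <stdio.h>

    /* Toy vring bookkeeping; descriptors and rings omitted. */
    struct toy_ring {
        uint16_t guest_avail_idx;    /* written by guest, readable by QEMU */
        uint16_t backend_last_avail; /* lived only inside the dead backend */
        uint16_t used_idx;           /* last completion the backend published */
    };

    int main(void)
    {
        /* Guest queued 10 buffers; the backend fetched 7 but had only
         * completed 5 when it crashed. */
        struct toy_ring r = { 10, 7, 5 };

        /* After reconnect, QEMU can recover these from guest memory... */
        printf("avail idx = %u, used idx = %u\n",
               r.guest_avail_idx, r.used_idx);

        /* ...but not backend_last_avail.  Buffers 5 and 6 were fetched,
         * maybe even transmitted, yet still look unused.  Restart the new
         * backend at used_idx and they may be sent twice; skip ahead and
         * they are silently lost.  Hence the need for guest cooperation. */
        return 0;
    }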


> ---
>  hw/net/vhost_net.c         | 126 +++++++++++++++++++++++++++++++++++++++++
>  hw/net/virtio-net.c        |  62 ++++++++++++--------
>  hw/virtio/vhost.c          | 137 +++++++++++++++++++++++++++++++++++++++++++++
>  hw/virtio/virtio.c         |  24 ++++++++
>  include/hw/virtio/vhost.h  |   8 +++
>  include/hw/virtio/virtio.h |   1 +
>  include/net/vhost_net.h    |   7 +++
>  include/sysemu/char.h      |   3 +
>  net/vhost-user.c           |  24 ++++++++
>  qemu-char.c                | 101 +++++++++++++++++++++++++++++++++
>  10 files changed, 470 insertions(+), 23 deletions(-)
> 
> diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
> index 9bd360b..16a31d7 100644
> --- a/hw/net/vhost_net.c
> +++ b/hw/net/vhost_net.c
> @@ -167,6 +167,7 @@ struct vhost_net *vhost_net_init(VhostNetOptions *options)
>      if (r < 0) {
>          goto fail;
>      }
> +    net->dev.features |= 1 << VIRTIO_NET_F_STATUS;
>      if (backend_kernel) {
>          if (!qemu_has_vnet_hdr_len(options->net_backend,
>                                 sizeof(struct virtio_net_hdr_mrg_rxbuf))) {
> @@ -380,6 +381,23 @@ void vhost_net_cleanup(struct vhost_net *net)
>      g_free(net);
>  }
>  
> +void vhost_net_reconnect_init(VHostNetState *net)
> +{
> +    int r;
> +
> +    r = vhost_dev_reconnect_init(&net->dev);
> +    if (r < 0) {
> +        fprintf(stderr, "vhost reconnect init failed: %d\n", r);
> +        fflush(stderr);
> +    }
> +    net->dev.features |= 1 << VIRTIO_NET_F_STATUS;
> +}
> +
> +void vhost_net_hup(VHostNetState *net)
> +{
> +    vhost_dev_hup(&net->dev);
> +}
> +
>  bool vhost_net_virtqueue_pending(VHostNetState *net, int idx)
>  {
>      return vhost_virtqueue_pending(&net->dev, idx);
> @@ -412,6 +430,89 @@ VHostNetState *get_vhost_net(NetClientState *nc)
>  
>      return vhost_net;
>  }
> +
> +bool vhost_net_in_reconnect(NetClientState *nc)
> +{
> +    VHostNetState *vhost_net = 0;
> +
> +    if (!nc) {
> +        return false;
> +    }
> +    switch (nc->info->type) {
> +    case NET_CLIENT_OPTIONS_KIND_TAP:
> +        vhost_net = tap_get_vhost_net(nc);
> +        break;
> +    case NET_CLIENT_OPTIONS_KIND_VHOST_USER:
> +        vhost_net = vhost_user_get_vhost_net(nc);
> +        break;
> +    default:
> +        break;
> +    }
> +
> +    if (!vhost_net) {
> +        return false;
> +    }
> +
> +    return vhost_dev_in_reconnect(&vhost_net->dev);
> +}
> +
> +bool vhost_net_is_hup(NetClientState *nc)
> +{
> +    VHostNetState *vhost_net = 0;
> +
> +    if (!nc) {
> +        return false;
> +    }
> +    switch (nc->info->type) {
> +    case NET_CLIENT_OPTIONS_KIND_TAP:
> +        vhost_net = tap_get_vhost_net(nc);
> +        break;
> +    case NET_CLIENT_OPTIONS_KIND_VHOST_USER:
> +        vhost_net = vhost_user_get_vhost_net(nc);
> +        break;
> +    default:
> +        break;
> +    }
> +
> +    if (!vhost_net) {
> +        return false;
> +    }
> +
> +    return vhost_dev_is_hup(&vhost_net->dev);
> +}
> +
> +int vhost_net_reconnect(VirtIODevice *dev, NetClientState *ncs,
> +                        int total_queues)
> +{
> +    int r, i;
> +
> +    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
> +    VirtioBusState *vbus = VIRTIO_BUS(qbus);
> +    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
> +
> +    if (!k->set_guest_notifiers) {
> +        error_report("binding does not support guest notifiers");
> +        r = -ENOSYS;
> +        return r;
> +    }
> +
> +    r = k->set_guest_notifiers(qbus->parent, total_queues * 2, true);
> +    if (r < 0) {
> +        error_report("Error binding guest notifier: %d", -r);
> +        return r;
> +    }
> +
> +    for (i = 0; i < total_queues; i++) {
> +        VHostNetState *net = get_vhost_net(ncs[i].peer);
> +        r = vhost_dev_reconnect(&net->dev, dev);
> +        vhost_dev_reconnect_done(&net->dev);
> +        if (r < 0) {
> +            return r;
> +        }
> +    }
> +    return 0;
> +}
> +
>  #else
>  struct vhost_net *vhost_net_init(VhostNetOptions *options)
>  {
> @@ -457,4 +558,29 @@ VHostNetState *get_vhost_net(NetClientState *nc)
>  {
>      return 0;
>  }
> +
> +void vhost_net_reconnect_init(VHostNetState *net)
> +{
> +}
> +
> +bool vhost_net_in_reconnect(NetClientState *nc)
> +{
> +    return false;
> +}
> +
> +int vhost_net_reconnect(VirtIODevice *dev, NetClientState *ncs,
> +                        int total_queues)
> +{
> +    return -ENOSYS;
> +}
> +
> +void vhost_net_hup(VHostNetState *net)
> +{
> +}
> +
> +bool vhost_net_is_hup(NetClientState *nc)
> +{
> +    return false;
> +}
> +
>  #endif
> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> index d728233..490a187 100644
> --- a/hw/net/virtio-net.c
> +++ b/hw/net/virtio-net.c
> @@ -121,6 +121,11 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
>          return;
>      }
>  
> +    if (vhost_net_in_reconnect(nc->peer)) {
> +        vhost_net_reconnect(vdev, n->nic->ncs, queues);
> +        return;
> +    }
> +
>      if ((virtio_net_started(n, status) && !nc->peer->link_down) ==
>          !!n->vhost_started) {
>          return;
> @@ -156,36 +161,44 @@ static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
>  {
>      VirtIONet *n = VIRTIO_NET(vdev);
>      VirtIONetQueue *q;
> +    NetClientState *nc = qemu_get_queue(n->nic);
>      int i;
>      uint8_t queue_status;
>  
> -    virtio_net_vhost_status(n, status);
> -
> -    for (i = 0; i < n->max_queues; i++) {
> -        q = &n->vqs[i];
> +    if (vhost_net_is_hup(nc->peer) && !(status & VIRTIO_NET_S_LINK_UP)) {
> +        return;
> +    }
>  
> -        if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
> -            queue_status = 0;
> -        } else {
> -            queue_status = status;
> -        }
> +    virtio_net_vhost_status(n, status);
>  
> -        if (!q->tx_waiting) {
> -            continue;
> -        }
> +    if (!vhost_net_in_reconnect(nc->peer)) {
> +        for (i = 0; i < n->max_queues; i++) {
> +            q = &n->vqs[i];
>  
> -        if (virtio_net_started(n, queue_status) && !n->vhost_started) {
> -            if (q->tx_timer) {
> -                timer_mod(q->tx_timer,
> -                               qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
> +            if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
> +                queue_status = 0;
>              } else {
> -                qemu_bh_schedule(q->tx_bh);
> +                queue_status = status;
>              }
> -        } else {
> -            if (q->tx_timer) {
> -                timer_del(q->tx_timer);
> +
> +            if (!q->tx_waiting) {
> +                continue;
> +            }
> +
> +            if (virtio_net_started(n, queue_status) && !n->vhost_started) {
> +                if (q->tx_timer) {
> +                    timer_mod(q->tx_timer,
> +                              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
> +                                                n->tx_timeout);
> +                } else {
> +                    qemu_bh_schedule(q->tx_bh);
> +                }
>              } else {
> -                qemu_bh_cancel(q->tx_bh);
> +                if (q->tx_timer) {
> +                    timer_del(q->tx_timer);
> +                } else {
> +                    qemu_bh_cancel(q->tx_bh);
> +                }
>              }
>          }
>      }
> @@ -197,10 +210,13 @@ static void virtio_net_set_link_status(NetClientState *nc)
>      VirtIODevice *vdev = VIRTIO_DEVICE(n);
>      uint16_t old_status = n->status;
>  
> -    if (nc->link_down)
> +    if (nc->link_down) {
>          n->status &= ~VIRTIO_NET_S_LINK_UP;
> -    else
> +        vdev->status &= ~VIRTIO_NET_S_LINK_UP;
> +    } else {
>          n->status |= VIRTIO_NET_S_LINK_UP;
> +        vdev->status |= VIRTIO_NET_S_LINK_UP;
> +    }
>  
>      if (n->status != old_status)
>          virtio_notify_config(vdev);
> diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
> index 2712c6f..5edaa9a 100644
> --- a/hw/virtio/vhost.c
> +++ b/hw/virtio/vhost.c
> @@ -817,6 +817,51 @@ fail_alloc_desc:
>      return r;
>  }
>  
> +static int vhost_virtqueue_resend(struct vhost_dev *dev,
> +                                  struct VirtIODevice *vdev,
> +                                  struct vhost_virtqueue *vq,
> +                                  unsigned idx)
> +{
> +    int r;
> +    int vhost_vq_index = idx - dev->vq_index;
> +    struct vhost_vring_file file = {
> +        .index = vhost_vq_index
> +    };
> +    struct vhost_vring_state state = {
> +        .index = vhost_vq_index
> +    };
> +    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);
> +
> +    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
> +
> +    vq->num = state.num = virtio_queue_get_num(vdev, idx);
> +    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_NUM, &state);
> +    if (r) {
> +        return -errno;
> +    }
> +
> +    state.num = virtio_queue_reset_and_get_last_avail_idx(vdev, idx);
> +    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_BASE, &state);
> +    if (r) {
> +        return -errno;
> +    }
> +    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
> +    if (r < 0) {
> +        return -errno;
> +    }
> +
> +    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
> +    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_KICK, &file);
> +    if (r) {
> +        return -errno;
> +    }
> +
> +    /* Clear and discard previous events if any. */
> +    event_notifier_test_and_clear(&vq->masked_notifier);
> +
> +    return 0;
> +}
> +
>  static void vhost_virtqueue_stop(struct vhost_dev *dev,
>                                      struct VirtIODevice *vdev,
>                                      struct vhost_virtqueue *vq,
> @@ -872,6 +917,17 @@ static void vhost_eventfd_del(MemoryListener *listener,
>  {
>  }
>  
> +static int vhost_virtqueue_recon(struct vhost_dev *dev,
> +                                 struct vhost_virtqueue *vq, int n)
> +{
> +    struct vhost_vring_file file = {
> +        .index = n,
> +    };
> +
> +    file.fd = event_notifier_get_fd(&vq->masked_notifier);
> +    return dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_CALL, &file);
> +}
> +
>  static int vhost_virtqueue_init(struct vhost_dev *dev,
>                                  struct vhost_virtqueue *vq, int n)
>  {
> @@ -962,6 +1018,8 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
>      hdev->log_size = 0;
>      hdev->log_enabled = false;
>      hdev->started = false;
> +    hdev->in_reconnect = false;
> +    hdev->hup = false;
>      hdev->memory_changed = false;
>      memory_listener_register(&hdev->memory_listener, &address_space_memory);
>      return 0;
> @@ -1184,3 +1242,82 @@ void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
>      hdev->log_size = 0;
>  }
>  
> +int vhost_dev_reconnect_init(struct vhost_dev *hdev)
> +{
> +    uint64_t features;
> +    int i, r;
> +
> +    r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_OWNER, NULL);
> +    if (r < 0) {
> +        return r;
> +    }
> +    r = hdev->vhost_ops->vhost_call(hdev, VHOST_GET_FEATURES, &features);
> +    if (r < 0) {
> +        return r;
> +    }
> +    hdev->features = features;
> +    hdev->in_reconnect = true;
> +
> +    for (i = 0; i < hdev->nvqs; ++i) {
> +        r = vhost_virtqueue_recon(hdev, hdev->vqs + i, i);
> +        if (r < 0) {
> +            return r;
> +        }
> +    }
> +
> +    return r;
> +}
> +
> +void vhost_dev_hup(struct vhost_dev *hdev)
> +{
> +    hdev->hup = true;
> +}
> +
> +bool vhost_dev_in_reconnect(struct vhost_dev *hdev)
> +{
> +    return hdev->in_reconnect;
> +}
> +
> +bool vhost_dev_is_hup(struct vhost_dev *hdev)
> +{
> +    return hdev->hup;
> +}
> +
> +void vhost_dev_reconnect_done(struct vhost_dev *hdev)
> +{
> +    hdev->in_reconnect = false;
> +    hdev->hup = false;
> +}
> +
> +int vhost_dev_reconnect(struct vhost_dev *hdev, VirtIODevice *vdev)
> +{
> +    int i, r;
> +
> +    r = vhost_dev_set_features(hdev, hdev->log_enabled);
> +    if (r < 0) {
> +        return r;
> +    }
> +    r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_MEM_TABLE, hdev->mem);
> +    if (r < 0) {
> +        return r;
> +    }
> +    for (i = 0; i < hdev->nvqs; ++i) {
> +        r = vhost_virtqueue_resend(hdev, vdev, hdev->vqs + i,
> +                                   hdev->vq_index + i);
> +        if (r < 0) {
> +            return r;
> +        }
> +    }
> +
> +    if (hdev->log_enabled) {
> +        hdev->log_size = vhost_get_log_size(hdev);
> +        hdev->log = hdev->log_size ?
> +            g_malloc0(hdev->log_size * sizeof *hdev->log) : NULL;
> +        r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_LOG_BASE, hdev->log);
> +        if (r < 0) {
> +            return r;
> +        }
> +    }
> +
> +    return r;
> +}
> diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
> index ee4e07c..ea7e1c7 100644
> --- a/hw/virtio/virtio.c
> +++ b/hw/virtio/virtio.c
> @@ -151,6 +151,13 @@ static inline uint16_t vring_avail_idx(VirtQueue *vq)
>      return virtio_lduw_phys(vq->vdev, pa);
>  }
>  
> +static inline void vring_set_avail_idx(VirtQueue *vq, uint16_t val)
> +{
> +    hwaddr pa;
> +    pa = vq->vring.avail + offsetof(VRingAvail, idx);
> +    virtio_stw_phys(vq->vdev, pa, val);
> +}
> +
>  static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
>  {
>      hwaddr pa;
> @@ -1424,6 +1431,23 @@ uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
>      return vdev->vq[n].last_avail_idx;
>  }
>  
> +uint16_t virtio_queue_reset_and_get_last_avail_idx(VirtIODevice *vdev, int n)
> +{
> +    uint16_t r;
> +    VirtQueue *q;
> +
> +    q = virtio_get_queue(vdev, n);
> +
> +    if (n % 2) {
> +        r = vring_avail_idx(q);
> +        vring_set_avail_idx(q, r);
> +    } else {
> +        r = vring_used_idx(q);
> +    }
> +
> +    return r;
> +}
> +
>  void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
>  {
>      vdev->vq[n].last_avail_idx = idx;
> diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
> index dd51050..cae4fe9 100644
> --- a/include/hw/virtio/vhost.h
> +++ b/include/hw/virtio/vhost.h
> @@ -48,6 +48,8 @@ struct vhost_dev {
>      unsigned long long acked_features;
>      unsigned long long backend_features;
>      bool started;
> +    bool in_reconnect;
> +    bool hup;
>      bool log_enabled;
>      unsigned long long log_size;
>      Error *migration_blocker;
> @@ -67,6 +69,12 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev);
>  void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev);
>  int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);
>  void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);
> +int vhost_dev_reconnect_init(struct vhost_dev *hdev);
> +int vhost_dev_reconnect(struct vhost_dev *hdev, VirtIODevice *vdev);
> +bool vhost_dev_in_reconnect(struct vhost_dev *hdev);
> +void vhost_dev_reconnect_done(struct vhost_dev *hdev);
> +void vhost_dev_hup(struct vhost_dev *hdev);
> +bool vhost_dev_is_hup(struct vhost_dev *hdev);
>  
>  /* Test and clear masked event pending status.
>   * Should be called after unmask to avoid losing events.
> diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
> index 473fb75..c1564f1 100644
> --- a/include/hw/virtio/virtio.h
> +++ b/include/hw/virtio/virtio.h
> @@ -225,6 +225,7 @@ hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n);
>  hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n);
>  hwaddr virtio_queue_get_ring_size(VirtIODevice *vdev, int n);
>  uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n);
> +uint16_t virtio_queue_reset_and_get_last_avail_idx(VirtIODevice *vdev, int n);
>  void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx);
>  void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n);
>  VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n);
> diff --git a/include/net/vhost_net.h b/include/net/vhost_net.h
> index 840d4b1..5ef6fdd 100644
> --- a/include/net/vhost_net.h
> +++ b/include/net/vhost_net.h
> @@ -20,6 +20,13 @@ void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs, int total_queues);
>  
>  void vhost_net_cleanup(VHostNetState *net);
>  
> +void vhost_net_reconnect_init(VHostNetState *net);
> +bool vhost_net_in_reconnect(NetClientState *nc);
> +int vhost_net_reconnect(VirtIODevice *dev, NetClientState *ncs,
> +                        int total_queues);
> +void vhost_net_hup(VHostNetState *net);
> +bool vhost_net_is_hup(NetClientState *nc);
> +
>  uint64_t vhost_net_get_features(VHostNetState *net, uint64_t features);
>  void vhost_net_ack_features(VHostNetState *net, uint64_t features);
>  
> diff --git a/include/sysemu/char.h b/include/sysemu/char.h
> index 832b7fe..9eb9b8c 100644
> --- a/include/sysemu/char.h
> +++ b/include/sysemu/char.h
> @@ -18,6 +18,8 @@
>  #define CHR_EVENT_MUX_IN  3 /* mux-focus was set to this terminal */
>  #define CHR_EVENT_MUX_OUT 4 /* mux-focus will move on */
>  #define CHR_EVENT_CLOSED  5 /* connection closed */
> +#define CHR_EVENT_RECON   6 /* connection reconnected */
> +#define CHR_EVENT_HUP     7 /* connection hung up */
>  
>  
>  #define CHR_IOCTL_SERIAL_SET_PARAMS   1
> @@ -84,6 +86,7 @@ struct CharDriverState {
>      int avail_connections;
>      int is_mux;
>      guint fd_in_tag;
> +    guint fd_hup_tag;
>      QemuOpts *opts;
>      QTAILQ_ENTRY(CharDriverState) next;
>  };
> diff --git a/net/vhost-user.c b/net/vhost-user.c
> index b51bc04..33511a4 100644
> --- a/net/vhost-user.c
> +++ b/net/vhost-user.c
> @@ -112,6 +112,20 @@ static void net_vhost_link_down(VhostUserState *s, bool link_down)
>      }
>  }
>  
> +static void vhost_user_reconnect(VhostUserState *s)
> +{
> +    if (vhost_user_running(s)) {
> +        vhost_net_reconnect_init(s->vhost_net);
> +    }
> +}
> +
> +static void vhost_user_hup(VhostUserState *s)
> +{
> +    if (vhost_user_running(s)) {
> +        vhost_net_hup(s->vhost_net);
> +    }
> +}
> +
>  static void net_vhost_user_event(void *opaque, int event)
>  {
>      VhostUserState *s = opaque;
> @@ -127,6 +141,16 @@ static void net_vhost_user_event(void *opaque, int event)
>          vhost_user_stop(s);
>          error_report("chardev \"%s\" went down", s->nc.info_str);
>          break;
> +    case CHR_EVENT_RECON:
> +        vhost_user_reconnect(s);
> +        net_vhost_link_down(s, false);
> +        error_report("chardev \"%s\" reconnected", s->nc.info_str);
> +        break;
> +    case CHR_EVENT_HUP:
> +        vhost_user_hup(s);
> +        net_vhost_link_down(s, true);
> +        error_report("chardev \"%s\" hung up", s->nc.info_str);
> +        break;
>      }
>  }
>  
> diff --git a/qemu-char.c b/qemu-char.c
> index 617e034..9f42d04 100644
> --- a/qemu-char.c
> +++ b/qemu-char.c
> @@ -2904,6 +2904,105 @@ CharDriverState *qemu_chr_open_eventfd(int eventfd)
>  }
>  #endif
>  
> +static gboolean tcp_chr_chan_hup(GIOChannel *channel, GIOCondition cond,
> +                                 void *opaque);
> +
> +static void tcp_chr_telnet_init(int fd);
> +
> +static gboolean tcp_chr_recon(GIOChannel *channel, GIOCondition cond,
> +                              void *opaque)
> +{
> +    CharDriverState *chr = opaque;
> +    TCPCharDriver *s = chr->opaque;
> +    struct sockaddr_in saddr;
> +#ifndef _WIN32
> +    struct sockaddr_un uaddr;
> +#endif
> +    struct sockaddr *addr;
> +    socklen_t len;
> +    int fd;
> +
> +    for (;;) {
> +#ifndef _WIN32
> +        if (s->is_unix) {
> +            len = sizeof(uaddr);
> +            addr = (struct sockaddr *)&uaddr;
> +        } else
> +#endif
> +        {
> +            len = sizeof(saddr);
> +            addr = (struct sockaddr *)&saddr;
> +        }
> +        fd = qemu_accept(s->listen_fd, addr, &len);
> +        if (fd < 0 && errno != EINTR) {
> +            s->listen_tag = 0;
> +            return FALSE;
> +        } else if (fd >= 0) {
> +            if (s->do_telnetopt) {
> +                tcp_chr_telnet_init(fd);
> +            }
> +            break;
> +        }
> +    }
> +    qemu_set_nonblock(fd);
> +    if (s->do_nodelay) {
> +        socket_set_nodelay(fd);
> +    }
> +    s->fd = fd;
> +    s->chan = io_channel_from_socket(fd);
> +    SocketAddress_to_str(chr->filename, CHR_MAX_FILENAME_SIZE,
> +                         "", s->addr, s->is_listen, s->is_telnet);
> +    s->connected = 1;
> +    if (s->chan) {
> +        chr->fd_in_tag = io_add_watch_poll(s->chan, tcp_chr_read_poll,
> +                                           tcp_chr_read, chr);
> +    }
> +    if (chr->chr_event) {
> +        chr->chr_event(chr->handler_opaque, CHR_EVENT_RECON);
> +    }
> +    if (s->listen_tag) {
> +        g_source_remove(s->listen_tag);
> +        s->listen_tag = 0;
> +    }
> +    if (s->chan) {
> +        chr->fd_hup_tag = g_io_add_watch(s->chan, G_IO_HUP, tcp_chr_chan_hup,
> +                                         chr);
> +    }
> +
> +    return TRUE;
> +}
> +
> +static gboolean tcp_chr_chan_hup(GIOChannel *channel, GIOCondition cond,
> +                                 void *opaque)
> +{
> +    CharDriverState *chr = opaque;
> +    TCPCharDriver *s = chr->opaque;
> +
> +    if (!(cond & G_IO_HUP)) {
> +        return FALSE;
> +    }
> +    if (chr->fd_hup_tag) {
> +        g_source_remove(chr->fd_hup_tag);
> +        chr->fd_hup_tag = 0;
> +    }
> +    if (s->is_listen) {
> +        s->listen_tag = g_io_add_watch(s->listen_chan, G_IO_IN, tcp_chr_recon,
> +                                       chr);
> +    }
> +    remove_fd_in_watch(chr);
> +    g_io_channel_unref(s->chan);
> +    s->chan = NULL;
> +    closesocket(s->fd);
> +    s->fd = -1;
> +    SocketAddress_to_str(chr->filename, CHR_MAX_FILENAME_SIZE,
> +                         "disconnected:", s->addr, s->is_listen, s->is_telnet);
> +    if (chr->chr_event) {
> +        chr->chr_event(chr->handler_opaque, CHR_EVENT_HUP);
> +    }
> +
> +    return TRUE;
> +}
> +
>  static void tcp_chr_connect(void *opaque)
>  {
>      CharDriverState *chr = opaque;
> @@ -2928,6 +3027,8 @@ static void tcp_chr_connect(void *opaque)
>      if (s->chan) {
>          chr->fd_in_tag = io_add_watch_poll(s->chan, tcp_chr_read_poll,
>                                             tcp_chr_read, chr);
> +        chr->fd_hup_tag = g_io_add_watch(s->chan, G_IO_HUP, tcp_chr_chan_hup,
> +                                         chr);
>      }
>      qemu_chr_be_generic_open(chr);
>  }
> -- 
> 1.9.1
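
For what it's worth, this path only triggers when QEMU is the listening
side of the vhost-user socket, since tcp_chr_chan_hup only re-arms the
listen channel when s->is_listen is set. An untested sketch of the usual
invocation (option spelling from the 2.x command line):

    -chardev socket,id=chr0,path=/tmp/vhost-user.sock,server,nowait \
    -netdev type=vhost-user,id=net0,chardev=chr0 \
    -device virtio-net-pci,netdev=net0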


