From: Jason Wang
Subject: Re: [Qemu-devel] [PATCH 11/12] virtio-net: migration support for multiqueue
Date: Tue, 08 Jan 2013 17:27:32 +0800
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:17.0) Gecko/17.0 Thunderbird/17.0

On 01/08/2013 03:10 PM, Michael S. Tsirkin wrote:
> On Fri, Dec 28, 2012 at 06:32:03PM +0800, Jason Wang wrote:
>> This patch adds migration support for multiqueue virtio-net. The
>> migration version is bumped to 12.
>>
>> Signed-off-by: Jason Wang <address@hidden>
>> ---
>>  hw/virtio-net.c |   45 +++++++++++++++++++++++++++++++++++----------
>>  1 files changed, 35 insertions(+), 10 deletions(-)
>>
>> diff --git a/hw/virtio-net.c b/hw/virtio-net.c
>> index aaeef1b..ca4b804 100644
>> --- a/hw/virtio-net.c
>> +++ b/hw/virtio-net.c
>> @@ -21,7 +21,7 @@
>>  #include "virtio-net.h"
>>  #include "vhost_net.h"
>>  
>> -#define VIRTIO_NET_VM_VERSION    11
>> +#define VIRTIO_NET_VM_VERSION    12
> Please don't, use a subsection instead.

OK, but virtio-net has not been converted to VMState, so we can only
emulate the subsection.
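
For reference, a real VMState subsection (what the suggestion above asks
for) would look roughly like the sketch below. Illustrative only:
virtio-net's save/load here is still hand-rolled, and the field types
are assumed to be uint16_t to match the be16 stream format.

static const VMStateDescription vmstate_virtio_net_mq = {
    .name = "virtio-net/multiqueue",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(max_queues, VirtIONet),
        VMSTATE_UINT16(curr_queues, VirtIONet),
        VMSTATE_END_OF_LIST()
    }
};

/* Paired with a .needed callback in the parent vmsd's subsection
 * list, so the section is only transmitted when it is relevant. */
static bool mq_needed(void *opaque)
{
    VirtIONet *n = opaque;
    return n->max_queues > 1;
}

The point of a subsection is that it is skipped entirely when .needed
returns false, so single-queue configurations keep the old stream
format unchanged.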
>>  #define MAC_TABLE_ENTRIES    64
>>  #define MAX_VLAN    (1 << 12)   /* Per 802.1Q definition */
>> @@ -1058,16 +1058,18 @@ static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue, int ctrl)
>>  
>>  static void virtio_net_save(QEMUFile *f, void *opaque)
>>  {
>> +    int i;
>>      VirtIONet *n = opaque;
>> -    VirtIONetQueue *q = &n->vqs[0];
>>  
>> -    /* At this point, backend must be stopped, otherwise
>> -     * it might keep writing to memory. */
>> -    assert(!q->vhost_started);
>> +    for (i = 0; i < n->max_queues; i++) {
>> +        /* At this point, backend must be stopped, otherwise
>> +         * it might keep writing to memory. */
>> +        assert(!n->vqs[i].vhost_started);
>> +    }
>>      virtio_save(&n->vdev, f);
>>  
>>      qemu_put_buffer(f, n->mac, ETH_ALEN);
>> -    qemu_put_be32(f, q->tx_waiting);
>> +    qemu_put_be32(f, n->vqs[0].tx_waiting);
>>      qemu_put_be32(f, n->mergeable_rx_bufs);
>>      qemu_put_be16(f, n->status);
>>      qemu_put_byte(f, n->promisc);
>> @@ -1083,13 +1085,17 @@ static void virtio_net_save(QEMUFile *f, void *opaque)
>>      qemu_put_byte(f, n->nouni);
>>      qemu_put_byte(f, n->nobcast);
>>      qemu_put_byte(f, n->has_ufo);
>> +    qemu_put_be16(f, n->max_queues);
> Above is specified by user so seems unnecessary in the migration stream.

It is used to prevent the following case:

Migrating from a 4-queue device to a 2-queue device with only 1 queue
active. If we don't send max_queues, the guest may still think it can
use 4 queues after migration.
> Below should only be put if relevant: check host feature bit
> set and/or max_queues > 1.

Right.
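
For example (a sketch of what the next version could do, not code from
this patch; the load side would need the matching condition):

    /* Only emit multiqueue state when it is relevant. Checking
     * max_queues > 1 here; a host feature bit check would work
     * the same way. */
    if (n->max_queues > 1) {
        qemu_put_be16(f, n->max_queues);
        qemu_put_be16(f, n->curr_queues);
        for (i = 1; i < n->curr_queues; i++) {
            qemu_put_be32(f, n->vqs[i].tx_waiting);
        }
    }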
>
>> +    qemu_put_be16(f, n->curr_queues);
>> +    for (i = 1; i < n->curr_queues; i++) {
>> +        qemu_put_be32(f, n->vqs[i].tx_waiting);
>> +    }
>>  }
>>  
>>  static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
>>  {
>>      VirtIONet *n = opaque;
>> -    VirtIONetQueue *q = &n->vqs[0];
>> -    int ret, i;
>> +    int ret, i, link_down;
>>  
>>      if (version_id < 2 || version_id > VIRTIO_NET_VM_VERSION)
>>          return -EINVAL;
>> @@ -1100,7 +1106,7 @@ static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
>>      }
>>  
>>      qemu_get_buffer(f, n->mac, ETH_ALEN);
>> -    q->tx_waiting = qemu_get_be32(f);
>> +    n->vqs[0].tx_waiting = qemu_get_be32(f);
>>  
>>      virtio_net_set_mrg_rx_bufs(n, qemu_get_be32(f));
>>  
>> @@ -1170,6 +1176,22 @@ static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
>>          }
>>      }
>>  
>> +    if (version_id >= 12) {
>> +        if (n->max_queues != qemu_get_be16(f)) {
>> +            error_report("virtio-net: different max_queues ");
>> +            return -1;
>> +        }
>> +
>> +        n->curr_queues = qemu_get_be16(f);
>> +        for (i = 1; i < n->curr_queues; i++) {
>> +            n->vqs[i].tx_waiting = qemu_get_be32(f);
>> +        }
>> +    }
>> +
>> +    virtio_net_set_queues(n);
>> +    /* Must do this again, since we may have more than one active queues. */
> s/queues/queue/
>
> Also I didn't understand why it's here.
> It seems that virtio has vm running callback,
> and that will invoke virtio_net_set_status after vm load.
> No?

True, will remove it next version.

Thanks
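
For context, the callback referred to above is virtio's VM state change
handler, registered with qemu_add_vm_change_state_handler() at device
init. A simplified paraphrase (not an exact copy of hw/virtio.c; the
real handler also coordinates with the transport binding):

static void virtio_vmstate_change(void *opaque, int running,
                                  RunState state)
{
    VirtIODevice *vdev = opaque;

    /* Once the VM runs after an incoming migration, push the restored
     * status back into the device. For virtio-net this reaches
     * virtio_net_set_status(), which is why the explicit call in the
     * load path above is redundant. */
    if (running && (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        virtio_set_status(vdev, vdev->status);
    }
}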
>
>> +    virtio_net_set_status(&n->vdev, n->status);
>> +
>>      /* Find the first multicast entry in the saved MAC filter */
>>      for (i = 0; i < n->mac_table.in_use; i++) {
>>          if (n->mac_table.macs[i * ETH_ALEN] & 1) {
>> @@ -1180,7 +1202,10 @@ static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
>>  
>>      /* nc.link_down can't be migrated, so infer link_down according
>>       * to link status bit in n->status */
>> -    qemu_get_queue(n->nic)->link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
>> +    link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
>> +    for (i = 0; i < n->max_queues; i++) {
>> +        qemu_get_subqueue(n->nic, i)->link_down = link_down;
>> +    }
>>  
>>      return 0;
>>  }
>> -- 
>> 1.7.1



