Re: [PATCH v3 03/23] multifd: Rename used field to num


From: Dr. David Alan Gilbert
Subject: Re: [PATCH v3 03/23] multifd: Rename used field to num
Date: Mon, 13 Dec 2021 15:17:58 +0000
User-agent: Mutt/2.1.3 (2021-09-10)

* Zheng Chuan (zhengchuan@huawei.com) wrote:
> Hi, Juan,
> 
> Sorry, I forgot to send this to qemu-devel; resending it.
> 
> On 2021/11/24 18:05, Juan Quintela wrote:
> > We will need to split it later into zero_num (number of zero pages) and
> > normal_num (number of normal pages).  This name is better.
> > 
> > Signed-off-by: Juan Quintela <quintela@redhat.com>
> > ---
> >  migration/multifd.h |  2 +-
> >  migration/multifd.c | 38 +++++++++++++++++++-------------------
> >  2 files changed, 20 insertions(+), 20 deletions(-)
> > 
> > diff --git a/migration/multifd.h b/migration/multifd.h
> > index 15c50ca0b2..86820dd028 100644
> > --- a/migration/multifd.h
> > +++ b/migration/multifd.h
> > @@ -55,7 +55,7 @@ typedef struct {
> >  
> >  typedef struct {
> >      /* number of used pages */
> > -    uint32_t used;
> > +    uint32_t num;
> >      /* number of allocated pages */
> >      uint32_t allocated;
> >      /* global number of generated multifd packets */
> > diff --git a/migration/multifd.c b/migration/multifd.c
> > index 8125d0015c..8ea86d81dc 100644
> > --- a/migration/multifd.c
> > +++ b/migration/multifd.c
> > @@ -252,7 +252,7 @@ static MultiFDPages_t *multifd_pages_init(size_t size)
> >  
> >  static void multifd_pages_clear(MultiFDPages_t *pages)
> >  {
> > -    pages->used = 0;
> > +    pages->num = 0;
> >      pages->allocated = 0;
> >      pages->packet_num = 0;
> >      pages->block = NULL;
> > @@ -270,7 +270,7 @@ static void multifd_send_fill_packet(MultiFDSendParams *p)
> >  
> >      packet->flags = cpu_to_be32(p->flags);
> >      packet->pages_alloc = cpu_to_be32(p->pages->allocated);
> > -    packet->pages_used = cpu_to_be32(p->pages->used);
> > +    packet->pages_used = cpu_to_be32(p->pages->num);
> >      packet->next_packet_size = cpu_to_be32(p->next_packet_size);
> >      packet->packet_num = cpu_to_be64(p->packet_num);
> >  
> > @@ -278,7 +278,7 @@ static void multifd_send_fill_packet(MultiFDSendParams *p)
> >          strncpy(packet->ramblock, p->pages->block->idstr, 256);
> >      }
> >  
> > -    for (i = 0; i < p->pages->used; i++) {
> > +    for (i = 0; i < p->pages->num; i++) {
> >          /* there are architectures where ram_addr_t is 32 bit */
> >          uint64_t temp = p->pages->offset[i];
> >  
> > @@ -332,18 +332,18 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
> >          p->pages = multifd_pages_init(packet->pages_alloc);
> >      }
> >  
> > -    p->pages->used = be32_to_cpu(packet->pages_used);
> > -    if (p->pages->used > packet->pages_alloc) {
> > +    p->pages->num = be32_to_cpu(packet->pages_used);
> > +    if (p->pages->num > packet->pages_alloc) {
> >          error_setg(errp, "multifd: received packet "
> >                     "with %d pages and expected maximum pages are %d",
> > -                   p->pages->used, packet->pages_alloc) ;
> > +                   p->pages->num, packet->pages_alloc) ;
> >          return -1;
> >      }
> >  
> >      p->next_packet_size = be32_to_cpu(packet->next_packet_size);
> >      p->packet_num = be64_to_cpu(packet->packet_num);
> >  
> > -    if (p->pages->used == 0) {
> > +    if (p->pages->num == 0) {
> >          return 0;
> >      }
> >  
> > @@ -356,7 +356,7 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
> >          return -1;
> >      }
> >  
> > -    for (i = 0; i < p->pages->used; i++) {
> > +    for (i = 0; i < p->pages->num; i++) {
> >          uint64_t offset = be64_to_cpu(packet->offset[i]);
> >  
> >          if (offset > (block->used_length - page_size)) {
> > @@ -443,13 +443,13 @@ static int multifd_send_pages(QEMUFile *f)
> >          }
> >          qemu_mutex_unlock(&p->mutex);
> >      }
> > -    assert(!p->pages->used);
> > +    assert(!p->pages->num);
> >      assert(!p->pages->block);
> >  
> >      p->packet_num = multifd_send_state->packet_num++;
> >      multifd_send_state->pages = p->pages;
> >      p->pages = pages;
> > -    transferred = ((uint64_t) pages->used) * qemu_target_page_size()
> > +    transferred = ((uint64_t) pages->num) * qemu_target_page_size()
> >                  + p->packet_len;
> The size of a zero page should not be counted as a whole page size.
> I think 'transferred' should be updated after you introduce zero_num in
> the following patches, such as:
> +    transferred = ((uint64_t) p->normal_num) * qemu_target_page_size()
> +               + ((uint64_t) p->zero_num) * sizeof(uint64_t);
> Otherwise, migration time will get worse if we have a low bandwidth
> limit parameter set.
> 
> I tested it with a bandwidth limit of 100MB/s and it works fine :)

Yes, I think you're right; 'transferred' is normally a measure of used
network bandwidth.
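
For illustration only, here is a minimal standalone sketch comparing the
two accounting schemes; TARGET_PAGE_SIZE and the page counts are made-up
stand-ins, since normal_num and zero_num only land in later patches of
this series:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TARGET_PAGE_SIZE 4096u  /* stand-in for qemu_target_page_size() */

    int main(void)
    {
        /* Made-up packet: 1000 queued pages, 900 of which are zero pages. */
        uint64_t normal_num = 100;
        uint64_t zero_num = 900;
        uint64_t packet_len = 1024;  /* illustrative header size */

        /* Current accounting: every page billed as a full target page. */
        uint64_t old_bytes = (normal_num + zero_num) * TARGET_PAGE_SIZE
                           + packet_len;

        /* Proposed accounting: a zero page only costs its 64-bit offset
         * entry in the packet, not a whole page of payload. */
        uint64_t new_bytes = normal_num * TARGET_PAGE_SIZE
                           + zero_num * sizeof(uint64_t)
                           + packet_len;

        printf("old accounting: %" PRIu64 " bytes\n", old_bytes);
        printf("new accounting: %" PRIu64 " bytes\n", new_bytes);
        return 0;
    }

With 900 zero pages out of 1000, the old scheme bills roughly 3.5 MiB
more than actually hits the wire, which is why a low bandwidth limit
would slow migration down needlessly.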

Dave

> >      qemu_file_update_transfer(f, transferred);
> >      ram_counters.multifd_bytes += transferred;
> > @@ -469,12 +469,12 @@ int multifd_queue_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset)
> >      }
> >  
> >      if (pages->block == block) {
> > -        pages->offset[pages->used] = offset;
> > -        pages->iov[pages->used].iov_base = block->host + offset;
> > -        pages->iov[pages->used].iov_len = qemu_target_page_size();
> > -        pages->used++;
> > +        pages->offset[pages->num] = offset;
> > +        pages->iov[pages->num].iov_base = block->host + offset;
> > +        pages->iov[pages->num].iov_len = qemu_target_page_size();
> > +        pages->num++;
> >  
> > -        if (pages->used < pages->allocated) {
> > +        if (pages->num < pages->allocated) {
> >              return 1;
> >          }
> >      }
> > @@ -586,7 +586,7 @@ void multifd_send_sync_main(QEMUFile *f)
> >      if (!migrate_use_multifd()) {
> >          return;
> >      }
> > -    if (multifd_send_state->pages->used) {
> > +    if (multifd_send_state->pages->num) {
> >          if (multifd_send_pages(f) < 0) {
> >              error_report("%s: multifd_send_pages fail", __func__);
> >              return;
> > @@ -649,7 +649,7 @@ static void *multifd_send_thread(void *opaque)
> >          qemu_mutex_lock(&p->mutex);
> >  
> >          if (p->pending_job) {
> > -            uint32_t used = p->pages->used;
> > +            uint32_t used = p->pages->num;
> >              uint64_t packet_num = p->packet_num;
> >              flags = p->flags;
> >  
> > @@ -665,7 +665,7 @@ static void *multifd_send_thread(void *opaque)
> >              p->flags = 0;
> >              p->num_packets++;
> >              p->num_pages += used;
> > -            p->pages->used = 0;
> > +            p->pages->num = 0;
> >              p->pages->block = NULL;
> >              qemu_mutex_unlock(&p->mutex);
> >  
> > @@ -1091,7 +1091,7 @@ static void *multifd_recv_thread(void *opaque)
> >              break;
> >          }
> >  
> > -        used = p->pages->used;
> > +        used = p->pages->num;
> >          flags = p->flags;
> >          /* recv methods don't know how to handle the SYNC flag */
> >          p->flags &= ~MULTIFD_FLAG_SYNC;
> > 
> 
> -- 
> Regards.
> Chuan
> 
-- 
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK



