qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Qemu-devel] [PATCH v8 4/6] migration/ram.c: add ram save state notifiers


From: Dr. David Alan Gilbert
Subject: Re: [Qemu-devel] [PATCH v8 4/6] migration/ram.c: add ram save state notifiers
Date: Tue, 19 Jun 2018 18:31:09 +0100
User-agent: Mutt/1.10.0 (2018-05-17)

* Wei Wang (address@hidden) wrote:
> On 06/12/2018 03:50 PM, Peter Xu wrote:
> > On Fri, Jun 08, 2018 at 04:10:41PM +0800, Wei Wang wrote:
> > > This patch adds a ram save state notifier list, and exposes RAMState for
> > > the notifier callbacks to use.
> > > 
> > > Signed-off-by: Wei Wang <address@hidden>
> > > CC: Dr. David Alan Gilbert <address@hidden>
> > > CC: Juan Quintela <address@hidden>
> > > CC: Michael S. Tsirkin <address@hidden>
> > > CC: Peter Xu <address@hidden>
> > > ---
> > >   include/migration/misc.h | 52 +++++++++++++++++++++++++++++++++++++++
> > >   migration/ram.c          | 64 
> > > +++++++++++++++++-------------------------------
> > >   2 files changed, 75 insertions(+), 41 deletions(-)
> > > 
> > > diff --git a/include/migration/misc.h b/include/migration/misc.h
> > > index 113320e..b970d7d 100644
> > > --- a/include/migration/misc.h
> > > +++ b/include/migration/misc.h
> > > @@ -16,9 +16,61 @@
> > >   #include "exec/cpu-common.h"
> > >   #include "qemu/notify.h"
> > > +#include "qemu/thread.h"
> > >   /* migration/ram.c */
> > > +typedef enum RamSaveState {
> > > +    RAM_SAVE_ERR = 0,
> > > +    RAM_SAVE_RESET = 1,
> > > +    RAM_SAVE_BEFORE_SYNC_BITMAP = 2,
> > > +    RAM_SAVE_AFTER_SYNC_BITMAP = 3,
> > > +    RAM_SAVE_MAX = 4,
> > > +} RamSaveState;
> > > +
> > > +/* State of RAM for migration */
> > > +typedef struct RAMState {
> > > +    /* QEMUFile used for this migration */
> > > +    QEMUFile *f;
> > > +    /* Last block that we have visited searching for dirty pages */
> > > +    RAMBlock *last_seen_block;
> > > +    /* Last block from where we have sent data */
> > > +    RAMBlock *last_sent_block;
> > > +    /* Last dirty target page we have sent */
> > > +    ram_addr_t last_page;
> > > +    /* last ram version we have seen */
> > > +    uint32_t last_version;
> > > +    /* We are in the first round */
> > > +    bool ram_bulk_stage;
> > > +    /* How many times we have dirty too many pages */
> > > +    int dirty_rate_high_cnt;
> > > +    /* ram save states used for notifiers */
> > > +    int ram_save_state;
> > > +    /* these variables are used for bitmap sync */
> > > +    /* last time we did a full bitmap_sync */
> > > +    int64_t time_last_bitmap_sync;
> > > +    /* bytes transferred at start_time */
> > > +    uint64_t bytes_xfer_prev;
> > > +    /* number of dirty pages since start_time */
> > > +    uint64_t num_dirty_pages_period;
> > > +    /* xbzrle misses since the beginning of the period */
> > > +    uint64_t xbzrle_cache_miss_prev;
> > > +    /* number of iterations at the beginning of period */
> > > +    uint64_t iterations_prev;
> > > +    /* Iterations since start */
> > > +    uint64_t iterations;
> > > +    /* number of dirty bits in the bitmap */
> > > +    uint64_t migration_dirty_pages;
> > > +    /* protects modification of the bitmap */
> > > +    QemuMutex bitmap_mutex;
> > > +    /* The RAMBlock used in the last src_page_requests */
> > > +    RAMBlock *last_req_rb;
> > > +    /* Queue of outstanding page requests from the destination */
> > > +    QemuMutex src_page_req_mutex;
> > > +    QSIMPLEQ_HEAD(src_page_requests, RAMSrcPageRequest) 
> > > src_page_requests;
> > > +} RAMState;
> > Why move these chunk?  Can it be avoided?
> > 
> > > +
> > > +void add_ram_save_state_change_notifier(Notifier *notify);
> > >   void ram_mig_init(void);
> > >   void qemu_guest_free_page_hint(void *addr, size_t len);
> > > diff --git a/migration/ram.c b/migration/ram.c
> > > index 237f11e..d45b5a4 100644
> > > --- a/migration/ram.c
> > > +++ b/migration/ram.c
> > > @@ -56,6 +56,9 @@
> > >   #include "qemu/uuid.h"
> > >   #include "savevm.h"
> > > +static NotifierList ram_save_state_notifiers =
> > > +    NOTIFIER_LIST_INITIALIZER(ram_save_state_notifiers);
> > > +
> > >   /***********************************************************/
> > >   /* ram save/restore */
> > > @@ -267,49 +270,18 @@ struct RAMSrcPageRequest {
> > >       QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req;
> > >   };
> > > -/* State of RAM for migration */
> > > -struct RAMState {
> > > -    /* QEMUFile used for this migration */
> > > -    QEMUFile *f;
> > > -    /* Last block that we have visited searching for dirty pages */
> > > -    RAMBlock *last_seen_block;
> > > -    /* Last block from where we have sent data */
> > > -    RAMBlock *last_sent_block;
> > > -    /* Last dirty target page we have sent */
> > > -    ram_addr_t last_page;
> > > -    /* last ram version we have seen */
> > > -    uint32_t last_version;
> > > -    /* We are in the first round */
> > > -    bool ram_bulk_stage;
> > > -    /* How many times we have dirty too many pages */
> > > -    int dirty_rate_high_cnt;
> > > -    /* these variables are used for bitmap sync */
> > > -    /* last time we did a full bitmap_sync */
> > > -    int64_t time_last_bitmap_sync;
> > > -    /* bytes transferred at start_time */
> > > -    uint64_t bytes_xfer_prev;
> > > -    /* number of dirty pages since start_time */
> > > -    uint64_t num_dirty_pages_period;
> > > -    /* xbzrle misses since the beginning of the period */
> > > -    uint64_t xbzrle_cache_miss_prev;
> > > -    /* number of iterations at the beginning of period */
> > > -    uint64_t iterations_prev;
> > > -    /* Iterations since start */
> > > -    uint64_t iterations;
> > > -    /* number of dirty bits in the bitmap */
> > > -    uint64_t migration_dirty_pages;
> > > -    /* protects modification of the bitmap */
> > > -    QemuMutex bitmap_mutex;
> > > -    /* The RAMBlock used in the last src_page_requests */
> > > -    RAMBlock *last_req_rb;
> > > -    /* Queue of outstanding page requests from the destination */
> > > -    QemuMutex src_page_req_mutex;
> > > -    QSIMPLEQ_HEAD(src_page_requests, RAMSrcPageRequest) 
> > > src_page_requests;
> > > -};
> > > -typedef struct RAMState RAMState;
> > > -
> > >   static RAMState *ram_state;
> > > +void add_ram_save_state_change_notifier(Notifier *notify)
> > > +{
> > > +    notifier_list_add(&ram_save_state_notifiers, notify);
> > > +}
> > > +
> > > +static void notify_ram_save_state_change_notifier(void)
> > > +{
> > > +    notifier_list_notify(&ram_save_state_notifiers, ram_state);
> > > +}
> > > +
> > >   uint64_t ram_bytes_remaining(void)
> > >   {
> > >       return ram_state ? (ram_state->migration_dirty_pages * 
> > > TARGET_PAGE_SIZE) :
> > > @@ -1139,6 +1111,9 @@ static void migration_bitmap_sync(RAMState *rs)
> > >       int64_t end_time;
> > >       uint64_t bytes_xfer_now;
> > > +    rs->ram_save_state = RAM_SAVE_BEFORE_SYNC_BITMAP;
> > What's the point to keep this state after all?...
> > 
> > > +    notify_ram_save_state_change_notifier();
> > > +
> > >       ram_counters.dirty_sync_count++;
> > >       if (!rs->time_last_bitmap_sync) {
> > > @@ -1205,6 +1180,9 @@ static void migration_bitmap_sync(RAMState *rs)
> > >       if (migrate_use_events()) {
> > >           qapi_event_send_migration_pass(ram_counters.dirty_sync_count, 
> > > NULL);
> > >       }
> > > +
> > > +    rs->ram_save_state = RAM_SAVE_AFTER_SYNC_BITMAP;
> > > +    notify_ram_save_state_change_notifier();
> > >   }
> > >   /**
> > > @@ -1961,6 +1939,8 @@ static void ram_state_reset(RAMState *rs)
> > >       rs->last_page = 0;
> > >       rs->last_version = ram_list.version;
> > >       rs->ram_bulk_stage = true;
> > > +    rs->ram_save_state = RAM_SAVE_RESET;
> > ... and this state is much more meaningless afaict ...
> > 
> > > +    notify_ram_save_state_change_notifier();
> > >   }
> > >   #define MAX_WAIT 50 /* ms, half buffered_file limit */
> > > @@ -2709,6 +2689,8 @@ out:
> > >       ret = qemu_file_get_error(f);
> > >       if (ret < 0) {
> > > +        rs->ram_save_state = RAM_SAVE_ERR;
> > > +        notify_ram_save_state_change_notifier();
> > >           return ret;
> > >       }
> > Also, I would prefer to add a general event framework for migration
> > rather than "ram save" specific.  Then we add SYNC_START/SYNC_END
> > events.  These notifiers aren't a lot, it'll be a bit awkward to
> > introduce the framework multiple times for similar purpose (after we
> > have a common notifier we can merge the postcopy notifiers AFAIU).
> 
> Hi Peter,
> 
> Thanks for the review. We just got a little accident in the kernel part,
> which may cause some modifications to the QEMU side interfaces. So I will
> implement a new version and let you review in v9 patches.

Yes, but please do try and keep RAMState private to ram.c.

Dave

> Best,
> Wei
> 
--
Dr. David Alan Gilbert / address@hidden / Manchester, UK



reply via email to

[Prev in Thread] Current Thread [Next in Thread]