
[RFC 15/52] migration/postcopy-ram: Use generic topology name and helper


From: Zhao Liu
Subject: [RFC 15/52] migration/postcopy-ram: Use generic topology name and helper
Date: Mon, 13 Feb 2023 17:49:58 +0800

From: Zhao Liu <zhao1.liu@intel.com>

As generic code, this path should work with either topology model:
smp or hybrid.

So rename PostcopyBlocktimeContext.smp_cpus_down to
PostcopyBlocktimeContext.cpus_down, and likewise rename the local
smp_cpus variables to cpus_num, to decouple them from the smp topology.

Also switch to the generic topology helper to get the CPU count.
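For context, below is a minimal standalone sketch of the pattern this patch
relies on: a generic accessor that hides whether the machine was configured
with a plain smp topology or a hybrid one. The struct layout, field names and
helper body are assumptions made purely for illustration; they are not the
actual QEMU definitions nor the real machine_topo_get_cpus() implementation.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for the machine topology state (assumed layout, not the
 * real QEMU MachineState). */
typedef struct {
    unsigned int cpus;          /* total vCPUs of a plain -smp config */
} SmpTopology;

typedef struct {
    SmpTopology smp;
    bool hybrid;                /* true when a hybrid topology is in use */
    unsigned int hybrid_cpus;   /* total vCPUs across all hybrid core types */
} ToyMachineState;

/* Sketch in the spirit of machine_topo_get_cpus(): callers get the total
 * vCPU count without knowing which topology model is in use. */
static unsigned int toy_machine_topo_get_cpus(const ToyMachineState *ms)
{
    return ms->hybrid ? ms->hybrid_cpus : ms->smp.cpus;
}

int main(void)
{
    ToyMachineState ms = { .smp = { .cpus = 8 }, .hybrid = false };
    printf("vCPUs: %u\n", toy_machine_topo_get_cpus(&ms));   /* prints 8 */
    return 0;
}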

Cc: Juan Quintela <quintela@redhat.com>
Cc: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Zhao Liu <zhao1.liu@intel.com>
---
 migration/postcopy-ram.c | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c
index 53299b7a5ebd..1e861e313258 100644
--- a/migration/postcopy-ram.c
+++ b/migration/postcopy-ram.c
@@ -122,7 +122,7 @@ typedef struct PostcopyBlocktimeContext {
     /* point in time when last page fault was initiated */
     uint32_t last_begin;
     /* number of vCPU are suspended */
-    int smp_cpus_down;
+    int cpus_down;
     uint64_t start_time;
 
     /*
@@ -150,11 +150,11 @@ static void migration_exit_cb(Notifier *n, void *data)
 static struct PostcopyBlocktimeContext *blocktime_context_new(void)
 {
     MachineState *ms = MACHINE(qdev_get_machine());
-    unsigned int smp_cpus = ms->smp.cpus;
+    unsigned int cpus_num = machine_topo_get_cpus(ms);
     PostcopyBlocktimeContext *ctx = g_new0(PostcopyBlocktimeContext, 1);
-    ctx->page_fault_vcpu_time = g_new0(uint32_t, smp_cpus);
-    ctx->vcpu_addr = g_new0(uintptr_t, smp_cpus);
-    ctx->vcpu_blocktime = g_new0(uint32_t, smp_cpus);
+    ctx->page_fault_vcpu_time = g_new0(uint32_t, cpus_num);
+    ctx->vcpu_addr = g_new0(uintptr_t, cpus_num);
+    ctx->vcpu_blocktime = g_new0(uint32_t, cpus_num);
 
     ctx->exit_notifier.notify = migration_exit_cb;
     ctx->start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
@@ -168,7 +168,7 @@ static uint32List *get_vcpu_blocktime_list(PostcopyBlocktimeContext *ctx)
     uint32List *list = NULL;
     int i;
 
-    for (i = ms->smp.cpus - 1; i >= 0; i--) {
+    for (i = machine_topo_get_cpus(ms) - 1; i >= 0; i--) {
         QAPI_LIST_PREPEND(list, ctx->vcpu_blocktime[i]);
     }
 
@@ -798,7 +798,7 @@ static void mark_postcopy_blocktime_begin(uintptr_t addr, uint32_t ptid,
 
     low_time_offset = get_low_time_offset(dc);
     if (dc->vcpu_addr[cpu] == 0) {
-        qatomic_inc(&dc->smp_cpus_down);
+        qatomic_inc(&dc->cpus_down);
     }
 
     qatomic_xchg(&dc->last_begin, low_time_offset);
@@ -814,7 +814,7 @@ static void mark_postcopy_blocktime_begin(uintptr_t addr, uint32_t ptid,
     if (already_received) {
         qatomic_xchg(&dc->vcpu_addr[cpu], 0);
         qatomic_xchg(&dc->page_fault_vcpu_time[cpu], 0);
-        qatomic_dec(&dc->smp_cpus_down);
+        qatomic_dec(&dc->cpus_down);
     }
     trace_mark_postcopy_blocktime_begin(addr, dc, dc->page_fault_vcpu_time[cpu],
                                         cpu, already_received);
@@ -852,7 +852,7 @@ static void mark_postcopy_blocktime_end(uintptr_t addr)
     MigrationIncomingState *mis = migration_incoming_get_current();
     PostcopyBlocktimeContext *dc = mis->blocktime_ctx;
     MachineState *ms = MACHINE(qdev_get_machine());
-    unsigned int smp_cpus = ms->smp.cpus;
+    unsigned int cpus_num = machine_topo_get_cpus(ms);
     int i, affected_cpu = 0;
     bool vcpu_total_blocktime = false;
     uint32_t read_vcpu_time, low_time_offset;
@@ -866,7 +866,7 @@ static void mark_postcopy_blocktime_end(uintptr_t addr)
      * that algorithm looks straightforward, but it's not
      * optimal, more optimal algorithm is keeping tree or hash
      * where key is address value is a list of  */
-    for (i = 0; i < smp_cpus; i++) {
+    for (i = 0; i < cpus_num; i++) {
         uint32_t vcpu_blocktime = 0;
 
         read_vcpu_time = qatomic_fetch_add(&dc->page_fault_vcpu_time[i], 0);
@@ -881,14 +881,14 @@ static void mark_postcopy_blocktime_end(uintptr_t addr)
          * faulted page, another possible case it's prefetched
          * page and in that case we shouldn't be here */
         if (!vcpu_total_blocktime &&
-            qatomic_fetch_add(&dc->smp_cpus_down, 0) == smp_cpus) {
+            qatomic_fetch_add(&dc->cpus_down, 0) == cpus_num) {
             vcpu_total_blocktime = true;
         }
         /* continue cycle, due to one page could affect several vCPUs */
         dc->vcpu_blocktime[i] += vcpu_blocktime;
     }
 
-    qatomic_sub(&dc->smp_cpus_down, affected_cpu);
+    qatomic_sub(&dc->cpus_down, affected_cpu);
     if (vcpu_total_blocktime) {
         dc->total_blocktime += low_time_offset - qatomic_fetch_add(
                 &dc->last_begin, 0);
-- 
2.34.1



