From: Bharata B Rao
Subject: [Qemu-devel] [PATCH v3 3/5] numa, pc-dimm: Store pc-dimm memory information in numa_info
Date: Fri, 26 Jun 2015 09:36:02 +0530

Start storing the (start_addr, size, nodeid) of the pc-dimm memory in
numa_info so that this information can be used to look up a node by
address.
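
(Illustration only, not part of this patch: a minimal sketch of the kind
of address-to-node lookup these stored ranges enable. The helper name
numa_get_node_for_addr() is hypothetical; an actual lookup API built on
these ranges may differ in signature and error handling.)

    #include "exec/cpu-common.h"   /* ram_addr_t */
    #include "qemu/queue.h"        /* QLIST_FOREACH */
    #include "sysemu/numa.h"       /* numa_info, nb_numa_nodes, numa_addr_range */

    /* Walk the per-node address ranges recorded by numa_set_mem_node_id()
     * and return the node whose range contains addr, or 0 if none matches. */
    static uint32_t numa_get_node_for_addr(ram_addr_t addr)
    {
        struct numa_addr_range *range;
        int i;

        for (i = 0; i < nb_numa_nodes; i++) {
            QLIST_FOREACH(range, &numa_info[i].addr, entry) {
                if (addr >= range->mem_start && addr < range->mem_end) {
                    return i;
                }
            }
        }
        return 0;
    }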

Signed-off-by: Bharata B Rao <address@hidden>
Reviewed-by: David Gibson <address@hidden>
---
 hw/mem/pc-dimm.c      |  4 ++++
 include/sysemu/numa.h | 10 ++++++++++
 numa.c                | 26 ++++++++++++++++++++++++++
 3 files changed, 40 insertions(+)

diff --git a/hw/mem/pc-dimm.c b/hw/mem/pc-dimm.c
index 98971b7..bb04862 100644
--- a/hw/mem/pc-dimm.c
+++ b/hw/mem/pc-dimm.c
@@ -97,6 +97,7 @@ void pc_dimm_memory_plug(DeviceState *dev, MemoryHotplugState *hpms,
 
     memory_region_add_subregion(&hpms->mr, addr - hpms->base, mr);
     vmstate_register_ram(mr, dev);
+    numa_set_mem_node_id(addr, memory_region_size(mr), dimm->node);
 
 out:
     error_propagate(errp, local_err);
@@ -105,6 +106,9 @@ out:
 void pc_dimm_memory_unplug(DeviceState *dev, MemoryHotplugState *hpms,
                            MemoryRegion *mr)
 {
+    PCDIMMDevice *dimm = PC_DIMM(dev);
+
+    numa_unset_mem_node_id(dimm->addr, memory_region_size(mr), dimm->node);
     memory_region_del_subregion(&hpms->mr, mr);
     vmstate_unregister_ram(mr, dev);
 }
diff --git a/include/sysemu/numa.h b/include/sysemu/numa.h
index 6523b4d..7176364 100644
--- a/include/sysemu/numa.h
+++ b/include/sysemu/numa.h
@@ -10,16 +10,26 @@
 
 extern int nb_numa_nodes;   /* Number of NUMA nodes */
 
+struct numa_addr_range {
+    ram_addr_t mem_start;
+    ram_addr_t mem_end;
+    QLIST_ENTRY(numa_addr_range) entry;
+};
+
 typedef struct node_info {
     uint64_t node_mem;
     DECLARE_BITMAP(node_cpu, MAX_CPUMASK_BITS);
     struct HostMemoryBackend *node_memdev;
     bool present;
+    QLIST_HEAD(, numa_addr_range) addr; /* List to store address ranges */
 } NodeInfo;
+
 extern NodeInfo numa_info[MAX_NODES];
 void parse_numa_opts(MachineClass *mc);
 void numa_post_machine_init(void);
 void query_numa_node_mem(uint64_t node_mem[]);
 extern QemuOptsList qemu_numa_opts;
+void numa_set_mem_node_id(ram_addr_t addr, uint64_t size, uint32_t node);
+void numa_unset_mem_node_id(ram_addr_t addr, uint64_t size, uint32_t node);
 
 #endif
diff --git a/numa.c b/numa.c
index 91fc6c1..1f58231 100644
--- a/numa.c
+++ b/numa.c
@@ -52,6 +52,28 @@ static int max_numa_nodeid; /* Highest specified NUMA node ID, plus one.
 int nb_numa_nodes;
 NodeInfo numa_info[MAX_NODES];
 
+void numa_set_mem_node_id(ram_addr_t addr, uint64_t size, uint32_t node)
+{
+    struct numa_addr_range *range = g_malloc0(sizeof(*range));
+
+    range->mem_start = addr;
+    range->mem_end = addr + size;
+    QLIST_INSERT_HEAD(&numa_info[node].addr, range, entry);
+}
+
+void numa_unset_mem_node_id(ram_addr_t addr, uint64_t size, uint32_t node)
+{
+    struct numa_addr_range *range, *next;
+
+    QLIST_FOREACH_SAFE(range, &numa_info[node].addr, entry, next) {
+        if (addr == range->mem_start && (addr + size) == range->mem_end) {
+            QLIST_REMOVE(range, entry);
+            g_free(range);
+            return;
+        }
+    }
+}
+
 static void numa_node_parse(NumaNodeOptions *node, QemuOpts *opts, Error **errp)
 {
     uint16_t nodenr;
@@ -274,6 +296,10 @@ void parse_numa_opts(MachineClass *mc)
         }
 
         for (i = 0; i < nb_numa_nodes; i++) {
+            QLIST_INIT(&numa_info[i].addr);
+        }
+
+        for (i = 0; i < nb_numa_nodes; i++) {
             if (!bitmap_empty(numa_info[i].node_cpu, MAX_CPUMASK_BITS)) {
                 break;
             }
-- 
2.1.0