Re: [Qemu-devel] [PATCH v4 4/6] numa, pc-dimm: Store pc-dimm memory information in numa_info


From: Bharata B Rao
Subject: Re: [Qemu-devel] [PATCH v4 4/6] numa, pc-dimm: Store pc-dimm memory information in numa_info
Date: Thu, 2 Jul 2015 09:37:28 +0530
User-agent: Mutt/1.5.23 (2014-03-12)

On Mon, Jun 29, 2015 at 04:53:12PM +0200, Igor Mammedov wrote:
> On Mon, 29 Jun 2015 19:11:30 +0530
> Bharata B Rao <address@hidden> wrote:
> 
> > On Mon, Jun 29, 2015 at 02:08:20PM +0200, Igor Mammedov wrote:
> > > On Mon, 29 Jun 2015 13:50:25 +0530
> > > Bharata B Rao <address@hidden> wrote:
> > > 
> > > > Start storing the (start_addr, end_addr) of the pc-dimm memory
> > > > in corresponding numa_info[node] so that this information can be used
> > > > to lookup node by address.
> > > > 
> > > > Signed-off-by: Bharata B Rao <address@hidden>
> > > Reviewed-by: Igor Mammedov <address@hidden>
> > > 
> > > > ---
> > > >  hw/mem/pc-dimm.c      |  4 ++++
> > > >  include/sysemu/numa.h | 10 ++++++++++
> > > >  numa.c                | 26 ++++++++++++++++++++++++++
> > > >  3 files changed, 40 insertions(+)
> > > > 
> > > > diff --git a/hw/mem/pc-dimm.c b/hw/mem/pc-dimm.c
> > > > index 98971b7..bb04862 100644
> > > > --- a/hw/mem/pc-dimm.c
> > > > +++ b/hw/mem/pc-dimm.c
> > > > @@ -97,6 +97,7 @@ void pc_dimm_memory_plug(DeviceState *dev, MemoryHotplugState *hpms,
> > > >  
> > > >      memory_region_add_subregion(&hpms->mr, addr - hpms->base, mr);
> > > >      vmstate_register_ram(mr, dev);
> > > > +    numa_set_mem_node_id(addr, memory_region_size(mr), dimm->node);
> > > >  
> > > >  out:
> > > >      error_propagate(errp, local_err);
> > > > @@ -105,6 +106,9 @@ out:
> > > >  void pc_dimm_memory_unplug(DeviceState *dev, MemoryHotplugState *hpms,
> > > >                             MemoryRegion *mr)
> > > >  {
> > > > +    PCDIMMDevice *dimm = PC_DIMM(dev);
> > > > +
> > > > +    numa_unset_mem_node_id(dimm->addr, memory_region_size(mr), dimm->node);
> > > >      memory_region_del_subregion(&hpms->mr, mr);
> > > >      vmstate_unregister_ram(mr, dev);
> > > >  }
> > > > diff --git a/include/sysemu/numa.h b/include/sysemu/numa.h
> > > > index 6523b4d..7176364 100644
> > > > --- a/include/sysemu/numa.h
> > > > +++ b/include/sysemu/numa.h
> > > > @@ -10,16 +10,26 @@
> > > >  
> > > >  extern int nb_numa_nodes;   /* Number of NUMA nodes */
> > > >  
> > > > +struct numa_addr_range {
> > > > +    ram_addr_t mem_start;
> > > > +    ram_addr_t mem_end;
> > > > +    QLIST_ENTRY(numa_addr_range) entry;
> > > > +};
> > > > +
> > > >  typedef struct node_info {
> > > >      uint64_t node_mem;
> > > >      DECLARE_BITMAP(node_cpu, MAX_CPUMASK_BITS);
> > > >      struct HostMemoryBackend *node_memdev;
> > > >      bool present;
> > > > +    QLIST_HEAD(, numa_addr_range) addr; /* List to store address ranges */
> > > >  } NodeInfo;
> > > > +
> > > >  extern NodeInfo numa_info[MAX_NODES];
> > > >  void parse_numa_opts(MachineClass *mc);
> > > >  void numa_post_machine_init(void);
> > > >  void query_numa_node_mem(uint64_t node_mem[]);
> > > >  extern QemuOptsList qemu_numa_opts;
> > > > +void numa_set_mem_node_id(ram_addr_t addr, uint64_t size, uint32_t node);
> > > > +void numa_unset_mem_node_id(ram_addr_t addr, uint64_t size, uint32_t node);
> > > >  
> > > >  #endif
> > > > diff --git a/numa.c b/numa.c
> > > > index 91fc6c1..116d1fb 100644
> > > > --- a/numa.c
> > > > +++ b/numa.c
> > > > @@ -52,6 +52,28 @@ static int max_numa_nodeid; /* Highest specified NUMA node ID, plus one.
> > > >  int nb_numa_nodes;
> > > >  NodeInfo numa_info[MAX_NODES];
> > > >  
> > > > +void numa_set_mem_node_id(ram_addr_t addr, uint64_t size, uint32_t node)
> > > > +{
> > > > +    struct numa_addr_range *range = g_malloc0(sizeof(*range));
> > > > +
> > > > +    range->mem_start = addr;
> > > > +    range->mem_end = addr + size - 1;
> > > nit:
> > >  as a patch on top of it, add asserts that check for overflow, pls
> > 
> > You suggested g_assert(size) in the previous version.
> > 
> > However size can be zero when this API is called for boot time memory
> > and I have taken care of that in the next patch (5/6).
> > 
> > And for pc-dimm memory, the size can never be zero.
> > 
> > So do you still think overflow is possible?
> Make the build on a 32-bit host: theoretically, with ram_addr_t == 32 bits and
> size == uint64_t, an overflow could be possible.
> 
> The overflow check here doesn't do any harm, and it saves the headache of
> debugging when an overflow happens.
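
A minimal sketch of the scenario described above, assuming ram_addr_t is a
32-bit type (as it is on a 32-bit host build); the address and size values
here are made up purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t ram_addr_t;            /* assumption: 32-bit host build */

    int main(void)
    {
        ram_addr_t addr = 0xF0000000u;      /* hypothetical dimm base address */
        uint64_t   size = 0x20000000u;      /* hypothetical 512 MiB dimm      */
        ram_addr_t end  = addr + size - 1;  /* silently wraps to 0x0FFFFFFF   */

        printf("end = 0x%08x (smaller than addr: the sum wrapped)\n",
               (unsigned)end);
        return 0;
    }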

Something like the below, which applies on Eduardo's numa branch?

numa: Check for overflow in numa_[un]set_mem_node_id()

From: Bharata B Rao <address@hidden>

Assert when the (addr + size) calculation overflows.

Signed-off-by: Bharata B Rao <address@hidden>
---
 numa.c |    9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/numa.c b/numa.c
index 3c80059..c2fc7aa 100644
--- a/numa.c
+++ b/numa.c
@@ -65,16 +65,21 @@ void numa_set_mem_node_id(ram_addr_t addr, uint64_t size, uint32_t node)
     }
 
     range->mem_start = addr;
-    range->mem_end = addr + size - 1;
+    range->mem_end = addr + size;
+    g_assert(range->mem_end >= MAX(addr, size));
+    range->mem_end -= 1;
     QLIST_INSERT_HEAD(&numa_info[node].addr, range, entry);
 }
 
 void numa_unset_mem_node_id(ram_addr_t addr, uint64_t size, uint32_t node)
 {
     struct numa_addr_range *range, *next;
+    ram_addr_t end = addr + size;
 
+    g_assert(end >= MAX(addr, size));
+    end -= 1;
     QLIST_FOREACH_SAFE(range, &numa_info[node].addr, entry, next) {
-        if (addr == range->mem_start && (addr + size - 1) == range->mem_end) {
+        if (addr == range->mem_start && end == range->mem_end) {
             QLIST_REMOVE(range, entry);
             g_free(range);
             return;
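
For reference, a standalone sketch (outside QEMU, with ram_addr_t assumed to
be 32 bits and a local MAX macro standing in for QEMU's) of why checking
end >= MAX(addr, size) catches a wrapped addr + size: an unsigned sum that
wrapped, or was truncated on assignment to a narrower type, is always smaller
than at least one of its operands. The helper name is made up for this sketch.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t ram_addr_t;                  /* assumption: 32-bit host */
    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    /* True if addr + size fits in ram_addr_t without wrapping. */
    static int range_end_ok(ram_addr_t addr, uint64_t size)
    {
        ram_addr_t end = addr + size;             /* may truncate to 32 bits */
        return end >= MAX((uint64_t)addr, size);
    }

    int main(void)
    {
        assert(range_end_ok(0x10000000u, 0x20000000u));   /* fits, no wrap    */
        assert(!range_end_ok(0xF0000000u, 0x20000000u));  /* wraps past 2^32  */
        assert(!range_end_ok(0x10000000u, 1ULL << 32));   /* size alone wraps */
        printf("end >= MAX(addr, size) detects the wrap in both bad cases\n");
        return 0;
    }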



