qemu-ppc
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [PATCH for-6.0 2/8] spapr/xive: Introduce spapr_xive_nr_ends()


From: Greg Kurz
Subject: Re: [PATCH for-6.0 2/8] spapr/xive: Introduce spapr_xive_nr_ends()
Date: Wed, 25 Nov 2020 23:43:26 +0100

On Mon, 23 Nov 2020 14:33:55 +1100
David Gibson <david@gibson.dropbear.id.au> wrote:

> On Fri, Nov 20, 2020 at 06:46:40PM +0100, Greg Kurz wrote:
> > We're going to kill the "nr_ends" field in a subsequent patch.
> > Prepare the ground by using a helper instead of peeking into
> > the sPAPR XIVE structure directly.
> > 
> > Signed-off-by: Greg Kurz <groug@kaod.org>
> 
> Applied to ppc-for-6.0, thanks.
> 

I'm working on a new approach that doesn't need this change. In particular, the
new approach doesn't kill the "nr_ends" field, which makes the changelog of
this patch slightly wrong. Since it doesn't bring much in the end, maybe you
can just drop it from ppc-for-6.0 ?

> 
> > ---
> >  include/hw/ppc/spapr_xive.h |  1 +
> >  hw/intc/spapr_xive.c        | 23 ++++++++++++++---------
> >  hw/intc/spapr_xive_kvm.c    |  4 ++--
> >  3 files changed, 17 insertions(+), 11 deletions(-)
> > 
> > diff --git a/include/hw/ppc/spapr_xive.h b/include/hw/ppc/spapr_xive.h
> > index 26c8d90d7196..4b967f13c030 100644
> > --- a/include/hw/ppc/spapr_xive.h
> > +++ b/include/hw/ppc/spapr_xive.h
> > @@ -75,6 +75,7 @@ void spapr_xive_map_mmio(SpaprXive *xive);
> >  
> >  int spapr_xive_end_to_target(uint8_t end_blk, uint32_t end_idx,
> >                               uint32_t *out_server, uint8_t *out_prio);
> > +uint32_t spapr_xive_nr_ends(const SpaprXive *xive);
> >  
> >  /*
> >   * KVM XIVE device helpers
> > diff --git a/hw/intc/spapr_xive.c b/hw/intc/spapr_xive.c
> > index 60e0d5769dcc..f473ad9cba47 100644
> > --- a/hw/intc/spapr_xive.c
> > +++ b/hw/intc/spapr_xive.c
> > @@ -192,7 +192,7 @@ void spapr_xive_pic_print_info(SpaprXive *xive, Monitor 
> > *mon)
> >              uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
> >              XiveEND *end;
> >  
> > -            assert(end_idx < xive->nr_ends);
> > +            assert(end_idx < spapr_xive_nr_ends(xive));
> >              end = &xive->endt[end_idx];
> >  
> >              if (xive_end_is_valid(end)) {
> > @@ -270,7 +270,7 @@ static void spapr_xive_reset(void *dev)
> >      }
> >  
> >      /* Clear all ENDs */
> > -    for (i = 0; i < xive->nr_ends; i++) {
> > +    for (i = 0; i < spapr_xive_nr_ends(xive); i++) {
> >          spapr_xive_end_reset(&xive->endt[i]);
> >      }
> >  }
> > @@ -288,6 +288,11 @@ static void spapr_xive_instance_init(Object *obj)
> >      xive->fd = -1;
> >  }
> >  
> > +uint32_t spapr_xive_nr_ends(const SpaprXive *xive)
> > +{
> > +    return xive->nr_ends;
> > +}
> > +
> >  static void spapr_xive_realize(DeviceState *dev, Error **errp)
> >  {
> >      SpaprXive *xive = SPAPR_XIVE(dev);
> > @@ -336,7 +341,7 @@ static void spapr_xive_realize(DeviceState *dev, Error 
> > **errp)
> >       * Allocate the routing tables
> >       */
> >      xive->eat = g_new0(XiveEAS, xive->nr_irqs);
> > -    xive->endt = g_new0(XiveEND, xive->nr_ends);
> > +    xive->endt = g_new0(XiveEND, spapr_xive_nr_ends(xive));
> >  
> >      xive->nodename = g_strdup_printf("interrupt-controller@%" PRIx64,
> >                             xive->tm_base + XIVE_TM_USER_PAGE * (1 << 
> > TM_SHIFT));
> > @@ -375,7 +380,7 @@ static int spapr_xive_get_end(XiveRouter *xrtr,
> >  {
> >      SpaprXive *xive = SPAPR_XIVE(xrtr);
> >  
> > -    if (end_idx >= xive->nr_ends) {
> > +    if (end_idx >= spapr_xive_nr_ends(xive)) {
> >          return -1;
> >      }
> >  
> > @@ -389,7 +394,7 @@ static int spapr_xive_write_end(XiveRouter *xrtr, 
> > uint8_t end_blk,
> >  {
> >      SpaprXive *xive = SPAPR_XIVE(xrtr);
> >  
> > -    if (end_idx >= xive->nr_ends) {
> > +    if (end_idx >= spapr_xive_nr_ends(xive)) {
> >          return -1;
> >      }
> >  
> > @@ -1138,7 +1143,7 @@ static target_ulong 
> > h_int_get_source_config(PowerPCCPU *cpu,
> >      /* EAS_END_BLOCK is unused on sPAPR */
> >      end_idx = xive_get_field64(EAS_END_INDEX, eas.w);
> >  
> > -    assert(end_idx < xive->nr_ends);
> > +    assert(end_idx < spapr_xive_nr_ends(xive));
> >      end = &xive->endt[end_idx];
> >  
> >      nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end->w6);
> > @@ -1216,7 +1221,7 @@ static target_ulong h_int_get_queue_info(PowerPCCPU 
> > *cpu,
> >          return H_P2;
> >      }
> >  
> > -    assert(end_idx < xive->nr_ends);
> > +    assert(end_idx < spapr_xive_nr_ends(xive));
> >      end = &xive->endt[end_idx];
> >  
> >      args[0] = xive->end_base + (1ull << (end_xsrc->esb_shift + 1)) * 
> > end_idx;
> > @@ -1304,7 +1309,7 @@ static target_ulong h_int_set_queue_config(PowerPCCPU 
> > *cpu,
> >          return H_P2;
> >      }
> >  
> > -    assert(end_idx < xive->nr_ends);
> > +    assert(end_idx < spapr_xive_nr_ends(xive));
> >      memcpy(&end, &xive->endt[end_idx], sizeof(XiveEND));
> >  
> >      switch (qsize) {
> > @@ -1470,7 +1475,7 @@ static target_ulong h_int_get_queue_config(PowerPCCPU 
> > *cpu,
> >          return H_P2;
> >      }
> >  
> > -    assert(end_idx < xive->nr_ends);
> > +    assert(end_idx < spapr_xive_nr_ends(xive));
> >      end = &xive->endt[end_idx];
> >  
> >      args[0] = 0;
> > diff --git a/hw/intc/spapr_xive_kvm.c b/hw/intc/spapr_xive_kvm.c
> > index 66bf4c06fe55..1566016f0e28 100644
> > --- a/hw/intc/spapr_xive_kvm.c
> > +++ b/hw/intc/spapr_xive_kvm.c
> > @@ -531,7 +531,7 @@ static int kvmppc_xive_get_queues(SpaprXive *xive, 
> > Error **errp)
> >      int i;
> >      int ret;
> >  
> > -    for (i = 0; i < xive->nr_ends; i++) {
> > +    for (i = 0; i < spapr_xive_nr_ends(xive); i++) {
> >          if (!xive_end_is_valid(&xive->endt[i])) {
> >              continue;
> >          }
> > @@ -701,7 +701,7 @@ int kvmppc_xive_post_load(SpaprXive *xive, int 
> > version_id)
> >      assert(xive->fd != -1);
> >  
> >      /* Restore the ENDT first. The targetting depends on it. */
> > -    for (i = 0; i < xive->nr_ends; i++) {
> > +    for (i = 0; i < spapr_xive_nr_ends(xive); i++) {
> >          if (!xive_end_is_valid(&xive->endt[i])) {
> >              continue;
> >          }
> 

Attachment: pgpo4Ib0eYRsJ.pgp
Description: OpenPGP digital signature


reply via email to

[Prev in Thread] Current Thread [Next in Thread]