qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Qemu-devel] [PATCH 04/18] virtio: Return error from virtqueue_next_desc


From: Fam Zheng
Subject: Re: [Qemu-devel] [PATCH 04/18] virtio: Return error from virtqueue_next_desc
Date: Tue, 21 Apr 2015 15:30:23 +0800
User-agent: Mutt/1.5.23 (2014-03-12)

On Tue, 04/21 08:37, Michael S. Tsirkin wrote:
> On Fri, Apr 17, 2015 at 03:59:19PM +0800, Fam Zheng wrote:
> > Two callers pass error_abort now, which can be changed to check return value
> > and pass the error on.
> > 
> > Signed-off-by: Fam Zheng <address@hidden>
> > ---
> >  hw/virtio/virtio.c | 27 ++++++++++++++++++---------
> >  1 file changed, 18 insertions(+), 9 deletions(-)
> > 
> > diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
> > index a525f8e..2a24829 100644
> > --- a/hw/virtio/virtio.c
> > +++ b/hw/virtio/virtio.c
> > @@ -329,10 +329,11 @@ static int virtqueue_get_head(VirtQueue *vq, unsigned 
> > int idx,
> >      return head;
> >  }
> >  
> > -static unsigned virtqueue_next_desc(VirtIODevice *vdev, hwaddr desc_pa,
> > -                                    unsigned int i, unsigned int max)
> > +static int virtqueue_next_desc(VirtIODevice *vdev, hwaddr desc_pa,
> > +                               unsigned int i, unsigned int max,
> > +                               Error **errp)
> >  {
> > -    unsigned int next;
> > +    int next;
> >  
> >      /* If this descriptor says it doesn't chain, we're done. */
> >      if (!(vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_NEXT)) {
> > @@ -345,8 +346,8 @@ static unsigned virtqueue_next_desc(VirtIODevice *vdev, 
> > hwaddr desc_pa,
> >      smp_wmb();
> >  
> >      if (next >= max) {
> > -        error_report("Desc next is %u", next);
> > -        exit(1);
> > +        error_setg(errp, "Desc next is %u", next);
> > +        return -EINVAL;
> 
> I think it's best to return max here. No need to change return type
> then.

We use negative return codes elsewhere for reporting errors, so I personally
prefer -EINVAL.  Are you concerned about overflow?

> 
> >      }
> >  
> >      return next;
> > @@ -392,7 +393,7 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned 
> > int *in_bytes,
> >              num_bufs = i = 0;
> >          }
> >  
> > -        do {
> > +        while (true) {
> >              /* If we've got too many, that implies a descriptor loop. */
> >              if (++num_bufs > max) {
> >                  error_report("Looped descriptor");
> > @@ -407,7 +408,11 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned 
> > int *in_bytes,
> >              if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
> >                  goto done;
> >              }
> > -        } while ((i = virtqueue_next_desc(vdev, desc_pa, i, max)) != max);
> > +            i = virtqueue_next_desc(vdev, desc_pa, i, max, &error_abort);
> > +            if (i == max) {
> > +                break;
> > +            }
> > +        }
> >  
> >          if (!indirect)
> >              total_bufs = num_bufs;
> > @@ -493,7 +498,7 @@ int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
> >      }
> >  
> >      /* Collect all the descriptors */
> > -    do {
> > +    while (true) {
> >          struct iovec *sg;
> >  
> >          if (vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_WRITE) {
> > @@ -519,7 +524,11 @@ int virtqueue_pop(VirtQueue *vq, VirtQueueElement 
> > *elem)
> >              error_report("Looped descriptor");
> >              exit(1);
> >          }
> > -    } while ((i = virtqueue_next_desc(vdev, desc_pa, i, max)) != max);
> > +        i = virtqueue_next_desc(vdev, desc_pa, i, max, &error_abort);
> > +        if (i == max) {
> > +            break;
> > +        }
> > +    }
> >  
> 
> Why refactor this as part of this patch?

Graceful error handling will need to un-inline the loop condition, so refactor
it as we're touching the line.

Fam

> 
> >      /* Now map what we have collected */
> >      virtqueue_map_sg(elem->in_sg, elem->in_addr, elem->in_num, 1,
> > -- 
> > 1.9.3



reply via email to

[Prev in Thread] Current Thread [Next in Thread]