[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [PATCH v4 2/7] vdpa: Extract vhost_vdpa_net_cvq_add from vhost_vdpa_
From: |
Eugenio Perez Martin |
Subject: |
Re: [PATCH v4 2/7] vdpa: Extract vhost_vdpa_net_cvq_add from vhost_vdpa_net_handle_ctrl_avail |
Date: |
Mon, 1 Aug 2022 09:35:09 +0200 |
On Tue, Jul 26, 2022 at 4:50 AM Jason Wang <jasowang@redhat.com> wrote:
>
>
> 在 2022/7/22 19:12, Eugenio Pérez 写道:
> > So we can reuse to inject state messages.
> >
> > Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> > ---
> > net/vhost-vdpa.c | 74 ++++++++++++++++++++++++++++++------------------
> > 1 file changed, 47 insertions(+), 27 deletions(-)
> >
> > diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> > index 6abad276a6..1b82ac2e07 100644
> > --- a/net/vhost-vdpa.c
> > +++ b/net/vhost-vdpa.c
> > @@ -334,6 +334,46 @@ static bool vhost_vdpa_net_cvq_map_elem(VhostVDPAState
> > *s,
> > return true;
> > }
> >
> > +static virtio_net_ctrl_ack vhost_vdpa_net_cvq_add(VhostShadowVirtqueue
> > *svq,
> > + const struct iovec
> > *dev_buffers)
>
>
> Let's make this support any layout by accepting in/out sg.
>
I'll change it for the next version.
>
> > +{
> > + /* in buffer used for device model */
> > + virtio_net_ctrl_ack status;
> > + size_t dev_written;
> > + int r;
> > +
> > + /*
> > + * Add a fake non-NULL VirtQueueElement since we'll remove before SVQ
> > + * event loop can get it.
> > + */
> > + r = vhost_svq_add(svq, &dev_buffers[0], 1, &dev_buffers[1], 1, (void
> > *)1);
>
>
> I'd suggest avoiding tricks like (void *)1, which are usually a hint
> of a defect in the API.
>
> We can either:
>
> 1) make vhost_svq_get() check ndescs instead of elem
>
> or
>
> 2) simple pass sg
>
Option one actually sounds great; let me try it and I'll send a new version.
Thanks!
> Thanks
>
>
> > + if (unlikely(r != 0)) {
> > + if (unlikely(r == -ENOSPC)) {
> > + qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device
> > queue\n",
> > + __func__);
> > + }
> > + return VIRTIO_NET_ERR;
> > + }
> > +
> > + /*
> > + * We can poll here since we've had BQL from the time we sent the
> > + * descriptor. Also, we need to take the answer before SVQ pulls by
> > itself,
> > + * when BQL is released
> > + */
> > + dev_written = vhost_svq_poll(svq);
> > + if (unlikely(dev_written < sizeof(status))) {
> > + error_report("Insufficient written data (%zu)", dev_written);
> > + return VIRTIO_NET_ERR;
> > + }
> > +
> > + memcpy(&status, dev_buffers[1].iov_base, sizeof(status));
> > + if (status != VIRTIO_NET_OK) {
> > + return VIRTIO_NET_ERR;
> > + }
> > +
> > + return VIRTIO_NET_OK;
> > +}
> > +
> > /**
> > * Do not forward commands not supported by SVQ. Otherwise, the device
> > could
> > * accept it and qemu would not know how to update the device model.
> > @@ -380,19 +420,18 @@ static int
> > vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
> > void *opaque)
> > {
> > VhostVDPAState *s = opaque;
> > - size_t in_len, dev_written;
> > + size_t in_len;
> > virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
> > /* out and in buffers sent to the device */
> > struct iovec dev_buffers[2] = {
> > { .iov_base = s->cvq_cmd_out_buffer },
> > { .iov_base = s->cvq_cmd_in_buffer },
> > };
> > - /* in buffer used for device model */
> > + /* in buffer seen by virtio-net device model */
> > const struct iovec in = {
> > .iov_base = &status,
> > .iov_len = sizeof(status),
> > };
> > - int r = -EINVAL;
> > bool ok;
> >
> > ok = vhost_vdpa_net_cvq_map_elem(s, elem, dev_buffers);
> > @@ -405,35 +444,16 @@ static int
> > vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
> > goto out;
> > }
> >
> > - r = vhost_svq_add(svq, &dev_buffers[0], 1, &dev_buffers[1], 1, elem);
> > - if (unlikely(r != 0)) {
> > - if (unlikely(r == -ENOSPC)) {
> > - qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device
> > queue\n",
> > - __func__);
> > - }
> > - goto out;
> > - }
> > -
> > - /*
> > - * We can poll here since we've had BQL from the time we sent the
> > - * descriptor. Also, we need to take the answer before SVQ pulls by
> > itself,
> > - * when BQL is released
> > - */
> > - dev_written = vhost_svq_poll(svq);
> > - if (unlikely(dev_written < sizeof(status))) {
> > - error_report("Insufficient written data (%zu)", dev_written);
> > - goto out;
> > - }
> > -
> > - memcpy(&status, dev_buffers[1].iov_base, sizeof(status));
> > + status = vhost_vdpa_net_cvq_add(svq, dev_buffers);
> > if (status != VIRTIO_NET_OK) {
> > goto out;
> > }
> >
> > status = VIRTIO_NET_ERR;
> > - virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, dev_buffers, 1);
> > - if (status != VIRTIO_NET_OK) {
> > + in_len = virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, dev_buffers, 1);
> > + if (in_len != sizeof(status) || status != VIRTIO_NET_OK) {
> > error_report("Bad CVQ processing in model");
> > + return VIRTIO_NET_ERR;
> > }
> >
> > out:
> > @@ -450,7 +470,7 @@ out:
> > if (dev_buffers[1].iov_base) {
> > vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, dev_buffers[1].iov_base);
> > }
> > - return r;
> > + return status == VIRTIO_NET_OK ? 0 : 1;
> > }
> >
> > static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
>
[Prev in Thread] |
Current Thread |
[Next in Thread] |
- Re: [PATCH v4 2/7] vdpa: Extract vhost_vdpa_net_cvq_add from vhost_vdpa_net_handle_ctrl_avail,
Eugenio Perez Martin <=