qemu-devel

Re: [PATCH v3 2/3] QIOChannelSocket: Implement io_async_write & io_async_flush


From: Leonardo Bras Soares Passos
Subject: Re: [PATCH v3 2/3] QIOChannelSocket: Implement io_async_write & io_async_flush
Date: Wed, 29 Sep 2021 16:32:12 -0300

Hello Daniel,

On Fri, Sep 24, 2021 at 2:38 PM Daniel P. Berrangé <berrange@redhat.com> wrote:
[...]
> > @@ -154,6 +171,19 @@ int qio_channel_socket_connect_sync(QIOChannelSocket *ioc,
> >          return -1;
> >      }
> >
> > +#ifdef CONFIG_LINUX
> > +    if (addr->type != SOCKET_ADDRESS_TYPE_INET) {
> > +        return 0;
> > +    }
> > +
> > +    ret = qemu_setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &v, sizeof(v));
> > +    if (ret >= 0) {
> > +        QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);
> > +        klass->io_async_writev = qio_channel_socket_async_writev;
> > +        klass->io_async_flush = qio_channel_socket_async_flush;
> > +    }
> > +#endif
>
> This is not right - the async APIs should not be tied 1:1 to ZEROCOPY
> usage - we should have them take a flag to request ZEROCOPY behaviour.

I agree, but I am not aware of how to do an asynchronous send on a socket
without MSG_ZEROCOPY.

I mean, I know about non-blocking send, but I am not sure how it would
check that everything was actually sent (i.e. the flush part).
Would that also use the ERRQUEUE?

What would you suggest?
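
For the flag part, just to check I understand the direction: an extra flags
argument on the writev path, with zerocopy as an opt-in bit? A rough, untested
sketch of what I have in mind (the flag name and the wrapper below are made up,
not anything in the tree):

    /* Hypothetical caller-visible flag requesting MSG_ZEROCOPY behaviour */
    #define QIO_CHANNEL_WRITE_FLAG_ZERO_COPY 0x1

    static ssize_t qio_channel_socket_writev_flags(QIOChannel *ioc,
                                                   const struct iovec *iov,
                                                   size_t niov,
                                                   int *fds,
                                                   size_t nfds,
                                                   int flags,
                                                   Error **errp)
    {
        int sflags = 0;

        if (flags & QIO_CHANNEL_WRITE_FLAG_ZERO_COPY) {
            /* Only valid if SO_ZEROCOPY was negotiated at connect time */
            sflags |= MSG_ZEROCOPY;
        }

        return __qio_channel_socket_writev(ioc, iov, niov, fds, nfds, sflags, errp);
    }

That way the zerocopy decision stays with the caller instead of being patched
into the class callbacks.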

>
> > +
> >      return 0;
> >  }
> >
> > @@ -520,12 +550,13 @@ static ssize_t qio_channel_socket_readv(QIOChannel *ioc,
> >      return ret;
> >  }
> >
> > -static ssize_t qio_channel_socket_writev(QIOChannel *ioc,
> > -                                         const struct iovec *iov,
> > -                                         size_t niov,
> > -                                         int *fds,
> > -                                         size_t nfds,
> > -                                         Error **errp)
> > +static ssize_t __qio_channel_socket_writev(QIOChannel *ioc,
> > +                                           const struct iovec *iov,
> > +                                           size_t niov,
> > +                                           int *fds,
> > +                                           size_t nfds,
> > +                                           int flags,
> > +                                           Error **errp)
> >  {
> >      QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc);
> >      ssize_t ret;
> > @@ -558,20 +589,145 @@ static ssize_t qio_channel_socket_writev(QIOChannel *ioc,
> >      }
> >
> >   retry:
> > -    ret = sendmsg(sioc->fd, &msg, 0);
> > +    ret = sendmsg(sioc->fd, &msg, flags);
> >      if (ret <= 0) {
> > -        if (errno == EAGAIN) {
> > +        switch (errno) {
> > +        case EAGAIN:
> >              return QIO_CHANNEL_ERR_BLOCK;
> > -        }
> > -        if (errno == EINTR) {
> > +        case EINTR:
> >              goto retry;
> > +        case ENOBUFS:
> > +            return QIO_CHANNEL_ERR_NOBUFS;
> >          }
> > +
> >          error_setg_errno(errp, errno,
> >                           "Unable to write to socket");
> >          return -1;
> >      }
> >      return ret;
> >  }
> > +
> > +static ssize_t qio_channel_socket_writev(QIOChannel *ioc,
> > +                                         const struct iovec *iov,
> > +                                         size_t niov,
> > +                                         int *fds,
> > +                                         size_t nfds,
> > +                                         Error **errp)
> > +{
> > +    return __qio_channel_socket_writev(ioc, iov, niov, fds, nfds, 0, errp);
> > +}
> > +
> > +static ssize_t qio_channel_socket_async_writev(QIOChannel *ioc,
> > +                                               const struct iovec *iov,
> > +                                               size_t niov,
> > +                                               int *fds,
> > +                                               size_t nfds,
> > +                                               Error **errp)
> > +{
> > +    QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc);
> > +    ssize_t ret;
> > +
> > +    sioc->async_queued++;
> > +
> > +    ret = __qio_channel_socket_writev(ioc, iov, niov, fds, nfds, MSG_ZEROCOPY,
> > +                                       errp);
> > +    if (ret == QIO_CHANNEL_ERR_NOBUFS) {
> > +        /*
> > +         * Not enough locked memory available to the process.
> > +         * Fallback to default sync callback.
> > +         */
> > +
> > +        if (errp && *errp) {
> > +            warn_reportf_err(*errp,
> > +                             "Process can't lock enough memory for using MSG_ZEROCOPY,"
> > +                             "falling back to non-zerocopy");
>
> This is not nice as it hides what is likely a mis-configuration error.
> If someone asked for zerocopy, we should honour that or report an
> error back.

Yeah, that makes sense to me.
Thank you for pointing that out.
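
So instead of the silent fallback, something along these lines (sketch only),
so the caller gets a hard error and can fix the memlock limit:

    if (ret == QIO_CHANNEL_ERR_NOBUFS) {
        error_setg(errp,
                   "Process can't lock enough memory for MSG_ZEROCOPY "
                   "(consider raising RLIMIT_MEMLOCK)");
        return -1;
    }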

>
> > +        }
> > +
> > +        QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);
> > +        klass->io_async_writev = NULL;
> > +        klass->io_async_flush = NULL;
>
> Clearing the flush callback is wrong. We might have pending async
> writes that haven't been processed yet, and the lack of buffers
> may be a transient problem just caused by a backlog of writes.

I agree that it's wrong.
But I think this code will be dropped anyway if we implement ZEROCOPY as
a feature instead of async, and avoid falling back to writev when async is
not available.


>
> > +
> > +        /* Re-send current buffer */
> > +        ret = qio_channel_socket_writev(ioc, iov, niov, fds, nfds, errp);
> > +    }
> > +
> > +    return ret;
> > +}
> > +
> > +
> > +static void qio_channel_socket_async_flush(QIOChannel *ioc,
> > +                                           Error **errp)
> > +{
> > +    QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc);
> > +    struct msghdr msg = {};
> > +    struct pollfd pfd;
> > +    struct sock_extended_err *serr;
> > +    struct cmsghdr *cm;
> > +    char control[CMSG_SPACE(sizeof(int) * SOCKET_MAX_FDS)];
>
> Err  sizeof(int) * SOCKET_MAX_FDS   doesn't look right. This
> buffer needs to hold 'struct sock_extended_err' instances,
> not 'int', and SOCKET_MAX_FDS is an unrelated limit.

That was a bad mistake.
It came from reusing code from the functions above, and it slipped past the
review I did before sending the patch.
Sorry about that.
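
For v4 I'm thinking of sizing the buffer for the extended error instead,
something like (untested):

    struct sock_extended_err *serr;
    struct cmsghdr *cm;
    /* Room for a single struct sock_extended_err notification */
    char control[CMSG_SPACE(sizeof(struct sock_extended_err))];

    memset(control, 0, sizeof(control));
    msg.msg_control = control;
    msg.msg_controllen = sizeof(control);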

>
> > +    int ret;
> > +
> > +    memset(control, 0, CMSG_SPACE(sizeof(int) * SOCKET_MAX_FDS));
> > +    msg.msg_control = control;
> > +    msg.msg_controllen = sizeof(control);
> > +
> > +    while (sioc->async_sent < sioc->async_queued) {
> > +        ret = recvmsg(sioc->fd, &msg, MSG_ERRQUEUE);
> > +        if (ret < 0) {
> > +            if (errno == EAGAIN) {
> > +                /* Nothing on errqueue, wait */
> > +                pfd.fd = sioc->fd;
> > +                pfd.events = 0;
> > +                ret = poll(&pfd, 1, 250);
> > +                if (ret == 0) {
> > +                    /*
> > +                     * Timeout : After 250ms without receiving any zerocopy
> > +                     * notification, consider all data as sent.
> > +                     */
>
> This feels very dubious indeed. If some caller needs a guarantee that the
> data was successfully sent, merely waiting 250ms is not going to be reliable
> enough.

That makes sense.
I added this part because at some point while debugging I hit an infinite
loop here (I think some notifications were somehow being missed).

>
> A regular non-async + non-zerocopy write will wait as long as is needed
> unless SO_SNDTIMEO has been set on the socket.

So it would be OK to let it loop here?
Maybe the timeout should only be long enough to keep the CPU from getting
stuck in here.

>
> At the very least the timeout ought to be a parameter passed in, and the
> return value should indicate whether it timed out, or report how many
> pending writes still aren't processed, so the caller can decide whether
> to call flush again.

That also makes sense to me.
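
Something in that direction then (rough, untested sketch, helper name made up):
a small helper that takes the timeout from the caller and only waits for the
errqueue to become readable, so the flush loop has no hard-coded 250ms, and
the flush itself would return sioc->async_queued - sioc->async_sent so the
caller knows whether to call it again:

    /*
     * Wait until the socket error queue has a zerocopy notification to
     * read, or until 'timeout_ms' expires (-1 means wait forever).
     * Returns 1 when readable, 0 on timeout, -1 on error.
     */
    static int qio_channel_socket_wait_errqueue(int fd, int timeout_ms)
    {
        struct pollfd pfd = { .fd = fd, .events = 0 };
        int ret;

        do {
            ret = poll(&pfd, 1, timeout_ms);
        } while (ret < 0 && errno == EINTR);

        if (ret < 0 || (pfd.revents & (POLLHUP | POLLNVAL))) {
            return -1;
        }
        /* POLLERR here only means the errqueue is readable, which is expected */
        return ret;
    }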

>
> > +                    break;
> > +                } else if (ret < 0 ||
> > +                           (pfd.revents & (POLLERR | POLLHUP | POLLNVAL))) {
> > +                    error_setg_errno(errp, errno,
> > +                                     "Poll error");
> > +                    break;
> > +                } else {
> > +                    continue;
> > +                }
> > +            }
> > +            if (errno == EINTR) {
> > +                continue;
> > +            }
> > +
> > +            error_setg_errno(errp, errno,
> > +                             "Unable to read errqueue");
> > +            break;
> > +        }
> > +
> > +        cm = CMSG_FIRSTHDR(&msg);
> > +        if (cm->cmsg_level != SOL_IP &&
> > +            cm->cmsg_type != IP_RECVERR) {
> > +            error_setg_errno(errp, EPROTOTYPE,
> > +                             "Wrong cmsg in errqueue");
> > +            break;
> > +        }
> > +
> > +        serr = (void *) CMSG_DATA(cm);
> > +        if (serr->ee_errno != SO_EE_ORIGIN_NONE) {
> > +            error_setg_errno(errp, serr->ee_errno,
> > +                             "Error on socket");
> > +            break;
> > +        }
> > +        if (serr->ee_origin != SO_EE_ORIGIN_ZEROCOPY) {
> > +            error_setg_errno(errp, serr->ee_origin,
> > +                             "Error not from zerocopy");
> > +            break;
> > +        }
> > +
> > +        /* No errors, count sent ids*/
> > +        sioc->async_sent += serr->ee_data - serr->ee_info + 1;
> > +    }
> > +}
> > +
> > +
> >  #else /* WIN32 */
> >  static ssize_t qio_channel_socket_readv(QIOChannel *ioc,
> >                                          const struct iovec *iov,
>
> Regards,
> Daniel
> --
> |: https://berrange.com      -o-    https://www.flickr.com/photos/dberrange :|
> |: https://libvirt.org         -o-            https://fstop138.berrange.com :|
> |: https://entangle-photo.org    -o-    https://www.instagram.com/dberrange :|
>

Thanks Daniel,

Best regards,
Leonardo Bras



