
Re: [PATCH v2 09/17] hw/block/nvme: refactor aio submission


From: Keith Busch
Subject: Re: [PATCH v2 09/17] hw/block/nvme: refactor aio submission
Date: Mon, 21 Sep 2020 08:20:15 -0700

On Fri, Sep 18, 2020 at 10:36:13PM +0200, Klaus Jensen wrote:
> +static inline bool nvme_req_is_write(NvmeRequest *req)
> +{
> +    switch (req->cmd.opcode) {
> +    case NVME_CMD_WRITE:
> +    case NVME_CMD_WRITE_ZEROES:
> +        return true;
> +    default:
> +        return false;
> +    }
> +}

It doesn't look like this is called for WRITE_ZEROES anywhere. It also
looks like this helper is a bit unnecessary. We can reorganize some of
the flow so that we're not checking the opcode twice:

> +static uint16_t nvme_do_aio(BlockBackend *blk, int64_t offset, size_t len,
> +                            NvmeRequest *req)
> +{
> +    BlockAcctCookie *acct = &req->acct;
> +    BlockAcctStats *stats = blk_get_stats(blk);
> +
> +    bool is_write;

    bool is_write = false;

> +    trace_pci_nvme_do_aio(nvme_cid(req), req->cmd.opcode,
> +                          nvme_io_opc_str(req->cmd.opcode), blk_name(blk),
> +                          offset, len);
> +
> +    switch (req->cmd.opcode) {
> +    case NVME_CMD_FLUSH:
> +        block_acct_start(stats, acct, 0, BLOCK_ACCT_FLUSH);
> +        req->aiocb = blk_aio_flush(blk, nvme_rw_cb, req);
> +        break;
> +
> +    case NVME_CMD_WRITE_ZEROES:
> +        block_acct_start(stats, acct, len, BLOCK_ACCT_WRITE);
> +        req->aiocb = blk_aio_pwrite_zeroes(blk, offset, len,
> +                                           BDRV_REQ_MAY_UNMAP, nvme_rw_cb,
> +                                           req);
> +        break;
> +
> +    case NVME_CMD_READ:
> +    case NVME_CMD_WRITE:
> +        is_write = nvme_req_is_write(req);

   case NVME_CMD_WRITE:
       is_write = true;
       /* fall through */
   case NVME_CMD_READ:
       ....

> +
> +        block_acct_start(stats, acct, len,
> +                         is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);
> +
> +        if (req->qsg.sg) {
> +            if (is_write) {
> +                req->aiocb = dma_blk_write(blk, &req->qsg, offset,
> +                                           BDRV_SECTOR_SIZE, nvme_rw_cb, req);
> +            } else {
> +                req->aiocb = dma_blk_read(blk, &req->qsg, offset,
> +                                          BDRV_SECTOR_SIZE, nvme_rw_cb, req);
> +            }
> +        } else {
> +            if (is_write) {
> +                req->aiocb = blk_aio_pwritev(blk, offset, &req->iov, 0,
> +                                             nvme_rw_cb, req);
> +            } else {
> +                req->aiocb = blk_aio_preadv(blk, offset, &req->iov, 0,
> +                                            nvme_rw_cb, req);
> +            }
> +        }
> +
> +        break;
> +    }
> +
> +    return NVME_NO_COMPLETE;
>  }
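
Putting those two changes together, the whole switch would end up looking
something like this (just a sketch of the suggested reorganization, reusing
the names from the quoted hunk, not a tested patch; nvme_req_is_write() goes
away entirely and the opcode is only inspected once):

    static uint16_t nvme_do_aio(BlockBackend *blk, int64_t offset, size_t len,
                                NvmeRequest *req)
    {
        BlockAcctCookie *acct = &req->acct;
        BlockAcctStats *stats = blk_get_stats(blk);

        /* default to a read; only NVME_CMD_WRITE flips this below */
        bool is_write = false;

        trace_pci_nvme_do_aio(nvme_cid(req), req->cmd.opcode,
                              nvme_io_opc_str(req->cmd.opcode), blk_name(blk),
                              offset, len);

        switch (req->cmd.opcode) {
        case NVME_CMD_FLUSH:
            block_acct_start(stats, acct, 0, BLOCK_ACCT_FLUSH);
            req->aiocb = blk_aio_flush(blk, nvme_rw_cb, req);
            break;

        case NVME_CMD_WRITE_ZEROES:
            block_acct_start(stats, acct, len, BLOCK_ACCT_WRITE);
            req->aiocb = blk_aio_pwrite_zeroes(blk, offset, len,
                                               BDRV_REQ_MAY_UNMAP, nvme_rw_cb,
                                               req);
            break;

        case NVME_CMD_WRITE:
            is_write = true;
            /* fall through */
        case NVME_CMD_READ:
            block_acct_start(stats, acct, len,
                             is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);

            if (req->qsg.sg) {
                if (is_write) {
                    req->aiocb = dma_blk_write(blk, &req->qsg, offset,
                                               BDRV_SECTOR_SIZE, nvme_rw_cb,
                                               req);
                } else {
                    req->aiocb = dma_blk_read(blk, &req->qsg, offset,
                                              BDRV_SECTOR_SIZE, nvme_rw_cb,
                                              req);
                }
            } else {
                if (is_write) {
                    req->aiocb = blk_aio_pwritev(blk, offset, &req->iov, 0,
                                                 nvme_rw_cb, req);
                } else {
                    req->aiocb = blk_aio_preadv(blk, offset, &req->iov, 0,
                                                nvme_rw_cb, req);
                }
            }

            break;
        }

        return NVME_NO_COMPLETE;
    }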


