From: Maxim Levitsky
Subject: Re: [PATCH v6 25/42] nvme: refactor dma read/write
Date: Wed, 25 Mar 2020 12:46:08 +0200
On Mon, 2020-03-16 at 07:29 -0700, Klaus Jensen wrote:
> From: Klaus Jensen <address@hidden>
>
> Refactor the nvme_dma_{read,write}_prp functions into a common function
> taking a DMADirection parameter.
>
> Signed-off-by: Klaus Jensen <address@hidden>
> ---
> hw/block/nvme.c | 89 ++++++++++++++++++++++++-------------------------
> 1 file changed, 43 insertions(+), 46 deletions(-)
>
> diff --git a/hw/block/nvme.c b/hw/block/nvme.c
> index e40c080c3b48..809d00443369 100644
> --- a/hw/block/nvme.c
> +++ b/hw/block/nvme.c
> @@ -299,55 +299,50 @@ unmap:
> return status;
> }
>
> -static uint16_t nvme_dma_write_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
> - uint64_t prp1, uint64_t prp2)
> +static uint16_t nvme_dma_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
> + uint64_t prp1, uint64_t prp2, DMADirection dir)
> {
> QEMUSGList qsg;
> QEMUIOVector iov;
> uint16_t status = NVME_SUCCESS;
>
> - if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) {
> - return NVME_INVALID_FIELD | NVME_DNR;
> + status = nvme_map_prp(&qsg, &iov, prp1, prp2, len, n);
> + if (status) {
> + return status;
> }
> - if (qsg.nsg > 0) {
> - if (dma_buf_write(ptr, len, &qsg)) {
> - status = NVME_INVALID_FIELD | NVME_DNR;
> - }
> - qemu_sglist_destroy(&qsg);
> - } else {
> - if (qemu_iovec_to_buf(&iov, 0, ptr, len) != len) {
> - status = NVME_INVALID_FIELD | NVME_DNR;
> - }
> - qemu_iovec_destroy(&iov);
> - }
> - return status;
> -}
>
> -static uint16_t nvme_dma_read_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
> - uint64_t prp1, uint64_t prp2)
> -{
> - QEMUSGList qsg;
> - QEMUIOVector iov;
> - uint16_t status = NVME_SUCCESS;
> + if (qsg.nsg > 0) {
> + uint64_t residual;
>
> - trace_nvme_dev_dma_read(prp1, prp2);
> + if (dir == DMA_DIRECTION_TO_DEVICE) {
> + residual = dma_buf_write(ptr, len, &qsg);
> + } else {
> + residual = dma_buf_read(ptr, len, &qsg);
> + }
>
> - if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) {
> - return NVME_INVALID_FIELD | NVME_DNR;
> - }
> - if (qsg.nsg > 0) {
> - if (unlikely(dma_buf_read(ptr, len, &qsg))) {
> + if (unlikely(residual)) {
> trace_nvme_dev_err_invalid_dma();
> status = NVME_INVALID_FIELD | NVME_DNR;
> }
> +
> qemu_sglist_destroy(&qsg);
> } else {
> - if (unlikely(qemu_iovec_from_buf(&iov, 0, ptr, len) != len)) {
> + size_t bytes;
> +
> + if (dir == DMA_DIRECTION_TO_DEVICE) {
> + bytes = qemu_iovec_to_buf(&iov, 0, ptr, len);
> + } else {
> + bytes = qemu_iovec_from_buf(&iov, 0, ptr, len);
> + }
> +
> + if (unlikely(bytes != len)) {
> trace_nvme_dev_err_invalid_dma();
> status = NVME_INVALID_FIELD | NVME_DNR;
> }
> +
> qemu_iovec_destroy(&iov);
> }
> +
> return status;
> }
>
> @@ -775,8 +770,8 @@ static uint16_t nvme_smart_info(NvmeCtrl *n, NvmeCmd *cmd, uint8_t rae,
> nvme_clear_events(n, NVME_AER_TYPE_SMART);
> }
>
> - return nvme_dma_read_prp(n, (uint8_t *) &smart + off, trans_len, prp1,
> - prp2);
> + return nvme_dma_prp(n, (uint8_t *) &smart + off, trans_len, prp1, prp2,
> + DMA_DIRECTION_FROM_DEVICE);
> }
>
> static uint16_t nvme_fw_log_info(NvmeCtrl *n, NvmeCmd *cmd, uint32_t buf_len,
> @@ -795,8 +790,8 @@ static uint16_t nvme_fw_log_info(NvmeCtrl *n, NvmeCmd *cmd, uint32_t buf_len,
>
> trans_len = MIN(sizeof(fw_log) - off, buf_len);
>
> - return nvme_dma_read_prp(n, (uint8_t *) &fw_log + off, trans_len, prp1,
> - prp2);
> + return nvme_dma_prp(n, (uint8_t *) &fw_log + off, trans_len, prp1, prp2,
> + DMA_DIRECTION_FROM_DEVICE);
> }
>
> static uint16_t nvme_error_info(NvmeCtrl *n, NvmeCmd *cmd, uint8_t rae,
> @@ -820,7 +815,8 @@ static uint16_t nvme_error_info(NvmeCtrl *n, NvmeCmd *cmd, uint8_t rae,
>
> trans_len = MIN(sizeof(errlog) - off, buf_len);
>
> - return nvme_dma_read_prp(n, errlog, trans_len, prp1, prp2);
> + return nvme_dma_prp(n, errlog, trans_len, prp1, prp2,
> + DMA_DIRECTION_FROM_DEVICE);
> }
>
> static uint16_t nvme_get_log(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
> @@ -963,8 +959,8 @@ static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeIdentify *c)
>
> trace_nvme_dev_identify_ctrl();
>
> - return nvme_dma_read_prp(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl),
> - prp1, prp2);
> + return nvme_dma_prp(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl), prp1,
> + prp2, DMA_DIRECTION_FROM_DEVICE);
> }
>
> static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeIdentify *c)
> @@ -983,8 +979,8 @@ static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeIdentify *c)
>
> ns = &n->namespaces[nsid - 1];
>
> - return nvme_dma_read_prp(n, (uint8_t *)&ns->id_ns, sizeof(ns->id_ns),
> - prp1, prp2);
> + return nvme_dma_prp(n, (uint8_t *)&ns->id_ns, sizeof(ns->id_ns), prp1,
> + prp2, DMA_DIRECTION_FROM_DEVICE);
> }
>
> static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeIdentify *c)
> @@ -1009,7 +1005,8 @@ static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeIdentify *c)
> break;
> }
> }
> - ret = nvme_dma_read_prp(n, (uint8_t *)list, data_len, prp1, prp2);
> + ret = nvme_dma_prp(n, (uint8_t *)list, data_len, prp1, prp2,
> + DMA_DIRECTION_FROM_DEVICE);
> g_free(list);
> return ret;
> }
> @@ -1044,8 +1041,8 @@ static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeIdentify *c)
> ns_descr->nidl = NVME_NIDT_UUID_LEN;
> stl_be_p(ns_descr + sizeof(*ns_descr), nsid);
>
> - ret = nvme_dma_read_prp(n, (uint8_t *) list, NVME_IDENTIFY_DATA_SIZE, prp1,
> - prp2);
> + ret = nvme_dma_prp(n, (uint8_t *) list, NVME_IDENTIFY_DATA_SIZE, prp1,
> + prp2, DMA_DIRECTION_FROM_DEVICE);
> g_free(list);
> return ret;
> }
> @@ -1128,8 +1125,8 @@ static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeCmd *cmd)
>
> uint64_t timestamp = nvme_get_timestamp(n);
>
> - return nvme_dma_read_prp(n, (uint8_t *)&timestamp,
> - sizeof(timestamp), prp1, prp2);
> + return nvme_dma_prp(n, (uint8_t *)&timestamp, sizeof(timestamp), prp1,
> + prp2, DMA_DIRECTION_FROM_DEVICE);
> }
>
> static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
> @@ -1214,8 +1211,8 @@ static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeCmd *cmd)
> uint64_t prp1 = le64_to_cpu(cmd->dptr.prp1);
> uint64_t prp2 = le64_to_cpu(cmd->dptr.prp2);
>
> - ret = nvme_dma_write_prp(n, (uint8_t *)&timestamp,
> - sizeof(timestamp), prp1, prp2);
> + ret = nvme_dma_prp(n, (uint8_t *)&timestamp, sizeof(timestamp), prp1,
> + prp2, DMA_DIRECTION_TO_DEVICE);
> if (ret != NVME_SUCCESS) {
> return ret;
> }
Looks OK to me.
It was a bit difficult to read the diff, so I also read the code after it was
applied.
I hope I didn't miss anything.
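
For anyone else reading along, this is roughly what the unified helper looks like once the patch is applied (condensed from the hunks above; the trace points and unlikely() hints are dropped for brevity, so treat it as a sketch rather than the exact file contents):

static uint16_t nvme_dma_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
                             uint64_t prp1, uint64_t prp2, DMADirection dir)
{
    QEMUSGList qsg;
    QEMUIOVector iov;
    uint16_t status = NVME_SUCCESS;

    status = nvme_map_prp(&qsg, &iov, prp1, prp2, len, n);
    if (status) {
        return status;
    }

    if (qsg.nsg > 0) {
        /* PRPs were mapped to a scatter/gather list: use the DMA helpers */
        uint64_t residual = (dir == DMA_DIRECTION_TO_DEVICE) ?
            dma_buf_write(ptr, len, &qsg) : dma_buf_read(ptr, len, &qsg);

        if (residual) {
            status = NVME_INVALID_FIELD | NVME_DNR;
        }

        qemu_sglist_destroy(&qsg);
    } else {
        /* PRPs were mapped to an iovec (e.g. the CMB): plain memory copy */
        size_t bytes = (dir == DMA_DIRECTION_TO_DEVICE) ?
            qemu_iovec_to_buf(&iov, 0, ptr, len) :
            qemu_iovec_from_buf(&iov, 0, ptr, len);

        if (bytes != len) {
            status = NVME_INVALID_FIELD | NVME_DNR;
        }

        qemu_iovec_destroy(&iov);
    }

    return status;
}

Callers then just pick the direction: DMA_DIRECTION_FROM_DEVICE for the identify/log reads and DMA_DIRECTION_TO_DEVICE for the set-feature timestamp write, as the hunks above show.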
Reviewed-by: Maxim Levitsky <address@hidden>
Best regards,
Maxim Levitsky