[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH v7 27/48] nvme: refactor dma read/write
From: Klaus Jensen
Subject: [PATCH v7 27/48] nvme: refactor dma read/write
Date: Wed, 15 Apr 2020 07:51:19 +0200
From: Klaus Jensen <address@hidden>
Refactor the nvme_dma_{read,write}_prp functions into a common function
taking a DMADirection parameter.
Signed-off-by: Klaus Jensen <address@hidden>
Reviewed-by: Maxim Levitsky <address@hidden>
---
hw/block/nvme.c | 88 ++++++++++++++++++++++++-------------------------
1 file changed, 43 insertions(+), 45 deletions(-)
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index 3e41b1337bf7..2ff7dd695cd7 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -307,55 +307,50 @@ unmap:
return status;
}
-static uint16_t nvme_dma_write_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
- uint64_t prp1, uint64_t prp2)
+static uint16_t nvme_dma_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
+ uint64_t prp1, uint64_t prp2, DMADirection dir)
{
QEMUSGList qsg;
QEMUIOVector iov;
uint16_t status = NVME_SUCCESS;
- if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) {
- return NVME_INVALID_FIELD | NVME_DNR;
+ status = nvme_map_prp(&qsg, &iov, prp1, prp2, len, n);
+ if (status) {
+ return status;
}
+
if (qsg.nsg > 0) {
- if (dma_buf_write(ptr, len, &qsg)) {
- status = NVME_INVALID_FIELD | NVME_DNR;
+ uint64_t residual;
+
+ if (dir == DMA_DIRECTION_TO_DEVICE) {
+ residual = dma_buf_write(ptr, len, &qsg);
+ } else {
+ residual = dma_buf_read(ptr, len, &qsg);
}
- qemu_sglist_destroy(&qsg);
- } else {
- if (qemu_iovec_to_buf(&iov, 0, ptr, len) != len) {
- status = NVME_INVALID_FIELD | NVME_DNR;
- }
- qemu_iovec_destroy(&iov);
- }
- return status;
-}
-static uint16_t nvme_dma_read_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
- uint64_t prp1, uint64_t prp2)
-{
- QEMUSGList qsg;
- QEMUIOVector iov;
- uint16_t status = NVME_SUCCESS;
-
- trace_nvme_dev_dma_read(prp1, prp2);
-
- if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) {
- return NVME_INVALID_FIELD | NVME_DNR;
- }
- if (qsg.nsg > 0) {
- if (unlikely(dma_buf_read(ptr, len, &qsg))) {
+ if (unlikely(residual)) {
trace_nvme_dev_err_invalid_dma();
status = NVME_INVALID_FIELD | NVME_DNR;
}
+
qemu_sglist_destroy(&qsg);
} else {
- if (unlikely(qemu_iovec_from_buf(&iov, 0, ptr, len) != len)) {
+ size_t bytes;
+
+ if (dir == DMA_DIRECTION_TO_DEVICE) {
+ bytes = qemu_iovec_to_buf(&iov, 0, ptr, len);
+ } else {
+ bytes = qemu_iovec_from_buf(&iov, 0, ptr, len);
+ }
+
+ if (unlikely(bytes != len)) {
trace_nvme_dev_err_invalid_dma();
status = NVME_INVALID_FIELD | NVME_DNR;
}
+
qemu_iovec_destroy(&iov);
}
+
return status;
}
@@ -788,8 +783,8 @@ static uint16_t nvme_smart_info(NvmeCtrl *n, NvmeCmd *cmd,
uint8_t rae,
nvme_clear_events(n, NVME_AER_TYPE_SMART);
}
- return nvme_dma_read_prp(n, (uint8_t *) &smart + off, trans_len, prp1,
- prp2);
+ return nvme_dma_prp(n, (uint8_t *) &smart + off, trans_len, prp1, prp2,
+ DMA_DIRECTION_FROM_DEVICE);
}
static uint16_t nvme_fw_log_info(NvmeCtrl *n, NvmeCmd *cmd, uint32_t buf_len,
@@ -808,8 +803,8 @@ static uint16_t nvme_fw_log_info(NvmeCtrl *n, NvmeCmd *cmd,
uint32_t buf_len,
trans_len = MIN(sizeof(fw_log) - off, buf_len);
- return nvme_dma_read_prp(n, (uint8_t *) &fw_log + off, trans_len, prp1,
- prp2);
+ return nvme_dma_prp(n, (uint8_t *) &fw_log + off, trans_len, prp1, prp2,
+ DMA_DIRECTION_FROM_DEVICE);
}
static uint16_t nvme_error_info(NvmeCtrl *n, NvmeCmd *cmd, uint8_t rae,
@@ -833,7 +828,8 @@ static uint16_t nvme_error_info(NvmeCtrl *n, NvmeCmd *cmd,
uint8_t rae,
trans_len = MIN(sizeof(errlog) - off, buf_len);
- return nvme_dma_read_prp(n, (uint8_t *)&errlog, trans_len, prp1, prp2);
+ return nvme_dma_prp(n, (uint8_t *)&errlog, trans_len, prp1, prp2,
+ DMA_DIRECTION_FROM_DEVICE);
}
static uint16_t nvme_get_log(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
@@ -981,8 +977,8 @@ static uint16_t nvme_identify_ctrl(NvmeCtrl *n,
NvmeIdentify *c)
trace_nvme_dev_identify_ctrl();
- return nvme_dma_read_prp(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl),
- prp1, prp2);
+ return nvme_dma_prp(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl), prp1,
+ prp2, DMA_DIRECTION_FROM_DEVICE);
}
static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeIdentify *c)
@@ -1001,8 +997,8 @@ static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeIdentify
*c)
ns = &n->namespaces[nsid - 1];
- return nvme_dma_read_prp(n, (uint8_t *)&ns->id_ns, sizeof(ns->id_ns),
- prp1, prp2);
+ return nvme_dma_prp(n, (uint8_t *)&ns->id_ns, sizeof(ns->id_ns), prp1,
+ prp2, DMA_DIRECTION_FROM_DEVICE);
}
static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeIdentify *c)
@@ -1027,7 +1023,8 @@ static uint16_t nvme_identify_nslist(NvmeCtrl *n,
NvmeIdentify *c)
break;
}
}
- ret = nvme_dma_read_prp(n, (uint8_t *)list, data_len, prp1, prp2);
+ ret = nvme_dma_prp(n, (uint8_t *)list, data_len, prp1, prp2,
+ DMA_DIRECTION_FROM_DEVICE);
g_free(list);
return ret;
}
@@ -1066,7 +1063,8 @@ static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n,
NvmeIdentify *c)
ns_descrs->uuid.hdr.nidl = NVME_NIDT_UUID_LEN;
stl_be_p(&ns_descrs->uuid.v, nsid);
- return nvme_dma_read_prp(n, list, NVME_IDENTIFY_DATA_SIZE, prp1, prp2);
+ return nvme_dma_prp(n, list, NVME_IDENTIFY_DATA_SIZE, prp1, prp2,
+ DMA_DIRECTION_FROM_DEVICE);
}
static uint16_t nvme_identify(NvmeCtrl *n, NvmeCmd *cmd)
@@ -1147,8 +1145,8 @@ static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n,
NvmeCmd *cmd)
uint64_t timestamp = nvme_get_timestamp(n);
- return nvme_dma_read_prp(n, (uint8_t *)&timestamp,
- sizeof(timestamp), prp1, prp2);
+ return nvme_dma_prp(n, (uint8_t *)&timestamp, sizeof(timestamp), prp1,
+ prp2, DMA_DIRECTION_FROM_DEVICE);
}
static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
@@ -1233,8 +1231,8 @@ static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n,
NvmeCmd *cmd)
uint64_t prp1 = le64_to_cpu(cmd->dptr.prp1);
uint64_t prp2 = le64_to_cpu(cmd->dptr.prp2);
- ret = nvme_dma_write_prp(n, (uint8_t *)&timestamp,
- sizeof(timestamp), prp1, prp2);
+ ret = nvme_dma_prp(n, (uint8_t *)&timestamp, sizeof(timestamp), prp1,
+ prp2, DMA_DIRECTION_TO_DEVICE);
if (ret != NVME_SUCCESS) {
return ret;
}
--
2.26.0
- [PATCH v7 19/48] nvme: support identify namespace descriptor list, (continued)
- [PATCH v7 19/48] nvme: support identify namespace descriptor list, Klaus Jensen, 2020/04/15
- [PATCH v7 14/48] nvme: add support for the asynchronous event request command, Klaus Jensen, 2020/04/15
- [PATCH v7 29/48] nvme: add request mapping helper, Klaus Jensen, 2020/04/15
- [PATCH v7 38/48] nvme: use preallocated qsg/iov in nvme_dma_prp, Klaus Jensen, 2020/04/15
- [PATCH v7 24/48] nvme: add mapping helpers, Klaus Jensen, 2020/04/15
- [PATCH v7 34/48] nvme: refactor NvmeRequest, Klaus Jensen, 2020/04/15
- [PATCH v7 22/48] nvme: bump supported version to v1.3, Klaus Jensen, 2020/04/15
- [PATCH v7 21/48] nvme: provide the mandatory subnqn field, Klaus Jensen, 2020/04/15
- [PATCH v7 25/48] nvme: replace dma_acct with blk_acct equivalent, Klaus Jensen, 2020/04/15
- [PATCH v7 32/48] nvme: add check for mdts, Klaus Jensen, 2020/04/15
- [PATCH v7 27/48] nvme: refactor dma read/write,
Klaus Jensen <=
- [PATCH v7 23/48] nvme: memset preallocated requests structures, Klaus Jensen, 2020/04/15
- [PATCH v7 31/48] nvme: refactor request bounds checking, Klaus Jensen, 2020/04/15
- [PATCH v7 33/48] nvme: be consistent about zeros vs zeroes, Klaus Jensen, 2020/04/15
- [PATCH v7 40/48] nvme: handle dma errors, Klaus Jensen, 2020/04/15
- [PATCH v7 26/48] nvme: remove redundant has_sg member, Klaus Jensen, 2020/04/15
- [PATCH v7 30/48] nvme: verify validity of prp lists in the cmb, Klaus Jensen, 2020/04/15
- [PATCH v7 35/48] nvme: remove NvmeCmd parameter, Klaus Jensen, 2020/04/15
- [PATCH v7 36/48] nvme: allow multiple aios per command, Klaus Jensen, 2020/04/15
- [PATCH v7 37/48] nvme: add nvme_check_rw helper, Klaus Jensen, 2020/04/15