[PATCH v6 10/11] hw/block/nvme: Separate read and write handlers
From: Dmitry Fomichev
Subject: [PATCH v6 10/11] hw/block/nvme: Separate read and write handlers
Date: Wed, 14 Oct 2020 06:42:11 +0900

With ZNS support in place, the majority of the code in nvme_rw() has
become read- or write-specific. Move these parts into two separate
handlers, nvme_read() and nvme_write(), to make the code more readable
and to remove the multiple is_write checks that previously existed in
the I/O path.

This is a refactoring patch; there is no change in functionality.

Signed-off-by: Dmitry Fomichev <dmitry.fomichev@wdc.com>
---
hw/block/nvme.c | 191 +++++++++++++++++++++++++-----------------
hw/block/trace-events | 3 +-
2 files changed, 114 insertions(+), 80 deletions(-)
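For readers skimming the archive, a minimal, self-contained sketch of the
dispatch shape this patch produces: reads and writes get dedicated entry
points, and Zone Append is routed through the write handler. The types and
stub handlers below are stand-ins, not the QEMU code itself (opcode values
as in the NVMe/ZNS specs); the real hunk is in nvme_io_cmd() near the end
of the diff below.

    /*
     * Illustrative sketch only: stand-in types and stub handlers.
     */
    #include <stdint.h>

    enum { NVME_CMD_WRITE = 0x01, NVME_CMD_READ = 0x02, NVME_CMD_ZONE_APPEND = 0x7d };

    typedef struct { uint8_t opcode; } Req;      /* stand-in for NvmeRequest */

    static uint16_t nvme_read_sketch(Req *req)              { (void)req; return 0; }
    static uint16_t nvme_write_sketch(Req *req, int append) { (void)req; (void)append; return 0; }

    static uint16_t io_cmd_sketch(Req *req)
    {
        switch (req->opcode) {
        case NVME_CMD_ZONE_APPEND:
            return nvme_write_sketch(req, 1);    /* append reuses the write path */
        case NVME_CMD_WRITE:
            return nvme_write_sketch(req, 0);
        case NVME_CMD_READ:
            return nvme_read_sketch(req);        /* no is_write branching here */
        default:
            return 1;                            /* anything else: invalid opcode */
        }
    }
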
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index 41caf35430..1186c16b50 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -1162,10 +1162,10 @@ typedef struct NvmeReadFillCtx {
uint32_t post_rd_fill_nlb;
} NvmeReadFillCtx;
-static uint16_t nvme_check_zone_read(NvmeNamespace *ns, NvmeZone *zone,
- uint64_t slba, uint32_t nlb,
- NvmeReadFillCtx *rfc)
+static uint16_t nvme_check_zone_read(NvmeNamespace *ns, uint64_t slba,
+ uint32_t nlb, NvmeReadFillCtx *rfc)
{
+ NvmeZone *zone = nvme_get_zone_by_slba(ns, slba);
NvmeZone *next_zone;
uint64_t bndry = nvme_zone_rd_boundary(ns, zone);
uint64_t end = slba + nlb, wp1, wp2;
@@ -1449,6 +1449,86 @@ static uint16_t nvme_flush(NvmeCtrl *n, NvmeRequest *req)
return NVME_NO_COMPLETE;
}
+static uint16_t nvme_read(NvmeCtrl *n, NvmeRequest *req)
+{
+ NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
+ NvmeNamespace *ns = req->ns;
+ uint64_t slba = le64_to_cpu(rw->slba);
+ uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1;
+ uint32_t fill_len;
+ uint64_t data_size = nvme_l2b(ns, nlb);
+ uint64_t data_offset, fill_ofs;
+ NvmeReadFillCtx rfc;
+ BlockBackend *blk = ns->blkconf.blk;
+ uint16_t status;
+
+ trace_pci_nvme_read(nvme_cid(req), nvme_nsid(ns), nlb, data_size, slba);
+
+ status = nvme_check_mdts(n, data_size);
+ if (status) {
+ trace_pci_nvme_err_mdts(nvme_cid(req), data_size);
+ goto invalid;
+ }
+
+ status = nvme_check_bounds(n, ns, slba, nlb);
+ if (status) {
+ trace_pci_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
+ goto invalid;
+ }
+
+ if (ns->params.zoned) {
+ status = nvme_check_zone_read(ns, slba, nlb, &rfc);
+ if (status != NVME_SUCCESS) {
+ trace_pci_nvme_err_zone_read_not_ok(slba, nlb, status);
+ goto invalid;
+ }
+ }
+
+ status = nvme_map_dptr(n, data_size, req);
+ if (status) {
+ goto invalid;
+ }
+
+ if (ns->params.zoned) {
+ if (rfc.pre_rd_fill_nlb) {
+ fill_ofs = nvme_l2b(ns, rfc.pre_rd_fill_slba - slba);
+ fill_len = nvme_l2b(ns, rfc.pre_rd_fill_nlb);
+ nvme_fill_read_data(req, fill_ofs, fill_len,
+ n->params.fill_pattern);
+ }
+ if (!rfc.read_nlb) {
+ /* No backend I/O necessary, only needed to fill the buffer */
+ req->status = NVME_SUCCESS;
+ return NVME_SUCCESS;
+ }
+ if (rfc.post_rd_fill_nlb) {
+ req->fill_ofs = nvme_l2b(ns, rfc.post_rd_fill_slba - slba);
+ req->fill_len = nvme_l2b(ns, rfc.post_rd_fill_nlb);
+ } else {
+ req->fill_len = 0;
+ }
+ slba = rfc.read_slba;
+ data_size = nvme_l2b(ns, rfc.read_nlb);
+ }
+
+ data_offset = nvme_l2b(ns, slba);
+
+ block_acct_start(blk_get_stats(blk), &req->acct, data_size,
+ BLOCK_ACCT_READ);
+ if (req->qsg.sg) {
+ req->aiocb = dma_blk_read(blk, &req->qsg, data_offset,
+ BDRV_SECTOR_SIZE, nvme_rw_cb, req);
+ } else {
+ req->aiocb = blk_aio_preadv(blk, data_offset, &req->iov, 0,
+ nvme_rw_cb, req);
+ }
+ return NVME_NO_COMPLETE;
+
+invalid:
+ block_acct_invalid(blk_get_stats(blk), BLOCK_ACCT_READ);
+ return status | NVME_DNR;
+}
+
static uint16_t nvme_write_zeroes(NvmeCtrl *n, NvmeRequest *req)
{
NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
@@ -1495,25 +1575,20 @@ invalid:
return status | NVME_DNR;
}
-static uint16_t nvme_rw(NvmeCtrl *n, NvmeRequest *req, bool append)
+static uint16_t nvme_write(NvmeCtrl *n, NvmeRequest *req, bool append)
{
NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
NvmeNamespace *ns = req->ns;
- uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1;
uint64_t slba = le64_to_cpu(rw->slba);
+ uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1;
uint64_t data_size = nvme_l2b(ns, nlb);
- uint64_t data_offset, fill_ofs;
-
+ uint64_t data_offset;
NvmeZone *zone;
- uint32_t fill_len;
- NvmeReadFillCtx rfc;
- bool is_write = rw->opcode == NVME_CMD_WRITE || append;
- enum BlockAcctType acct = is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ;
BlockBackend *blk = ns->blkconf.blk;
uint16_t status;
- trace_pci_nvme_rw(nvme_cid(req), nvme_io_opc_str(rw->opcode),
- nvme_nsid(ns), nlb, data_size, slba);
+ trace_pci_nvme_write(nvme_cid(req), nvme_io_opc_str(rw->opcode),
+ nvme_nsid(ns), nlb, data_size, slba);
status = nvme_check_mdts(n, data_size);
if (status) {
@@ -1530,29 +1605,21 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeRequest *req, bool append)
if (ns->params.zoned) {
zone = nvme_get_zone_by_slba(ns, slba);
- if (is_write) {
- status = nvme_check_zone_write(n, ns, zone, slba, nlb, append);
- if (status != NVME_SUCCESS) {
- goto invalid;
- }
-
- if (append) {
- slba = zone->w_ptr;
- }
-
- status = nvme_auto_open_zone(ns, zone);
- if (status != NVME_SUCCESS) {
- goto invalid;
- }
-
- req->cqe.result64 = nvme_advance_zone_wp(ns, zone, nlb);
- } else {
- status = nvme_check_zone_read(ns, zone, slba, nlb, &rfc);
- if (status != NVME_SUCCESS) {
- trace_pci_nvme_err_zone_read_not_ok(slba, nlb, status);
- goto invalid;
- }
+ status = nvme_check_zone_write(n, ns, zone, slba, nlb, append);
+ if (status != NVME_SUCCESS) {
+ goto invalid;
}
+
+ status = nvme_auto_open_zone(ns, zone);
+ if (status != NVME_SUCCESS) {
+ goto invalid;
+ }
+
+ if (append) {
+ slba = zone->w_ptr;
+ }
+
+ req->cqe.result64 = nvme_advance_zone_wp(ns, zone, nlb);
} else if (append) {
trace_pci_nvme_err_invalid_opc(rw->opcode);
status = NVME_INVALID_OPCODE;
@@ -1566,56 +1633,21 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeRequest *req, bool append)
goto invalid;
}
- if (ns->params.zoned) {
- if (is_write) {
- req->cqe.result64 = nvme_advance_zone_wp(ns, zone, nlb);
- } else {
- if (rfc.pre_rd_fill_nlb) {
- fill_ofs = nvme_l2b(ns, rfc.pre_rd_fill_slba - slba);
- fill_len = nvme_l2b(ns, rfc.pre_rd_fill_nlb);
- nvme_fill_read_data(req, fill_ofs, fill_len,
- n->params.fill_pattern);
- }
- if (!rfc.read_nlb) {
- /* No backend I/O necessary, only needed to fill the buffer */
- req->status = NVME_SUCCESS;
- return NVME_SUCCESS;
- }
- if (rfc.post_rd_fill_nlb) {
- req->fill_ofs = nvme_l2b(ns, rfc.post_rd_fill_slba - slba);
- req->fill_len = nvme_l2b(ns, rfc.post_rd_fill_nlb);
- } else {
- req->fill_len = 0;
- }
- slba = rfc.read_slba;
- data_size = nvme_l2b(ns, rfc.read_nlb);
- }
- }
-
data_offset = nvme_l2b(ns, slba);
- block_acct_start(blk_get_stats(blk), &req->acct, data_size, acct);
+ block_acct_start(blk_get_stats(blk), &req->acct, data_size,
+ BLOCK_ACCT_WRITE);
if (req->qsg.sg) {
- if (is_write) {
- req->aiocb = dma_blk_write(blk, &req->qsg, data_offset,
- BDRV_SECTOR_SIZE, nvme_rw_cb, req);
- } else {
- req->aiocb = dma_blk_read(blk, &req->qsg, data_offset,
- BDRV_SECTOR_SIZE, nvme_rw_cb, req);
- }
+ req->aiocb = dma_blk_write(blk, &req->qsg, data_offset,
+ BDRV_SECTOR_SIZE, nvme_rw_cb, req);
} else {
- if (is_write) {
- req->aiocb = blk_aio_pwritev(blk, data_offset, &req->iov, 0,
- nvme_rw_cb, req);
- } else {
- req->aiocb = blk_aio_preadv(blk, data_offset, &req->iov, 0,
- nvme_rw_cb, req);
- }
+ req->aiocb = blk_aio_pwritev(blk, data_offset, &req->iov, 0,
+ nvme_rw_cb, req);
}
return NVME_NO_COMPLETE;
invalid:
- block_acct_invalid(blk_get_stats(blk), acct);
+ block_acct_invalid(blk_get_stats(blk), BLOCK_ACCT_WRITE);
return status | NVME_DNR;
}
@@ -2096,10 +2128,11 @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req)
case NVME_CMD_WRITE_ZEROES:
return nvme_write_zeroes(n, req);
case NVME_CMD_ZONE_APPEND:
- return nvme_rw(n, req, true);
+ return nvme_write(n, req, true);
case NVME_CMD_WRITE:
+ return nvme_write(n, req, false);
case NVME_CMD_READ:
- return nvme_rw(n, req, false);
+ return nvme_read(n, req);
case NVME_CMD_ZONE_MGMT_SEND:
return nvme_zone_mgmt_send(n, req);
case NVME_CMD_ZONE_MGMT_RECV:
diff --git a/hw/block/trace-events b/hw/block/trace-events
index 962084e40c..7ee90a50c3 100644
--- a/hw/block/trace-events
+++ b/hw/block/trace-events
@@ -40,7 +40,8 @@ pci_nvme_map_prp(uint64_t trans_len, uint32_t len, uint64_t prp1, uint64_t prp2,
pci_nvme_map_sgl(uint16_t cid, uint8_t typ, uint64_t len) "cid %"PRIu16" type 0x%"PRIx8" len %"PRIu64""
pci_nvme_io_cmd(uint16_t cid, uint32_t nsid, uint16_t sqid, uint8_t opcode, const char *opname) "cid %"PRIu16" nsid %"PRIu32" sqid %"PRIu16" opc 0x%"PRIx8" opname '%s'"
pci_nvme_admin_cmd(uint16_t cid, uint16_t sqid, uint8_t opcode, const char *opname) "cid %"PRIu16" sqid %"PRIu16" opc 0x%"PRIx8" opname '%s'"
-pci_nvme_rw(uint16_t cid, const char *verb, uint32_t nsid, uint32_t nlb, uint64_t count, uint64_t lba) "cid %"PRIu16" opname '%s' nsid %"PRIu32" nlb %"PRIu32" count %"PRIu64" lba 0x%"PRIx64""
+pci_nvme_read(uint16_t cid, uint32_t nsid, uint32_t nlb, uint64_t count, uint64_t lba) "cid %"PRIu16" nsid %"PRIu32" nlb %"PRIu32" count %"PRIu64" lba 0x%"PRIx64""
+pci_nvme_write(uint16_t cid, const char *verb, uint32_t nsid, uint32_t nlb, uint64_t count, uint64_t lba) "cid %"PRIu16" opname '%s' nsid %"PRIu32" nlb %"PRIu32" count %"PRIu64" lba 0x%"PRIx64""
pci_nvme_rw_cb(uint16_t cid, const char *blkname) "cid %"PRIu16" blk '%s'"
pci_nvme_write_zeroes(uint16_t cid, uint32_t nsid, uint64_t slba, uint32_t nlb) "cid %"PRIu16" nsid %"PRIu32" slba %"PRIu64" nlb %"PRIu32""
pci_nvme_create_sq(uint64_t addr, uint16_t sqid, uint16_t cqid, uint16_t qsize, uint16_t qflags) "create submission queue, addr=0x%"PRIx64", sqid=%"PRIu16", cqid=%"PRIu16", qsize=%"PRIu16", qflags=%"PRIu16""
--
2.21.0