Re: [PATCH 02/16] hw/block/nvme: add mapping helpers
From: Maxim Levitsky
Subject: Re: [PATCH 02/16] hw/block/nvme: add mapping helpers
Date: Wed, 29 Jul 2020 16:57:40 +0300
User-agent: Evolution 3.36.3 (3.36.3-1.fc32)
On Mon, 2020-07-20 at 13:37 +0200, Klaus Jensen wrote:
> From: Klaus Jensen <k.jensen@samsung.com>
>
> Add nvme_map_addr, nvme_map_addr_cmb and nvme_addr_to_cmb helpers and
> use them in nvme_map_prp.
>
> This fixes a bug where in the case of a CMB transfer, the device would
> map to the buffer with a wrong length.
>
> Fixes: b2b2b67a00574 ("nvme: Add support for Read Data and Write Data in CMBs.")
> Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
> ---
> hw/block/nvme.c | 109 +++++++++++++++++++++++++++++++++++-------
> hw/block/trace-events | 2 +
> 2 files changed, 94 insertions(+), 17 deletions(-)
>
> diff --git a/hw/block/nvme.c b/hw/block/nvme.c
> index 4d7b730a62b6..9b1a080cdc70 100644
> --- a/hw/block/nvme.c
> +++ b/hw/block/nvme.c
> @@ -109,6 +109,11 @@ static uint16_t nvme_sqid(NvmeRequest *req)
> return le16_to_cpu(req->sq->sqid);
> }
>
> +static inline void *nvme_addr_to_cmb(NvmeCtrl *n, hwaddr addr)
> +{
> + return &n->cmbuf[addr - n->ctrl_mem.addr];
I would add an assert here, just in case we end up with an out-of-bounds array access; something like the sketch below.
> +}
> +
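Something along these lines maybe (just a sketch; it reuses the same bounds that nvme_addr_is_cmb() below checks, so it could equally just call that helper if the ordering allows):

static inline void *nvme_addr_to_cmb(NvmeCtrl *n, hwaddr addr)
{
    hwaddr cmb_addr = n->ctrl_mem.addr;

    /* sketch only: catch out-of-bounds accesses into the CMB buffer */
    assert(addr >= cmb_addr);
    assert(addr < cmb_addr + int128_get64(n->ctrl_mem.size));

    return &n->cmbuf[addr - cmb_addr];
}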
> static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
> {
> hwaddr low = n->ctrl_mem.addr;
> @@ -120,7 +125,7 @@ static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
> static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
> {
> if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr)) {
> - memcpy(buf, (void *)&n->cmbuf[addr - n->ctrl_mem.addr], size);
> + memcpy(buf, nvme_addr_to_cmb(n, addr), size);
OK.
> return;
> }
>
> @@ -203,29 +208,91 @@ static void nvme_irq_deassert(NvmeCtrl *n, NvmeCQueue *cq)
> }
> }
>
> +static uint16_t nvme_map_addr_cmb(NvmeCtrl *n, QEMUIOVector *iov, hwaddr addr,
> +                                  size_t len)
> +{
> + if (!len) {
> + return NVME_SUCCESS;
> + }
> +
> + trace_pci_nvme_map_addr_cmb(addr, len);
> +
> + if (!nvme_addr_is_cmb(n, addr) || !nvme_addr_is_cmb(n, addr + len - 1)) {
> + return NVME_DATA_TRAS_ERROR;
> + }
> +
> + qemu_iovec_add(iov, nvme_addr_to_cmb(n, addr), len);
> +
> + return NVME_SUCCESS;
> +}
OK
> +
> +static uint16_t nvme_map_addr(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
> +                              hwaddr addr, size_t len)
> +{
> + if (!len) {
> + return NVME_SUCCESS;
> + }
> +
> + trace_pci_nvme_map_addr(addr, len);
> +
> + if (nvme_addr_is_cmb(n, addr)) {
> + if (qsg && qsg->sg) {
> + return NVME_INVALID_USE_OF_CMB | NVME_DNR;
> + }
> +
> + assert(iov);
> +
> + if (!iov->iov) {
> + qemu_iovec_init(iov, 1);
> + }
> +
> + return nvme_map_addr_cmb(n, iov, addr, len);
> + }
> +
> + if (iov && iov->iov) {
> + return NVME_INVALID_USE_OF_CMB | NVME_DNR;
> + }
> +
> + assert(qsg);
> +
> + if (!qsg->sg) {
> + pci_dma_sglist_init(qsg, &n->parent_obj, 1);
> + }
> +
> + qemu_sglist_add(qsg, addr, len);
> +
> + return NVME_SUCCESS;
> +}
OK
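Just to spell out my reading of the qsg/iov exclusivity, a minimal hypothetical caller sketch (nvme_map_example() is made up, error paths and cleanup elided):

static uint16_t nvme_map_example(NvmeCtrl *n, uint64_t addr, uint32_t len)
{
    QEMUSGList qsg = {0};
    QEMUIOVector iov = {0};
    uint16_t status;

    /* nvme_map_addr() lazily initializes exactly one of the two:
     * CMB addresses land in the iovec, host memory in the sglist. */
    status = nvme_map_addr(n, &qsg, &iov, addr, len);
    if (status) {
        return status;
    }

    /* ... submit I/O with whichever of qsg/iov got populated ... */

    return NVME_SUCCESS;
}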
> +
> static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
>                              uint64_t prp2, uint32_t len, NvmeCtrl *n)
> {
> hwaddr trans_len = n->page_size - (prp1 % n->page_size);
> trans_len = MIN(len, trans_len);
> int num_prps = (len >> n->page_bits) + 1;
> + uint16_t status;
>
> if (unlikely(!prp1)) {
> trace_pci_nvme_err_invalid_prp();
> return NVME_INVALID_FIELD | NVME_DNR;
> - } else if (n->bar.cmbsz && prp1 >= n->ctrl_mem.addr &&
> - prp1 < n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size)) {
> - qsg->nsg = 0;
> + }
> +
> + if (nvme_addr_is_cmb(n, prp1)) {
> qemu_iovec_init(iov, num_prps);
> - qemu_iovec_add(iov, (void *)&n->cmbuf[prp1 - n->ctrl_mem.addr], trans_len);
> } else {
> pci_dma_sglist_init(qsg, &n->parent_obj, num_prps);
> - qemu_sglist_add(qsg, prp1, trans_len);
> }
> +
> + status = nvme_map_addr(n, qsg, iov, prp1, trans_len);
> + if (status) {
> + goto unmap;
> + }
> +
> len -= trans_len;
> if (len) {
> if (unlikely(!prp2)) {
> trace_pci_nvme_err_invalid_prp2_missing();
> + status = NVME_INVALID_FIELD | NVME_DNR;
> goto unmap;
> }
> if (len > n->page_size) {
> @@ -242,6 +309,7 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
> if (i == n->max_prp_ents - 1 && len > n->page_size) {
> if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
> trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
> + status = NVME_INVALID_FIELD | NVME_DNR;
> goto unmap;
> }
>
> @@ -255,14 +323,14 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
>
> if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
> trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
> + status = NVME_INVALID_FIELD | NVME_DNR;
> goto unmap;
> }
>
> trans_len = MIN(len, n->page_size);
> - if (qsg->nsg){
> - qemu_sglist_add(qsg, prp_ent, trans_len);
> - } else {
> - qemu_iovec_add(iov, (void *)&n->cmbuf[prp_ent - n->ctrl_mem.addr], trans_len);
> + status = nvme_map_addr(n, qsg, iov, prp_ent, trans_len);
> + if (status) {
> + goto unmap;
> }
> len -= trans_len;
> i++;
> @@ -270,20 +338,27 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
> } else {
> if (unlikely(prp2 & (n->page_size - 1))) {
> trace_pci_nvme_err_invalid_prp2_align(prp2);
> + status = NVME_INVALID_FIELD | NVME_DNR;
> goto unmap;
> }
> - if (qsg->nsg) {
> - qemu_sglist_add(qsg, prp2, len);
> - } else {
> - qemu_iovec_add(iov, (void *)&n->cmbuf[prp2 - n->ctrl_mem.addr], trans_len);
> + status = nvme_map_addr(n, qsg, iov, prp2, len);
> + if (status) {
> + goto unmap;
> }
> }
> }
> return NVME_SUCCESS;
>
> - unmap:
> - qemu_sglist_destroy(qsg);
> - return NVME_INVALID_FIELD | NVME_DNR;
> +unmap:
> + if (iov && iov->iov) {
> + qemu_iovec_destroy(iov);
> + }
> +
> + if (qsg && qsg->sg) {
> + qemu_sglist_destroy(qsg);
> + }
> +
> + return status;
> }
>
> static uint16_t nvme_dma_write_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
> diff --git a/hw/block/trace-events b/hw/block/trace-events
> index 7b7303cab1dd..f3b2d004e078 100644
> --- a/hw/block/trace-events
> +++ b/hw/block/trace-events
> @@ -33,6 +33,8 @@ pci_nvme_irq_msix(uint32_t vector) "raising MSI-X IRQ vector %u"
> pci_nvme_irq_pin(void) "pulsing IRQ pin"
> pci_nvme_irq_masked(void) "IRQ is masked"
> pci_nvme_dma_read(uint64_t prp1, uint64_t prp2) "DMA read, prp1=0x%"PRIx64" prp2=0x%"PRIx64""
> +pci_nvme_map_addr(uint64_t addr, uint64_t len) "addr 0x%"PRIx64" len %"PRIu64""
> +pci_nvme_map_addr_cmb(uint64_t addr, uint64_t len) "addr 0x%"PRIx64" len %"PRIu64""
> pci_nvme_io_cmd(uint16_t cid, uint32_t nsid, uint16_t sqid, uint8_t opcode) "cid %"PRIu16" nsid %"PRIu32" sqid %"PRIu16" opc 0x%"PRIx8""
> pci_nvme_admin_cmd(uint16_t cid, uint16_t sqid, uint8_t opcode) "cid %"PRIu16" sqid %"PRIu16" opc 0x%"PRIx8""
> pci_nvme_rw(const char *verb, uint32_t blk_count, uint64_t byte_count, uint64_t lba) "%s %"PRIu32" blocks (%"PRIu64" bytes) from LBA %"PRIu64""
Looks good. I could have missed something, but compared to the older version of similar code I reviewed, this looks much better and is easier to understand.
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Best regards,
Maxim Levitsky