From: Dmitry Fomichev
Subject: Re: [PATCH v5 09/14] hw/block/nvme: Support Zoned Namespace Command Set
Date: Sun, 4 Oct 2020 23:57:07 +0000
User-agent: Evolution 3.36.5 (3.36.5-1.fc32)
On Wed, 2020-09-30 at 14:50 +0000, Niklas Cassel wrote:
> On Mon, Sep 28, 2020 at 11:35:23AM +0900, Dmitry Fomichev wrote:
> > The emulation code has been changed to advertise NVM Command Set when
> > "zoned" device property is not set (default) and Zoned Namespace
> > Command Set otherwise.
> >
> > Handlers for three new NVMe commands introduced in Zoned Namespace
> > Command Set specification are added, namely for Zone Management
> > Receive, Zone Management Send and Zone Append.
> >
> > Device initialization code has been extended to create a proper
> > configuration for zoned operation using device properties.
> >
> > Read/Write command handler is modified to only allow writes at the
> > write pointer if the namespace is zoned. For Zone Append command,
> > writes implicitly happen at the write pointer and the starting write
> > pointer value is returned as the result of the command. Write Zeroes
> > handler is modified to add zoned checks that are identical to those
> > done as a part of Write flow.
> >
> > The code to support for Zone Descriptor Extensions is not included in
> > this commit and ZDES 0 is always reported. A later commit in this
> > series will add ZDE support.
> >
> > This commit doesn't yet include checks for active and open zone
> > limits. It is assumed that there are no limits on either active or
> > open zones.
> >
> > Signed-off-by: Niklas Cassel <niklas.cassel@wdc.com>
> > Signed-off-by: Hans Holmberg <hans.holmberg@wdc.com>
> > Signed-off-by: Ajay Joshi <ajay.joshi@wdc.com>
> > Signed-off-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
> > Signed-off-by: Matias Bjorling <matias.bjorling@wdc.com>
> > Signed-off-by: Aravind Ramesh <aravind.ramesh@wdc.com>
> > Signed-off-by: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
> > Signed-off-by: Adam Manzanares <adam.manzanares@wdc.com>
> > Signed-off-by: Dmitry Fomichev <dmitry.fomichev@wdc.com>
> > ---
> > block/nvme.c | 2 +-
> > hw/block/nvme-ns.c | 185 ++++++++-
> > hw/block/nvme-ns.h | 6 +-
> > hw/block/nvme.c | 872 +++++++++++++++++++++++++++++++++++++++++--
> > include/block/nvme.h | 6 +-
> > 5 files changed, 1033 insertions(+), 38 deletions(-)
> >
> > diff --git a/block/nvme.c b/block/nvme.c
> > index 05485fdd11..7a513c9a17 100644
> > --- a/block/nvme.c
> > +++ b/block/nvme.c
> > @@ -333,7 +333,7 @@ static inline int nvme_translate_error(const NvmeCqe *c)
> > {
> > uint16_t status = (le16_to_cpu(c->status) >> 1) & 0xFF;
> > if (status) {
> > - trace_nvme_error(le32_to_cpu(c->result),
> > + trace_nvme_error(le32_to_cpu(c->result32),
> > le16_to_cpu(c->sq_head),
> > le16_to_cpu(c->sq_id),
> > le16_to_cpu(c->cid),
> > diff --git a/hw/block/nvme-ns.c b/hw/block/nvme-ns.c
> > index 31b7f986c3..6d9dc9205b 100644
> > --- a/hw/block/nvme-ns.c
> > +++ b/hw/block/nvme-ns.c
> > @@ -33,14 +33,14 @@ static void nvme_ns_init(NvmeNamespace *ns)
> > NvmeIdNs *id_ns = &ns->id_ns;
> >
> > if (blk_get_flags(ns->blkconf.blk) & BDRV_O_UNMAP) {
> > - ns->id_ns.dlfeat = 0x9;
> > + ns->id_ns.dlfeat = 0x8;
>
> You seem to change something that is NVM namespace specific here, why?
> If it is indeed needed, I assume that this change should be in a separate
> patch.
>
OK, this needs to be done in nvme_zoned_init_ns(). Thanks
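Roughly like this, I imagine (only a sketch -- the body of nvme_zoned_init_ns()
is not in this hunk, so its exact shape is assumed; the identifiers are the
ones used above):

    /* in nvme_zoned_init_ns(), after the generic nvme_ns_init() defaults */
    if (blk_get_flags(ns->blkconf.blk) & BDRV_O_UNMAP) {
        /* apply the 0x8 value only to zoned namespaces */
        ns->id_ns.dlfeat = 0x8;
    }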
> > }
> >
> > id_ns->lbaf[0].ds = BDRV_SECTOR_BITS;
> > uint16_t status;
<snip>
> >
> > + header->nr_zones = cpu_to_le64(nr_zones);
> > +
> > + ret = nvme_dma(n, (uint8_t *)buf, len, DMA_DIRECTION_FROM_DEVICE, req);
> > +
> > + g_free(buf);
> > +
> > + return ret;
> > +}
> > +
> > static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req)
> > {
> > uint32_t nsid = le32_to_cpu(req->cmd.nsid);
> > @@ -1073,9 +1801,15 @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req)
>
> While you did make sure that we don't expose zone mgmt send/recv/zone append
> in the cmd_effects log when CC.CSS != CSS_CSI, we also need to make sure we
> return Invalid Command Opcode for any of those three commands, if a user tries
> to use them anyway (while CC.CSS != CSI).
>
Yes, good catch. Only the commands that are marked as supported in the Commands
Supported and Effects log page may be executed. I am making changes to ensure
this.
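For example, something along these lines in nvme_io_cmd() (just a sketch of
the idea, not necessarily the final shape of the change; all identifiers are
the ones already used in this patch):

    /* before dispatching the opcode in nvme_io_cmd() */
    switch (req->cmd.opcode) {
    case NVME_CMD_ZONE_APPEND:
    case NVME_CMD_ZONE_MGMT_SEND:
    case NVME_CMD_ZONE_MGMT_RECV:
        /* zoned opcodes are only valid if the host selected "all command sets" */
        if (NVME_CC_CSS(n->bar.cc) != CSS_CSI) {
            trace_pci_nvme_err_invalid_opc(req->cmd.opcode);
            return NVME_INVALID_OPCODE | NVME_DNR;
        }
        break;
    default:
        break;
    }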
> > return nvme_flush(n, req);
> > case NVME_CMD_WRITE_ZEROES:
> > return nvme_write_zeroes(n, req);
> > + case NVME_CMD_ZONE_APPEND:
> > + return nvme_rw(n, req, true);
> > case NVME_CMD_WRITE:
> > case NVME_CMD_READ:
> > - return nvme_rw(n, req);
> > + return nvme_rw(n, req, false);
> > + case NVME_CMD_ZONE_MGMT_SEND:
> > + return nvme_zone_mgmt_send(n, req);
> > + case NVME_CMD_ZONE_MGMT_RECV:
> > + return nvme_zone_mgmt_recv(n, req);
> > default:
> > trace_pci_nvme_err_invalid_opc(req->cmd.opcode);
> > return NVME_INVALID_OPCODE | NVME_DNR;
> > @@ -1301,7 +2035,7 @@ static uint16_t nvme_error_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
> > DMA_DIRECTION_FROM_DEVICE, req);
> > }
> >
> > -static uint16_t nvme_cmd_effects(NvmeCtrl *n, uint32_t buf_len,
> > +static uint16_t nvme_cmd_effects(NvmeCtrl *n, uint8_t csi, uint32_t
> > buf_len,
> > uint64_t off, NvmeRequest *req)
> > {
> > NvmeEffectsLog cmd_eff_log = {};
> > @@ -1326,11 +2060,20 @@ static uint16_t nvme_cmd_effects(NvmeCtrl *n, uint32_t buf_len,
> > acs[NVME_ADM_CMD_GET_LOG_PAGE] = NVME_CMD_EFFECTS_CSUPP;
> > acs[NVME_ADM_CMD_ASYNC_EV_REQ] = NVME_CMD_EFFECTS_CSUPP;
> >
> > - iocs[NVME_CMD_FLUSH] = NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC;
> > - iocs[NVME_CMD_WRITE_ZEROES] = NVME_CMD_EFFECTS_CSUPP |
> > - NVME_CMD_EFFECTS_LBCC;
> > - iocs[NVME_CMD_WRITE] = NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC;
> > - iocs[NVME_CMD_READ] = NVME_CMD_EFFECTS_CSUPP;
> > + if (NVME_CC_CSS(n->bar.cc) != CSS_ADMIN_ONLY) {
> > + iocs[NVME_CMD_FLUSH] = NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC;
> > + iocs[NVME_CMD_WRITE_ZEROES] = NVME_CMD_EFFECTS_CSUPP |
> > + NVME_CMD_EFFECTS_LBCC;
> > + iocs[NVME_CMD_WRITE] = NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC;
> > + iocs[NVME_CMD_READ] = NVME_CMD_EFFECTS_CSUPP;
> > + }
> > +
> > + if (csi == NVME_CSI_ZONED && NVME_CC_CSS(n->bar.cc) == CSS_CSI) {
> > + iocs[NVME_CMD_ZONE_APPEND] = NVME_CMD_EFFECTS_CSUPP |
> > + NVME_CMD_EFFECTS_LBCC;
> > + iocs[NVME_CMD_ZONE_MGMT_SEND] = NVME_CMD_EFFECTS_CSUPP;
> > + iocs[NVME_CMD_ZONE_MGMT_RECV] = NVME_CMD_EFFECTS_CSUPP;
> > + }
I think the above needs to be changed so that only admin commands are reported
if this log request arrives with an unrecognized CSI. Some command sets may not
support some, or even any, of the NVM I/O commands.
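In other words, something like this (a sketch only; whether an unrecognized CSI
should instead be rejected outright is a separate question):

    /* in nvme_cmd_effects(): report NVM I/O commands only for a known CSI */
    if (NVME_CC_CSS(n->bar.cc) != CSS_ADMIN_ONLY &&
        (csi == NVME_CSI_NVM || csi == NVME_CSI_ZONED)) {
        iocs[NVME_CMD_FLUSH] = NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC;
        iocs[NVME_CMD_WRITE_ZEROES] = NVME_CMD_EFFECTS_CSUPP |
                                      NVME_CMD_EFFECTS_LBCC;
        iocs[NVME_CMD_WRITE] = NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC;
        iocs[NVME_CMD_READ] = NVME_CMD_EFFECTS_CSUPP;
    }

with the zoned-specific block above left as it is.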
> >
> > trans_len = MIN(sizeof(cmd_eff_log) - off, buf_len);
> >
> > @@ -1349,6 +2092,7 @@ static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req)
> > uint8_t lid = dw10 & 0xff;
> > uint8_t lsp = (dw10 >> 8) & 0xf;
> > uint8_t rae = (dw10 >> 15) & 0x1;
> > + uint8_t csi = le32_to_cpu(cmd->cdw14) >> 24;
> > uint32_t numdl, numdu;
> > uint64_t off, lpol, lpou;
> > size_t len;
> > @@ -1382,7 +2126,7 @@ static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req)
> > case NVME_LOG_FW_SLOT_INFO:
> > return nvme_fw_log_info(n, len, off, req);
> > case NVME_LOG_CMD_EFFECTS:
> > - return nvme_cmd_effects(n, len, off, req);
> > + return nvme_cmd_effects(n, csi, len, off, req);
> > default:
> > trace_pci_nvme_err_invalid_log_page(nvme_cid(req), lid);
> > return NVME_INVALID_FIELD | NVME_DNR;
> > @@ -1502,6 +2246,16 @@ static uint16_t nvme_rpt_empty_id_struct(NvmeCtrl *n, NvmeRequest *req)
> > return nvme_dma(n, id, sizeof(id), DMA_DIRECTION_FROM_DEVICE, req);
> > }
> >
> > +static inline bool nvme_csi_has_nvm_support(NvmeNamespace *ns)
> > +{
> > + switch (ns->csi) {
> > + case NVME_CSI_NVM:
> > + case NVME_CSI_ZONED:
> > + return true;
> > + }
> > + return false;
> > +}
> > +
> > static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeRequest *req)
> > {
> > trace_pci_nvme_identify_ctrl();
> > @@ -1513,11 +2267,16 @@ static uint16_t nvme_identify_ctrl_csi(NvmeCtrl *n, NvmeRequest *req)
> > static uint16_t nvme_identify_ctrl_csi(NvmeCtrl *n, NvmeRequest *req)
> > {
> > NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
> > + NvmeIdCtrlZoned id = {};
> >
> > trace_pci_nvme_identify_ctrl_csi(c->csi);
> >
> > if (c->csi == NVME_CSI_NVM) {
> > return nvme_rpt_empty_id_struct(n, req);
> > + } else if (c->csi == NVME_CSI_ZONED) {
> > + id.zasl = n->zasl;
> > + return nvme_dma(n, (uint8_t *)&id, sizeof(id),
> > + DMA_DIRECTION_FROM_DEVICE, req);
>
> Please read my comment on nvme_identify_nslist_csi() before reading
> this comment.
>
> At least for this function, the specification is clear:
>
> "If the host requests a data structure for an I/O Command Set that the
> controller does not support, the controller shall abort the command with
> a status of Invalid Field in Command."
>
> If the controller supports the I/O command set == if the Command Set bit
> is set in the data struct returned by the nvme_identify_cmd_set(),
> so here we should do something like:
>
> } else if (c->csi == NVME_CSI_ZONED && ctrl_has_zns_namespaces()) {
> ...
> }
>
With this commit, the controller supports the ZNS command set regardless of the
number of attached ZNS namespaces. That number could be zero, but the controller
still supports the command set. I think it would be better not to make the
behavior of this command depend on whether any ZNS namespaces have been added
or not.
> > }
> >
> > return NVME_INVALID_FIELD | NVME_DNR;
> > @@ -1545,8 +2304,12 @@ static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req,
> > return nvme_rpt_empty_id_struct(n, req);
> > }
> >
> > - return nvme_dma(n, (uint8_t *)&ns->id_ns, sizeof(NvmeIdNs),
> > - DMA_DIRECTION_FROM_DEVICE, req);
> > + if (c->csi == NVME_CSI_NVM && nvme_csi_has_nvm_support(ns)) {
> > + return nvme_dma(n, (uint8_t *)&ns->id_ns, sizeof(NvmeIdNs),
> > + DMA_DIRECTION_FROM_DEVICE, req);
> > + }
> > +
> > + return NVME_INVALID_CMD_SET | NVME_DNR;
> > }
> >
> > static uint16_t nvme_identify_ns_csi(NvmeCtrl *n, NvmeRequest *req,
> > @@ -1571,8 +2334,11 @@ static uint16_t nvme_identify_ns_csi(NvmeCtrl *n, NvmeRequest *req,
> > return nvme_rpt_empty_id_struct(n, req);
> > }
> >
> > - if (c->csi == NVME_CSI_NVM) {
> > + if (c->csi == NVME_CSI_NVM && nvme_csi_has_nvm_support(ns)) {
> > return nvme_rpt_empty_id_struct(n, req);
> > + } else if (c->csi == NVME_CSI_ZONED && ns->csi == NVME_CSI_ZONED) {
> > + return nvme_dma(n, (uint8_t *)ns->id_ns_zoned, sizeof(NvmeIdNsZoned),
> > + DMA_DIRECTION_FROM_DEVICE, req);
> > }
> >
> > return NVME_INVALID_FIELD | NVME_DNR;
> > @@ -1634,7 +2400,7 @@ static uint16_t nvme_identify_nslist_csi(NvmeCtrl *n, NvmeRequest *req,
> >
> > trace_pci_nvme_identify_nslist_csi(min_nsid, c->csi);
> >
> > - if (c->csi != NVME_CSI_NVM) {
> > + if (c->csi != NVME_CSI_NVM && c->csi != NVME_CSI_ZONED) {
>
> When reading the specification for CNS 07h, I think that it is not clear
> how this should behave...
>
> I'm thinking in the case when c->csi == NVME_CSI_ZONED
> when our QEMU model does only have NVMe namespaces.
>
I think simply returning an empty list is fine in this case. The loop that
follows will not add any nsids to the list, and an empty list is what the host
will receive.
> Either we should return an empty list (1),
> or we should return Invalid Field in Command (2).
>
> If we decide to go with (2),
> then we should probably take the code you have written in
> nvme_identify_cmd_set():
>
> + for (i = 1; i <= n->num_namespaces; i++) {
> + ns = nvme_ns(n, i);
> + if (ns && ns->params.zoned) {
> + NVME_SET_CSI(*list, NVME_CSI_ZONED);
> + break;
> + }
> + }
>
> And move it into a ctrl_has_zns_namespaces() helper function,
> and then do something like:
> if (!(c->csi == NVME_CSI_NVM ||
>       (ctrl_has_zns_namespaces() && c->csi == NVME_CSI_ZONED)))
>     return NVME_INVALID_FIELD | NVME_DNR;
>
>
> > return NVME_INVALID_FIELD | NVME_DNR;
> > }
> >
> > @@ -1643,7 +2409,7 @@ static uint16_t nvme_identify_nslist_csi(NvmeCtrl *n, NvmeRequest *req,
> > if (!ns) {
> > continue;
> > }
> > - if (ns->params.nsid < min_nsid) {
> > + if (ns->params.nsid < min_nsid || c->csi != ns->csi) {
> > continue;
> > }
> > if (only_active && !ns->params.attached) {
> > @@ -1696,19 +2462,29 @@ static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeRequest *req)
> > desc->nidt = NVME_NIDT_CSI;
> > desc->nidl = NVME_NIDL_CSI;
> > list_ptr += sizeof(*desc);
> > - *(uint8_t *)list_ptr = NVME_CSI_NVM;
> > + *(uint8_t *)list_ptr = ns->csi;
> >
> > return nvme_dma(n, list, data_len, DMA_DIRECTION_FROM_DEVICE, req);
> > }
> >
> > static uint16_t nvme_identify_cmd_set(NvmeCtrl *n, NvmeRequest *req)
> > {
> > + NvmeNamespace *ns;
> > uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {};
> > static const int data_len = sizeof(list);
> > + int i;
> >
> > trace_pci_nvme_identify_cmd_set();
> >
> > NVME_SET_CSI(*list, NVME_CSI_NVM);
> > + for (i = 1; i <= n->num_namespaces; i++) {
> > + ns = nvme_ns(n, i);
> > + if (ns && ns->params.zoned) {
> > + NVME_SET_CSI(*list, NVME_CSI_ZONED);
> > + break;
> > + }
> > + }
> > +
> > return nvme_dma(n, list, data_len, DMA_DIRECTION_FROM_DEVICE, req);
> > }
> >
> > @@ -1751,7 +2527,7 @@ static uint16_t nvme_abort(NvmeCtrl *n, NvmeRequest *req)
> > {
> > uint16_t sqid = le32_to_cpu(req->cmd.cdw10) & 0xffff;
> >
> > - req->cqe.result = 1;
> > + req->cqe.result32 = 1;
> > if (nvme_check_sqid(n, sqid)) {
> > return NVME_INVALID_FIELD | NVME_DNR;
> > }
> > @@ -1932,7 +2708,7 @@ defaults:
> > }
> >
> > out:
> > - req->cqe.result = cpu_to_le32(result);
> > + req->cqe.result32 = cpu_to_le32(result);
> > return NVME_SUCCESS;
> > }
> >
> > @@ -2057,8 +2833,8 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeRequest *req)
> > ((dw11 >> 16) & 0xFFFF) + 1,
> > n->params.max_ioqpairs,
> > n->params.max_ioqpairs);
> > - req->cqe.result = cpu_to_le32((n->params.max_ioqpairs - 1) |
> > - ((n->params.max_ioqpairs - 1) << 16));
> > + req->cqe.result32 = cpu_to_le32((n->params.max_ioqpairs - 1) |
> > + ((n->params.max_ioqpairs - 1) << 16));
> > break;
> > case NVME_ASYNCHRONOUS_EVENT_CONF:
> > n->features.async_config = dw11;
> > @@ -2310,16 +3086,28 @@ static int nvme_start_ctrl(NvmeCtrl *n)
> > continue;
> > }
> > ns->params.attached = false;
> > - switch (ns->params.csi) {
> > + switch (ns->csi) {
> > case NVME_CSI_NVM:
> > if (NVME_CC_CSS(n->bar.cc) == CSS_NVM_ONLY ||
> > NVME_CC_CSS(n->bar.cc) == CSS_CSI) {
> > ns->params.attached = true;
> > }
> > break;
> > + case NVME_CSI_ZONED:
> > + if (NVME_CC_CSS(n->bar.cc) == CSS_CSI) {
> > + ns->params.attached = true;
> > + }
> > + break;
> > }
> > }
>
> Like I wrote in my review comment in the patch that added support for the new
> allocated CNS values, I prefer if we remove this for-loop completely, and
> simply set attached = true in nvme_ns_setup()/nvme_ns_init() instead.
>
> (I was considering if we should set attach = true in nvme_zoned_init_ns(),
> but because nvme_ns_setup()/nvme_ns_init() is called for all namespaces,
> including ZNS namespaces, I don't think that any additional code in
> nvme_zoned_init_ns() is warranted.)
I think the CC.CSS value is not available during namespace setup, and if we set
the attached flag in nvme_zoned_ns_setup(), zoned namespaces may end up attached
even if the NVM Only command set is selected. So keeping this loop seems like a
good idea.
>
> >
> > + if (!n->zasl_bs) {
> > + assert(n->params.mdts);
> > + n->zasl = n->params.mdts;
> > + } else {
> > + n->zasl = 31 - clz32(n->zasl_bs / n->page_size);
> > + }
> > +
> > nvme_set_timestamp(n, 0ULL);
> >
> > QTAILQ_INIT(&n->aer_queue);
> > @@ -2382,10 +3170,11 @@ static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
> > case CSS_NVM_ONLY:
> > trace_pci_nvme_css_nvm_cset_selected_by_host(data &
> > 0xffffffff);
> > - break;
> > + break;
> > case CSS_CSI:
> > NVME_SET_CC_CSS(n->bar.cc, CSS_CSI);
> > - trace_pci_nvme_css_all_csets_sel_by_host(data & 0xffffffff);
> > + trace_pci_nvme_css_all_csets_sel_by_host(data &
> > + 0xffffffff);
> > break;
> > case CSS_ADMIN_ONLY:
> > break;
> > @@ -2780,6 +3569,12 @@ static void nvme_init_state(NvmeCtrl *n)
> > n->features.temp_thresh_hi = NVME_TEMPERATURE_WARNING;
> > n->starttime_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
> > n->aer_reqs = g_new0(NvmeRequest *, n->params.aerl + 1);
> > +
> > + if (!n->params.zasl_kb) {
> > + n->zasl_bs = n->params.mdts ? 0 : NVME_DEFAULT_MAX_ZA_SIZE * KiB;
> > + } else {
> > + n->zasl_bs = n->params.zasl_kb * KiB;
> > + }
> > }
> >
> > int nvme_register_namespace(NvmeCtrl *n, NvmeNamespace *ns, Error **errp)
> > @@ -2985,8 +3780,9 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
> > NVME_CAP_SET_CQR(n->bar.cap, 1);
> > NVME_CAP_SET_TO(n->bar.cap, 0xf);
> > /*
> > - * The device now always supports NS Types, but all commands
> > - * that support CSI field will only handle NVM Command Set.
> > + * The device now always supports NS Types, even when "zoned" property
> > + * is set to zero. If this is the case, all commands that support CSI
> > + * field only handle NVM Command Set.
> > */
> > NVME_CAP_SET_CSS(n->bar.cap, (CAP_CSS_NVM | CAP_CSS_CSI_SUPP));
> > NVME_CAP_SET_MPSMAX(n->bar.cap, 4);
> > @@ -3033,9 +3829,21 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp)
> > static void nvme_exit(PCIDevice *pci_dev)
> > {
> > NvmeCtrl *n = NVME(pci_dev);
> > + NvmeNamespace *ns;
> > + int i;
> >
> > nvme_clear_ctrl(n);
> > +
> > + for (i = 1; i <= n->num_namespaces; i++) {
> > + ns = nvme_ns(n, i);
> > + if (!ns) {
> > + continue;
> > + }
> > +
> > + nvme_ns_cleanup(ns);
> > + }
> > g_free(n->namespaces);
> > +
> > g_free(n->cq);
> > g_free(n->sq);
> > g_free(n->aer_reqs);
> > @@ -3063,6 +3871,8 @@ static Property nvme_props[] = {
> > DEFINE_PROP_UINT32("aer_max_queued", NvmeCtrl, params.aer_max_queued,
> > 64),
> > DEFINE_PROP_UINT8("mdts", NvmeCtrl, params.mdts, 7),
> > DEFINE_PROP_BOOL("use-intel-id", NvmeCtrl, params.use_intel_id, false),
> > + DEFINE_PROP_UINT8("fill_pattern", NvmeCtrl, params.fill_pattern, 0),
> > + DEFINE_PROP_UINT32("zone_append_size_limit", NvmeCtrl, params.zasl_kb,
> > 0),
> > DEFINE_PROP_END_OF_LIST(),
> > };
> >
> > diff --git a/include/block/nvme.h b/include/block/nvme.h
> > index a7126e123f..628c665728 100644
> > --- a/include/block/nvme.h
> > +++ b/include/block/nvme.h
> > @@ -651,8 +651,10 @@ typedef struct QEMU_PACKED NvmeAerResult {
> > } NvmeAerResult;
> >
> > typedef struct QEMU_PACKED NvmeCqe {
> > - uint32_t result;
> > - uint32_t rsvd;
> > + union {
> > + uint64_t result64;
> > + uint32_t result32;
> > + };
> > uint16_t sq_head;
> > uint16_t sq_id;
> > uint16_t cid;
> > --
> > 2.21.0