Re: [PATCH v3 03/18] hw/block/nvme: additional tracing


From: Maxim Levitsky
Subject: Re: [PATCH v3 03/18] hw/block/nvme: additional tracing
Date: Wed, 29 Jul 2020 11:52:35 +0300
User-agent: Evolution 3.36.3 (3.36.3-1.fc32)

On Mon, 2020-07-06 at 08:12 +0200, Klaus Jensen wrote:
> From: Klaus Jensen <k.jensen@samsung.com>
> 
> Add various additional tracing and streamline nvme_identify_ns and
> nvme_identify_nslist (they do not need to repeat the command, it is
> already in the trace name).
> 
> Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
> ---
>  hw/block/nvme.c       | 33 +++++++++++++++++++++++++++++++++
>  hw/block/trace-events | 13 +++++++++++--
>  2 files changed, 44 insertions(+), 2 deletions(-)
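
For anyone who wants to see the new events in action: once built, they
can be enabled with QEMU's stock trace machinery, e.g.

    $ qemu-system-x86_64 -trace "pci_nvme_*" \
          -drive if=none,id=nvm,file=disk.img \
          -device nvme,drive=nvm,serial=deadbeef

(the -drive/-device arguments here are only an illustrative NVMe setup).
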
> 
> diff --git a/hw/block/nvme.c b/hw/block/nvme.c
> index 766cd5b33bb1..09ef54d771c4 100644
> --- a/hw/block/nvme.c
> +++ b/hw/block/nvme.c
> @@ -69,6 +69,20 @@
>  
>  static void nvme_process_sq(void *opaque);
>  
> +static uint16_t nvme_cid(NvmeRequest *req)
> +{
> +    if (!req) {
> +        return 0xffff;
> +    }
> +
> +    return le16_to_cpu(req->cqe.cid);
> +}
> +
> +static uint16_t nvme_sqid(NvmeRequest *req)
> +{
> +    return le16_to_cpu(req->sq->sqid);
> +}
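
Side note for readers, not part of the patch: pulling the CID back out
of the CQE works because the device copies it over from the submission
entry when the command is fetched, roughly

    req->cqe.cid = cmd->cid;    /* done when the SQE is read off the queue */

so nvme_cid() is usable at any point in a request's lifetime, and the
0xffff sentinel covers trace points that can run without a request.
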
> +
>  static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
>  {
>      hwaddr low = n->ctrl_mem.addr;
> @@ -331,6 +345,8 @@ static void nvme_post_cqes(void *opaque)
>  static void nvme_enqueue_req_completion(NvmeCQueue *cq, NvmeRequest *req)
>  {
>      assert(cq->cqid == req->sq->cqid);
> +    trace_pci_nvme_enqueue_req_completion(nvme_cid(req), cq->cqid,
> +                                          req->status);
>      QTAILQ_REMOVE(&req->sq->out_req_list, req, entry);
>      QTAILQ_INSERT_TAIL(&cq->req_list, req, entry);
>      timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
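
One thing to keep in mind when correlating traces: this event fires
when the completion is queued internally; the CQE itself only reaches
guest memory up to 500ns later, when the cq->timer callback
(nvme_post_cqes) runs.
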
> @@ -343,6 +359,8 @@ static void nvme_rw_cb(void *opaque, int ret)
>      NvmeCtrl *n = sq->ctrl;
>      NvmeCQueue *cq = n->cq[sq->cqid];
>  
> +    trace_pci_nvme_rw_cb(nvme_cid(req));
> +
>      if (!ret) {
>          block_acct_done(blk_get_stats(n->conf.blk), &req->acct);
>          req->status = NVME_SUCCESS;
> @@ -378,6 +396,8 @@ static uint16_t nvme_write_zeros(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
>      uint64_t offset = slba << data_shift;
>      uint32_t count = nlb << data_shift;
>  
> +    trace_pci_nvme_write_zeroes(nvme_cid(req), slba, nlb);
> +
>      if (unlikely(slba + nlb > ns->id_ns.nsze)) {
>          trace_pci_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
>          return NVME_LBA_RANGE | NVME_DNR;
> @@ -445,6 +465,8 @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
>      NvmeNamespace *ns;
>      uint32_t nsid = le32_to_cpu(cmd->nsid);
>  
> +    trace_pci_nvme_io_cmd(nvme_cid(req), nsid, nvme_sqid(req), cmd->opcode);
> +
>      if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
>          trace_pci_nvme_err_invalid_ns(nsid, n->num_namespaces);
>          return NVME_INVALID_NSID | NVME_DNR;
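
The opcode is logged raw, in hex, so it can be looked up in the spec's
opcode tables directly; for the NVM command set handled by this
function that is

    0x00  Flush
    0x01  Write
    0x02  Read
    0x08  Write Zeroes

and, being a single byte, it needs no byte swapping, unlike cid/nsid.
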
> @@ -876,6 +898,8 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
>  
>  static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
>  {
> +    trace_pci_nvme_admin_cmd(nvme_cid(req), nvme_sqid(req), cmd->opcode);
> +
>      switch (cmd->opcode) {
>      case NVME_ADM_CMD_DELETE_SQ:
>          return nvme_del_sq(n, cmd);
> @@ -1204,6 +1228,8 @@ static uint64_t nvme_mmio_read(void *opaque, hwaddr addr, unsigned size)
>      uint8_t *ptr = (uint8_t *)&n->bar;
>      uint64_t val = 0;
>  
> +    trace_pci_nvme_mmio_read(addr);
> +
>      if (unlikely(addr & (sizeof(uint32_t) - 1))) {
>          NVME_GUEST_ERR(pci_nvme_ub_mmiord_misaligned32,
>                         "MMIO read not 32-bit aligned,"
> @@ -1273,6 +1299,8 @@ static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
>              return;
>          }
>  
> +        trace_pci_nvme_mmio_doorbell_cq(cq->cqid, new_head);
> +
>          start_sqs = nvme_cq_full(cq) ? 1 : 0;
>          cq->head = new_head;
>          if (start_sqs) {
> @@ -1311,6 +1339,8 @@ static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
>              return;
>          }
>  
> +        trace_pci_nvme_mmio_doorbell_sq(sq->sqid, new_tail);
> +
>          sq->tail = new_tail;
>          timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
>      }
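
For readers matching these two doorbell events against the raw
pci_nvme_mmio_write event: assuming the default doorbell stride
(CAP.DSTRD == 0), the register layout decoded here is the standard

    SQ tail doorbell: 0x1000 + qid * 8
    CQ head doorbell: 0x1000 + qid * 8 + 4

so every doorbell ring shows up twice in the trace stream, first as
the raw MMIO write and then as the decoded doorbell event.
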
> @@ -1320,6 +1350,9 @@ static void nvme_mmio_write(void *opaque, hwaddr addr, uint64_t data,
>      unsigned size)
>  {
>      NvmeCtrl *n = (NvmeCtrl *)opaque;
> +
> +    trace_pci_nvme_mmio_write(addr, data);
> +
>      if (addr < sizeof(n->bar)) {
>          nvme_write_bar(n, addr, data, size);
>      } else if (addr >= 0x1000) {
> diff --git a/hw/block/trace-events b/hw/block/trace-events
> index 958fcc5508d1..c40c0d2e4b28 100644
> --- a/hw/block/trace-events
> +++ b/hw/block/trace-events
> @@ -33,19 +33,28 @@ pci_nvme_irq_msix(uint32_t vector) "raising MSI-X IRQ vector %u"
>  pci_nvme_irq_pin(void) "pulsing IRQ pin"
>  pci_nvme_irq_masked(void) "IRQ is masked"
>  pci_nvme_dma_read(uint64_t prp1, uint64_t prp2) "DMA read, prp1=0x%"PRIx64" prp2=0x%"PRIx64""
> +pci_nvme_io_cmd(uint16_t cid, uint32_t nsid, uint16_t sqid, uint8_t opcode) "cid %"PRIu16" nsid %"PRIu32" sqid %"PRIu16" opc 0x%"PRIx8""
> +pci_nvme_admin_cmd(uint16_t cid, uint16_t sqid, uint8_t opcode) "cid %"PRIu16" sqid %"PRIu16" opc 0x%"PRIx8""
>  pci_nvme_rw(const char *verb, uint32_t blk_count, uint64_t byte_count, uint64_t lba) "%s %"PRIu32" blocks (%"PRIu64" bytes) from LBA %"PRIu64""
> +pci_nvme_rw_cb(uint16_t cid) "cid %"PRIu16""
> +pci_nvme_write_zeroes(uint16_t cid, uint64_t slba, uint32_t nlb) "cid %"PRIu16" slba %"PRIu64" nlb %"PRIu32""
>  pci_nvme_create_sq(uint64_t addr, uint16_t sqid, uint16_t cqid, uint16_t qsize, uint16_t qflags) "create submission queue, addr=0x%"PRIx64", sqid=%"PRIu16", cqid=%"PRIu16", qsize=%"PRIu16", qflags=%"PRIu16""
>  pci_nvme_create_cq(uint64_t addr, uint16_t cqid, uint16_t vector, uint16_t size, uint16_t qflags, int ien) "create completion queue, addr=0x%"PRIx64", cqid=%"PRIu16", vector=%"PRIu16", qsize=%"PRIu16", qflags=%"PRIu16", ien=%d"
>  pci_nvme_del_sq(uint16_t qid) "deleting submission queue sqid=%"PRIu16""
>  pci_nvme_del_cq(uint16_t cqid) "deleted completion queue, cqid=%"PRIu16""
>  pci_nvme_identify_ctrl(void) "identify controller"
> -pci_nvme_identify_ns(uint16_t ns) "identify namespace, nsid=%"PRIu16""
> -pci_nvme_identify_nslist(uint16_t ns) "identify namespace list, nsid=%"PRIu16""
> +pci_nvme_identify_ns(uint32_t ns) "nsid %"PRIu32""
> +pci_nvme_identify_nslist(uint32_t ns) "nsid %"PRIu32""
>  pci_nvme_getfeat_vwcache(const char* result) "get feature volatile write cache, result=%s"
>  pci_nvme_getfeat_numq(int result) "get feature number of queues, result=%d"
>  pci_nvme_setfeat_numq(int reqcq, int reqsq, int gotcq, int gotsq) "requested cq_count=%d sq_count=%d, responding with cq_count=%d sq_count=%d"
>  pci_nvme_setfeat_timestamp(uint64_t ts) "set feature timestamp = 0x%"PRIx64""
>  pci_nvme_getfeat_timestamp(uint64_t ts) "get feature timestamp = 0x%"PRIx64""
> +pci_nvme_enqueue_req_completion(uint16_t cid, uint16_t cqid, uint16_t status) "cid %"PRIu16" cqid %"PRIu16" status 0x%"PRIx16""
> +pci_nvme_mmio_read(uint64_t addr) "addr 0x%"PRIx64""
> +pci_nvme_mmio_write(uint64_t addr, uint64_t data) "addr 0x%"PRIx64" data 0x%"PRIx64""
> +pci_nvme_mmio_doorbell_cq(uint16_t cqid, uint16_t new_head) "cqid %"PRIu16" new_head %"PRIu16""
> +pci_nvme_mmio_doorbell_sq(uint16_t sqid, uint16_t new_tail) "sqid %"PRIu16" new_tail %"PRIu16""
>  pci_nvme_mmio_intm_set(uint64_t data, uint64_t new_mask) "wrote MMIO, interrupt mask set, data=0x%"PRIx64", new_mask=0x%"PRIx64""
>  pci_nvme_mmio_intm_clr(uint64_t data, uint64_t new_mask) "wrote MMIO, interrupt mask clr, data=0x%"PRIx64", new_mask=0x%"PRIx64""
>  pci_nvme_mmio_cfg(uint64_t data) "wrote MMIO, config controller config=0x%"PRIx64""
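
A last usability note: besides the -trace switch, individual events
can be toggled at run time from the HMP monitor, e.g.

    (qemu) trace-event pci_nvme_io_cmd on

which helps when the per-doorbell/MMIO events get too chatty.
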
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>

Best regards,
        Maxim Levitsky