From: Stefan Hajnoczi
Subject: [PATCH 5/8] virtio-blk: mark IO_CODE functions
Date: Tue, 8 Nov 2022 16:19:27 -0500

From: Emanuele Giuseppe Esposito <eesposit@redhat.com>

Just as was done in the block API, mark the functions in virtio-blk that
are also called from iothread(s).

We know these functions are IO because many of them are blk_* callbacks,
which always run in the device iothread, and the rest are propagated from
the leaf IO functions (if a function calls an IO_CODE function, it is
categorized as IO_CODE too).
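
For illustration only, here is a minimal sketch of the convention being
applied (hypothetical macro and function names; QEMU's real IO_CODE() and
GLOBAL_STATE_CODE() markers live in the block layer headers, and IO_CODE()
may be a documentation-only no-op rather than a runtime assertion):

    #include <assert.h>
    #include <stdbool.h>

    /* Assumed helper: true when running in the main (global state) thread. */
    extern bool in_main_thread(void);

    /* IO functions may run in any iothread, so there is nothing to assert. */
    #define EXAMPLE_IO_CODE()           do { } while (0)

    /* Global state functions must never run in an iothread. */
    #define EXAMPLE_GLOBAL_STATE_CODE() assert(in_main_thread())

    /* A leaf IO function (e.g. a blk_* completion callback) is marked... */
    static void example_leaf_io_fn(void)
    {
        EXAMPLE_IO_CODE();
        /* ... completion work that may run in the device iothread ... */
    }

    /* ...and any caller of an IO_CODE function is categorized as IO too. */
    static void example_caller_fn(void)
    {
        EXAMPLE_IO_CODE();
        example_leaf_io_fn();
    }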

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <20220609143727.1151816-7-eesposit@redhat.com>
---
 hw/block/dataplane/virtio-blk.c |  4 +++
 hw/block/virtio-blk.c           | 45 ++++++++++++++++++++++++++++-----
 2 files changed, 43 insertions(+), 6 deletions(-)

diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
index 728c9cd86c..3593ac0e7b 100644
--- a/hw/block/dataplane/virtio-blk.c
+++ b/hw/block/dataplane/virtio-blk.c
@@ -63,6 +63,8 @@ static void notify_guest_bh(void *opaque)
     unsigned long bitmap[BITS_TO_LONGS(nvqs)];
     unsigned j;
 
+    IO_CODE();
+
     memcpy(bitmap, s->batch_notify_vqs, sizeof(bitmap));
     memset(s->batch_notify_vqs, 0, sizeof(bitmap));
 
@@ -288,6 +290,8 @@ static void virtio_blk_data_plane_stop_bh(void *opaque)
     VirtIOBlockDataPlane *s = opaque;
     unsigned i;
 
+    IO_CODE();
+
     for (i = 0; i < s->conf->num_queues; i++) {
         VirtQueue *vq = virtio_get_queue(s->vdev, i);
 
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index 02b213a140..f8fcf25292 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -39,6 +39,8 @@
 static void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq,
                                     VirtIOBlockReq *req)
 {
+    IO_CODE();
+
     req->dev = s;
     req->vq = vq;
     req->qiov.size = 0;
@@ -57,6 +59,8 @@ static void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status)
     VirtIOBlock *s = req->dev;
     VirtIODevice *vdev = VIRTIO_DEVICE(s);
 
+    IO_CODE();
+
     trace_virtio_blk_req_complete(vdev, req, status);
 
     stb_p(&req->in->status, status);
@@ -76,6 +80,8 @@ static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
     VirtIOBlock *s = req->dev;
     BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);
 
+    IO_CODE();
+
     if (action == BLOCK_ERROR_ACTION_STOP) {
         /* Break the link as the next request is going to be parsed from the
          * ring again. Otherwise we may end up doing a double completion! */
@@ -143,7 +149,9 @@ static void virtio_blk_flush_complete(void *opaque, int ret)
     VirtIOBlockReq *req = opaque;
     VirtIOBlock *s = req->dev;
 
+    IO_CODE();
     aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
+
     if (ret) {
         if (virtio_blk_handle_rw_error(req, -ret, 0, true)) {
             goto out;
@@ -165,7 +173,9 @@ static void virtio_blk_discard_write_zeroes_complete(void *opaque, int ret)
     bool is_write_zeroes = (virtio_ldl_p(VIRTIO_DEVICE(s), &req->out.type) &
                             ~VIRTIO_BLK_T_BARRIER) == VIRTIO_BLK_T_WRITE_ZEROES;
 
+    IO_CODE();
     aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
+
     if (ret) {
         if (virtio_blk_handle_rw_error(req, -ret, false, is_write_zeroes)) {
             goto out;
@@ -198,6 +208,8 @@ static void virtio_blk_ioctl_complete(void *opaque, int status)
     struct virtio_scsi_inhdr *scsi;
     struct sg_io_hdr *hdr;
 
+    IO_CODE();
+
     scsi = (void *)req->elem.in_sg[req->elem.in_num - 2].iov_base;
 
     if (status) {
@@ -239,6 +251,8 @@ static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s, VirtQueue *vq)
 {
     VirtIOBlockReq *req = virtqueue_pop(vq, sizeof(VirtIOBlockReq));
 
+    IO_CODE();
+
     if (req) {
         virtio_blk_init_request(s, vq, req);
     }
@@ -259,6 +273,8 @@ static int virtio_blk_handle_scsi_req(VirtIOBlockReq *req)
     BlockAIOCB *acb;
 #endif
 
+    IO_CODE();
+
     /*
      * We require at least one output segment each for the virtio_blk_outhdr
      * and the SCSI command block.
@@ -357,6 +373,7 @@ fail:
 static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
 {
     int status;
+    IO_CODE();
 
     status = virtio_blk_handle_scsi_req(req);
     if (status != -EINPROGRESS) {
@@ -374,6 +391,8 @@ static inline void submit_requests(VirtIOBlock *s, MultiReqBuffer *mrb,
     bool is_write = mrb->is_write;
     BdrvRequestFlags flags = 0;
 
+    IO_CODE();
+
     if (num_reqs > 1) {
         int i;
         struct iovec *tmp_iov = qiov->iov;
@@ -423,6 +442,8 @@ static int multireq_compare(const void *a, const void *b)
     const VirtIOBlockReq *req1 = *(VirtIOBlockReq **)a,
                          *req2 = *(VirtIOBlockReq **)b;
 
+    IO_CODE();
+
     /*
      * Note that we can't simply subtract sector_num1 from sector_num2
      * here as that could overflow the return value.
@@ -442,6 +463,8 @@ static void virtio_blk_submit_multireq(VirtIOBlock *s, MultiReqBuffer *mrb)
     uint32_t max_transfer;
     int64_t sector_num = 0;
 
+    IO_CODE();
+
     if (mrb->num_reqs == 1) {
         submit_requests(s, mrb, 0, 1, -1);
         mrb->num_reqs = 0;
@@ -491,6 +514,8 @@ static void virtio_blk_handle_flush(VirtIOBlockReq *req, MultiReqBuffer *mrb)
 {
     VirtIOBlock *s = req->dev;
 
+    IO_CODE();
+
     block_acct_start(blk_get_stats(s->blk), &req->acct, 0,
                      BLOCK_ACCT_FLUSH);
 
@@ -509,6 +534,8 @@ static bool virtio_blk_sect_range_ok(VirtIOBlock *dev,
     uint64_t nb_sectors = size >> BDRV_SECTOR_BITS;
     uint64_t total_sectors;
 
+    IO_CODE();
+
     if (nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
         return false;
     }
@@ -535,6 +562,8 @@ static uint8_t virtio_blk_handle_discard_write_zeroes(VirtIOBlockReq *req,
     uint8_t err_status;
     int bytes;
 
+    IO_CODE();
+
     sector = virtio_ldq_p(vdev, &dwz_hdr->sector);
     num_sectors = virtio_ldl_p(vdev, &dwz_hdr->num_sectors);
     flags = virtio_ldl_p(vdev, &dwz_hdr->flags);
@@ -613,6 +642,8 @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
     VirtIOBlock *s = req->dev;
     VirtIODevice *vdev = VIRTIO_DEVICE(s);
 
+    IO_CODE();
+
     if (req->elem.out_num < 1 || req->elem.in_num < 1) {
         virtio_error(vdev, "virtio-blk missing headers");
         return -1;
@@ -763,6 +794,8 @@ void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
     MultiReqBuffer mrb = {};
     bool suppress_notifications = virtio_queue_get_notification(vq);
 
+    IO_CODE();
+
     aio_context_acquire(blk_get_aio_context(s->blk));
     blk_io_plug(s->blk);
 
@@ -796,6 +829,8 @@ static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
 {
     VirtIOBlock *s = (VirtIOBlock *)vdev;
 
+    IO_CODE();
+
     if (s->dataplane && !s->dataplane_started) {
         /* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start
          * dataplane here instead of waiting for .set_status().
@@ -846,8 +881,9 @@ static void virtio_blk_dma_restart_bh(void *opaque)
 }
 
 /*
- * Only called when VM is started or stopped in cpus.c.
- * No iothread runs in parallel
+ * Only called when VM is started or stopped in cpus.c. When running is true
+ * ->start_ioeventfd() has already been called. When running is false
+ * ->stop_ioeventfd() has not yet been called.
  */
 static void virtio_blk_dma_restart_cb(void *opaque, bool running,
                                       RunState state)
@@ -867,6 +903,7 @@ static void virtio_blk_dma_restart_cb(void *opaque, bool running,
             virtio_blk_dma_restart_bh, s);
 }
 
+/* ->stop_ioeventfd() has already been called by virtio_bus_reset() */
 static void virtio_blk_reset(VirtIODevice *vdev)
 {
     VirtIOBlock *s = VIRTIO_BLK(vdev);
@@ -877,10 +914,6 @@ static void virtio_blk_reset(VirtIODevice *vdev)
 
     ctx = blk_get_aio_context(s->blk);
     aio_context_acquire(ctx);
-    /*
-     * This drain together with ->stop_ioeventfd() in virtio_pci_reset()
-     * stops all Iothreads.
-     */
     blk_drain(s->blk);
 
     /* We drop queued requests after blk_drain() because blk_drain() itself can
-- 
2.38.1



