qemu-block
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[PATCH 4/6] test-bdrv-drain.c: adapt test to the coming subtree drains


From: Emanuele Giuseppe Esposito
Subject: [PATCH 4/6] test-bdrv-drain.c: adapt test to the coming subtree drains
Date: Tue, 8 Feb 2022 10:36:53 -0500

There will be 2 problems in this test when we add
subtree drains in bdrv_replace_child_noperm:

- First, the test is inconsistent about taking the AioContext lock when
  calling bdrv_replace_child_noperm.  bdrv_replace_child_noperm is reached
  in two places: from blk_insert_bs directly, and via block_job_create.
  Only the second does it with the AioContext lock taken, and there seems
  to be no reason why the lock is needed.
  Move aio_context_acquire further down, to just protect block_job_add_bdrv()

- Second, test_detach_indirect is only interested in observing the first
  call to .drained_begin. In the original test, there was only a single
  subtree drain; however, with additional drains introduced in
  bdrv_replace_child_noperm(), the test callback would be called too early
  and/or multiple times.
  Override the callback only when we actually want to use it, and put back
  the original after it's been invoked.

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
---
 tests/unit/test-bdrv-drain.c | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)

diff --git a/tests/unit/test-bdrv-drain.c b/tests/unit/test-bdrv-drain.c
index 4924ceb562..c52ba2db4e 100644
--- a/tests/unit/test-bdrv-drain.c
+++ b/tests/unit/test-bdrv-drain.c
@@ -912,12 +912,12 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type,
     blk_insert_bs(blk_target, target, &error_abort);
     blk_set_allow_aio_context_change(blk_target, true);
 
-    aio_context_acquire(ctx);
     tjob = block_job_create("job0", &test_job_driver, NULL, src,
                             0, BLK_PERM_ALL,
                             0, 0, NULL, NULL, &error_abort);
     tjob->bs = src;
     job = &tjob->common;
+    aio_context_acquire(ctx);
     block_job_add_bdrv(job, "target", target, 0, BLK_PERM_ALL, &error_abort);
 
     switch (result) {
@@ -1342,15 +1342,18 @@ static void detach_by_parent_aio_cb(void *opaque, int ret)
     }
 }
 
+static BdrvChildClass detach_by_driver_cb_class;
+
 static void detach_by_driver_cb_drained_begin(BdrvChild *child)
 {
+    /* restore .drained_begin cb, we don't need it anymore. */
+    detach_by_driver_cb_class.drained_begin = child_of_bds.drained_begin;
+
     aio_bh_schedule_oneshot(qemu_get_current_aio_context(),
                             detach_indirect_bh, &detach_by_parent_data);
     child_of_bds.drained_begin(child);
 }
 
-static BdrvChildClass detach_by_driver_cb_class;
-
 /*
  * Initial graph:
  *
@@ -1382,8 +1385,6 @@ static void test_detach_indirect(bool by_parent_cb)
 
     if (!by_parent_cb) {
         detach_by_driver_cb_class = child_of_bds;
-        detach_by_driver_cb_class.drained_begin =
-            detach_by_driver_cb_drained_begin;
     }
 
     /* Create all involved nodes */
@@ -1441,6 +1442,12 @@ static void test_detach_indirect(bool by_parent_cb)
     acb = blk_aio_preadv(blk, 0, &qiov, 0, detach_by_parent_aio_cb, NULL);
     g_assert(acb != NULL);
 
+    if (!by_parent_cb) {
+        /* set .drained_begin cb to run only in the following drain. */
+        detach_by_driver_cb_class.drained_begin =
+            detach_by_driver_cb_drained_begin;
+    }
+
     /* Drain and check the expected result */
     bdrv_subtree_drained_begin(parent_b);
 
-- 
2.31.1




reply via email to

[Prev in Thread] Current Thread [Next in Thread]