qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Qemu-devel] [PATCH for-2.8 2/5] 9pfs: cancel active PDUs in virtfs_reset()


From: Greg Kurz
Subject: [Qemu-devel] [PATCH for-2.8 2/5] 9pfs: cancel active PDUs in virtfs_reset()
Date: Fri, 25 Nov 2016 12:54:28 +0100
User-agent: StGit/0.17.1-dirty

According to the 9P spec [1], the version operation should abort any
outstanding I/O, so that a new session may be started in a clean state.
This also makes sense in case of reset: we don't want to keep stale state
around.

This patch modifies virtfs_reset() which is called in both cases, so that
it explicitly cancels and waits for inflight requests to terminate.

Of course this may have impact on the underlying filesystem, as some
operations may have partly modified its state. But it is out of the
scope of this patch to have each operation handle cancellation in a
cleaner way.

[1] http://man.cat-v.org/plan_9/5/version

Signed-off-by: Greg Kurz <address@hidden>
---
 hw/9pfs/9p.c |   37 +++++++++++++++++++++++++++++--------
 1 file changed, 29 insertions(+), 8 deletions(-)

diff --git a/hw/9pfs/9p.c b/hw/9pfs/9p.c
index e4815a97922d..3a48cdcdf975 100644
--- a/hw/9pfs/9p.c
+++ b/hw/9pfs/9p.c
@@ -532,10 +532,37 @@ static int coroutine_fn v9fs_mark_fids_unreclaim(V9fsPDU 
*pdu, V9fsPath *path)
     return 0;
 }
 
+static void coroutine_fn pdu_cancel(V9fsPDU *pdu)
+{
+    pdu->cancelled = 1;
+    /*
+     * Wait for pdu to complete.
+     */
+    qemu_co_queue_wait(&pdu->complete);
+    pdu->cancelled = 0;
+    if (!qemu_co_queue_next(&pdu->complete)) {
+        pdu_free(pdu);
+    }
+}
+
 static void coroutine_fn virtfs_reset(V9fsPDU *pdu)
 {
     V9fsState *s = pdu->s;
     V9fsFidState *fidp;
+    bool done = false;
+
+    while (!done) {
+        V9fsPDU *cancel_pdu;
+
+        done = true;
+        QLIST_FOREACH(cancel_pdu, &s->active_list, next) {
+            if (cancel_pdu != pdu) {
+                done = false;
+                pdu_cancel(cancel_pdu);
+                break;
+            }
+        }
+    }
 
     /* Free all fids */
     while (s->fid_list) {
@@ -669,7 +696,7 @@ static void coroutine_fn pdu_complete(V9fsPDU *pdu, ssize_t 
len)
 
     pdu_push_and_notify(pdu);
 
-    /* Now wakeup anybody waiting in flush for this request */
+    /* Now wakeup anybody waiting in flush or reset for this request */
     if (!qemu_co_queue_next(&pdu->complete)) {
         pdu_free(pdu);
     }
@@ -2354,13 +2381,7 @@ static void coroutine_fn v9fs_flush(void *opaque)
         }
     }
     if (cancel_pdu) {
-        cancel_pdu->cancelled = 1;
-        /*
-         * Wait for pdu to complete.
-         */
-        qemu_co_queue_wait(&cancel_pdu->complete);
-        cancel_pdu->cancelled = 0;
-        pdu_free(cancel_pdu);
+        pdu_cancel(cancel_pdu);
     }
     pdu_complete(pdu, 7);
 }




reply via email to

[Prev in Thread] Current Thread [Next in Thread]