[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [PATCH v2 3/4] 9p: add locking to V9fsDir
From: Greg Kurz
Subject: [Qemu-devel] [PATCH v2 3/4] 9p: add locking to V9fsDir
Date: Fri, 03 Jun 2016 09:39:34 +0200
User-agent: StGit/0.17.1-dirty
If several threads concurrently call readdir() with the same directory
stream pointer, it is possible that they all get a pointer to the same
dirent structure, whose content is overwritten each time readdir() is
called.
We must thus serialize accesses to the dirent structure.
This may be achieved with a mutex like below:
lock_mutex();
readdir();
// work with the dirent
unlock_mutex();
This patch adds all the locking, to prepare the switch from readdir_r() to readdir().
Reviewed-by: Eric Blake <address@hidden>
Signed-off-by: Greg Kurz <address@hidden>
---
v2: - s/call concurrently/concurrently call/
- added Eric's r-b
---
hw/9pfs/9p.c | 21 +++++++++++++++++++++
hw/9pfs/9p.h | 16 ++++++++++++++++
2 files changed, 37 insertions(+)
diff --git a/hw/9pfs/9p.c b/hw/9pfs/9p.c
index 451ecbde0b08..3ed077b25b98 100644
--- a/hw/9pfs/9p.c
+++ b/hw/9pfs/9p.c
@@ -300,6 +300,9 @@ static V9fsFidState *alloc_fid(V9fsState *s, int32_t fid)
f->next = s->fid_list;
s->fid_list = f;
+ v9fs_readdir_init(&f->fs.dir);
+ v9fs_readdir_init(&f->fs_reclaim.dir);
+
return f;
}
@@ -1640,6 +1643,9 @@ static int v9fs_do_readdir_with_stat(V9fsPDU *pdu,
while (1) {
v9fs_path_init(&path);
+
+ v9fs_readdir_lock(&fidp->fs.dir);
+
err = v9fs_co_readdir_r(pdu, fidp, dent, &result);
if (err || !result) {
break;
@@ -1658,6 +1664,9 @@ static int v9fs_do_readdir_with_stat(V9fsPDU *pdu,
}
/* 11 = 7 + 4 (7 = start offset, 4 = space for storing count) */
len = pdu_marshal(pdu, 11 + count, "S", &v9stat);
+
+ v9fs_readdir_unlock(&fidp->fs.dir);
+
if ((len != (v9stat.size + 2)) || ((count + len) > max_count)) {
/* Ran out of buffer. Set dir back to old position and return */
v9fs_co_seekdir(pdu, fidp, saved_dir_pos);
@@ -1672,6 +1681,8 @@ static int v9fs_do_readdir_with_stat(V9fsPDU *pdu,
saved_dir_pos = dent->d_off;
}
+ v9fs_readdir_unlock(&fidp->fs.dir);
+
g_free(dent);
v9fs_path_free(&path);
if (err < 0) {
@@ -1819,6 +1830,8 @@ static int v9fs_do_readdir(V9fsPDU *pdu,
dent = g_malloc(sizeof(struct dirent));
while (1) {
+ v9fs_readdir_lock(&fidp->fs.dir);
+
err = v9fs_co_readdir_r(pdu, fidp, dent, &result);
if (err || !result) {
break;
@@ -1826,6 +1839,8 @@ static int v9fs_do_readdir(V9fsPDU *pdu,
v9fs_string_init(&name);
v9fs_string_sprintf(&name, "%s", dent->d_name);
if ((count + v9fs_readdir_data_size(&name)) > max_count) {
+ v9fs_readdir_unlock(&fidp->fs.dir);
+
/* Ran out of buffer. Set dir back to old position and return */
v9fs_co_seekdir(pdu, fidp, saved_dir_pos);
v9fs_string_free(&name);
@@ -1847,6 +1862,9 @@ static int v9fs_do_readdir(V9fsPDU *pdu,
len = pdu_marshal(pdu, 11 + count, "Qqbs",
&qid, dent->d_off,
dent->d_type, &name);
+
+ v9fs_readdir_unlock(&fidp->fs.dir);
+
if (len < 0) {
v9fs_co_seekdir(pdu, fidp, saved_dir_pos);
v9fs_string_free(&name);
@@ -1857,6 +1875,9 @@ static int v9fs_do_readdir(V9fsPDU *pdu,
v9fs_string_free(&name);
saved_dir_pos = dent->d_off;
}
+
+ v9fs_readdir_unlock(&fidp->fs.dir);
+
g_free(dent);
if (err < 0) {
return err;
diff --git a/hw/9pfs/9p.h b/hw/9pfs/9p.h
index 92ee309ef4ba..46d787627a4c 100644
--- a/hw/9pfs/9p.h
+++ b/hw/9pfs/9p.h
@@ -169,8 +169,24 @@ typedef struct V9fsXattr
typedef struct V9fsDir {
DIR *stream;
+ QemuMutex readdir_mutex;
} V9fsDir;
+static inline void v9fs_readdir_lock(V9fsDir *dir)
+{
+ qemu_mutex_lock(&dir->readdir_mutex);
+}
+
+static inline void v9fs_readdir_unlock(V9fsDir *dir)
+{
+ qemu_mutex_unlock(&dir->readdir_mutex);
+}
+
+static inline void v9fs_readdir_init(V9fsDir *dir)
+{
+ qemu_mutex_init(&dir->readdir_mutex);
+}
+
/*
* Filled by fs driver on open and other
* calls.