From: Fam Zheng
Subject: [Qemu-block] [RFC PATCH 05/11] block: Mark fd handlers as "protocol"
Date: Thu, 23 Jul 2015 14:32:12 +0800

The "protocol" type includes all the fd handlers and event notifiers used by
block layer, especially those that should be polled in a nested event loop.
Signed-off-by: Fam Zheng <address@hidden>
---
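As a reference for reviewers, here is a minimal sketch of the pattern this
patch applies to curl, iscsi, nbd, nfs, sheepdog and ssh. "my_proto" and its
callbacks are made up for illustration; only aio_set_fd_handler() and the
AIO_CLIENT_* constants come from this series:

#include "block/aio.h"

typedef struct MyProtoState {
    AioContext *aio_context;
    int sockfd;
} MyProtoState;

static void my_proto_read_cb(void *opaque)
{
    /* consume replies arriving on the server socket */
}

static void my_proto_attach_aio_context(MyProtoState *s, AioContext *ctx)
{
    s->aio_context = ctx;
    /* Tag the socket as a "protocol" client so a nested event loop can
     * dispatch it (and nothing else) while waiting for block I/O. */
    aio_set_fd_handler(ctx, s->sockfd, AIO_CLIENT_PROTOCOL,
                       my_proto_read_cb, NULL, s);
}

static void my_proto_detach_aio_context(MyProtoState *s)
{
    /* Deregistration passes NULL handlers with the same client type. */
    aio_set_fd_handler(s->aio_context, s->sockfd, AIO_CLIENT_PROTOCOL,
                       NULL, NULL, NULL);
}
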
block/curl.c | 8 ++++----
block/iscsi.c | 4 ++--
block/linux-aio.c | 4 ++--
block/nbd-client.c | 8 ++++----
block/nfs.c | 6 +++---
block/sheepdog.c | 22 +++++++++++-----------
block/ssh.c | 4 ++--
block/win32-aio.c | 4 ++--
include/block/aio.h | 1 +
9 files changed, 31 insertions(+), 30 deletions(-)
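The flag pays off at the poll site. Assuming the aio_poll_clients() helper
introduced in patch 03/11 (an aio_poll() variant that additionally takes a
client-type mask), a nested loop waiting for a block request would look
roughly like:

/* Hypothetical call site: dispatch only "protocol" handlers until the
 * request completes.  AIO_CLIENT_MASK_ALL would keep today's behaviour
 * of dispatching every registered client. */
while (!request_done) {
    aio_poll_clients(aio_context, AIO_CLIENT_PROTOCOL, true);
}

A real caller would probably also OR in the "context" client type from patch
07/11 so the event loop can still be kicked by aio_notify().
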
diff --git a/block/curl.c b/block/curl.c
index 6925672..75d237c 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -154,19 +154,19 @@ static int curl_sock_cb(CURL *curl, curl_socket_t fd, int action,
DPRINTF("CURL (AIO): Sock action %d on fd %d\n", action, fd);
switch (action) {
case CURL_POLL_IN:
- aio_set_fd_handler(s->aio_context, fd, AIO_CLIENT_UNSPECIFIED,
+ aio_set_fd_handler(s->aio_context, fd, AIO_CLIENT_PROTOCOL,
curl_multi_read, NULL, state);
break;
case CURL_POLL_OUT:
- aio_set_fd_handler(s->aio_context, fd, AIO_CLIENT_UNSPECIFIED,
+ aio_set_fd_handler(s->aio_context, fd, AIO_CLIENT_PROTOCOL,
NULL, curl_multi_do, state);
break;
case CURL_POLL_INOUT:
- aio_set_fd_handler(s->aio_context, fd, AIO_CLIENT_UNSPECIFIED,
+ aio_set_fd_handler(s->aio_context, fd, AIO_CLIENT_PROTOCOL,
curl_multi_read, curl_multi_do, state);
break;
case CURL_POLL_REMOVE:
- aio_set_fd_handler(s->aio_context, fd, AIO_CLIENT_UNSPECIFIED,
+ aio_set_fd_handler(s->aio_context, fd, AIO_CLIENT_PROTOCOL,
NULL, NULL, NULL);
break;
}
diff --git a/block/iscsi.c b/block/iscsi.c
index 0ee1295..1713625 100644
--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -292,7 +292,7 @@ iscsi_set_events(IscsiLun *iscsilun)
if (ev != iscsilun->events) {
aio_set_fd_handler(iscsilun->aio_context, iscsi_get_fd(iscsi),
- AIO_CLIENT_UNSPECIFIED,
+ AIO_CLIENT_PROTOCOL,
(ev & POLLIN) ? iscsi_process_read : NULL,
(ev & POLLOUT) ? iscsi_process_write : NULL,
iscsilun);
@@ -1277,7 +1277,7 @@ static void iscsi_detach_aio_context(BlockDriverState *bs)
IscsiLun *iscsilun = bs->opaque;
aio_set_fd_handler(iscsilun->aio_context, iscsi_get_fd(iscsilun->iscsi),
- AIO_CLIENT_UNSPECIFIED, NULL, NULL, NULL);
+ AIO_CLIENT_PROTOCOL, NULL, NULL, NULL);
iscsilun->events = 0;
if (iscsilun->nop_timer) {
diff --git a/block/linux-aio.c b/block/linux-aio.c
index 0921bde..1491684 100644
--- a/block/linux-aio.c
+++ b/block/linux-aio.c
@@ -287,7 +287,7 @@ void laio_detach_aio_context(void *s_, AioContext *old_context)
{
struct qemu_laio_state *s = s_;
- aio_set_event_notifier(old_context, &s->e, AIO_CLIENT_UNSPECIFIED, NULL);
+ aio_set_event_notifier(old_context, &s->e, AIO_CLIENT_PROTOCOL, NULL);
qemu_bh_delete(s->completion_bh);
}
@@ -296,7 +296,7 @@ void laio_attach_aio_context(void *s_, AioContext *new_context)
struct qemu_laio_state *s = s_;
s->completion_bh = aio_bh_new(new_context, qemu_laio_completion_bh, s);
- aio_set_event_notifier(new_context, &s->e, AIO_CLIENT_UNSPECIFIED,
+ aio_set_event_notifier(new_context, &s->e, AIO_CLIENT_PROTOCOL,
qemu_laio_completion_cb);
}
diff --git a/block/nbd-client.c b/block/nbd-client.c
index 36c46c5..edf2199 100644
--- a/block/nbd-client.c
+++ b/block/nbd-client.c
@@ -124,7 +124,7 @@ static int nbd_co_send_request(BlockDriverState *bs,
s->send_coroutine = qemu_coroutine_self();
aio_context = bdrv_get_aio_context(bs);
- aio_set_fd_handler(aio_context, s->sock, AIO_CLIENT_UNSPECIFIED,
+ aio_set_fd_handler(aio_context, s->sock, AIO_CLIENT_PROTOCOL,
nbd_reply_ready, nbd_restart_write, bs);
if (qiov) {
if (!s->is_unix) {
@@ -144,7 +144,7 @@ static int nbd_co_send_request(BlockDriverState *bs,
} else {
rc = nbd_send_request(s->sock, request);
}
- aio_set_fd_handler(aio_context, s->sock, AIO_CLIENT_UNSPECIFIED,
+ aio_set_fd_handler(aio_context, s->sock, AIO_CLIENT_PROTOCOL,
nbd_reply_ready, NULL, bs);
s->send_coroutine = NULL;
qemu_co_mutex_unlock(&s->send_mutex);
@@ -350,14 +350,14 @@ void nbd_client_detach_aio_context(BlockDriverState *bs)
{
aio_set_fd_handler(bdrv_get_aio_context(bs),
nbd_get_client_session(bs)->sock,
- AIO_CLIENT_UNSPECIFIED, NULL, NULL, NULL);
+ AIO_CLIENT_PROTOCOL, NULL, NULL, NULL);
}
void nbd_client_attach_aio_context(BlockDriverState *bs,
AioContext *new_context)
{
aio_set_fd_handler(new_context, nbd_get_client_session(bs)->sock,
- AIO_CLIENT_UNSPECIFIED, nbd_reply_ready, NULL, bs);
+ AIO_CLIENT_PROTOCOL, nbd_reply_ready, NULL, bs);
}
void nbd_client_close(BlockDriverState *bs)
diff --git a/block/nfs.c b/block/nfs.c
index a21dd6f..4d12067 100644
--- a/block/nfs.c
+++ b/block/nfs.c
@@ -63,7 +63,7 @@ static void nfs_set_events(NFSClient *client)
int ev = nfs_which_events(client->context);
if (ev != client->events) {
aio_set_fd_handler(client->aio_context, nfs_get_fd(client->context),
- AIO_CLIENT_UNSPECIFIED,
+ AIO_CLIENT_PROTOCOL,
(ev & POLLIN) ? nfs_process_read : NULL,
(ev & POLLOUT) ? nfs_process_write : NULL, client);
@@ -241,7 +241,7 @@ static void nfs_detach_aio_context(BlockDriverState *bs)
NFSClient *client = bs->opaque;
aio_set_fd_handler(client->aio_context, nfs_get_fd(client->context),
- AIO_CLIENT_UNSPECIFIED, NULL, NULL, NULL);
+ AIO_CLIENT_PROTOCOL, NULL, NULL, NULL);
client->events = 0;
}
@@ -261,7 +261,7 @@ static void nfs_client_close(NFSClient *client)
nfs_close(client->context, client->fh);
}
aio_set_fd_handler(client->aio_context, nfs_get_fd(client->context),
- AIO_CLIENT_UNSPECIFIED, NULL, NULL, NULL);
+ AIO_CLIENT_PROTOCOL, NULL, NULL, NULL);
nfs_destroy_context(client->context);
}
memset(client, 0, sizeof(NFSClient));
diff --git a/block/sheepdog.c b/block/sheepdog.c
index e0552b7..de9f8be 100644
--- a/block/sheepdog.c
+++ b/block/sheepdog.c
@@ -624,7 +624,7 @@ static coroutine_fn void do_co_req(void *opaque)
unsigned int *rlen = srco->rlen;
co = qemu_coroutine_self();
- aio_set_fd_handler(srco->aio_context, sockfd, AIO_CLIENT_UNSPECIFIED,
+ aio_set_fd_handler(srco->aio_context, sockfd, AIO_CLIENT_PROTOCOL,
NULL, restart_co_req, co);
ret = send_co_req(sockfd, hdr, data, wlen);
@@ -632,7 +632,7 @@ static coroutine_fn void do_co_req(void *opaque)
goto out;
}
- aio_set_fd_handler(srco->aio_context, sockfd, AIO_CLIENT_UNSPECIFIED,
+ aio_set_fd_handler(srco->aio_context, sockfd, AIO_CLIENT_PROTOCOL,
restart_co_req, NULL, co);
ret = qemu_co_recv(sockfd, hdr, sizeof(*hdr));
@@ -658,7 +658,7 @@ static coroutine_fn void do_co_req(void *opaque)
out:
/* there is at most one request for this sockfd, so it is safe to
* set each handler to NULL. */
- aio_set_fd_handler(srco->aio_context, sockfd, AIO_CLIENT_UNSPECIFIED,
+ aio_set_fd_handler(srco->aio_context, sockfd, AIO_CLIENT_PROTOCOL,
NULL, NULL, NULL);
srco->ret = ret;
@@ -743,7 +743,7 @@ static coroutine_fn void reconnect_to_sdog(void *opaque)
BDRVSheepdogState *s = opaque;
AIOReq *aio_req, *next;
- aio_set_fd_handler(s->aio_context, s->fd, AIO_CLIENT_UNSPECIFIED, NULL,
+ aio_set_fd_handler(s->aio_context, s->fd, AIO_CLIENT_PROTOCOL, NULL,
NULL, NULL);
close(s->fd);
s->fd = -1;
@@ -957,7 +957,7 @@ static int get_sheep_fd(BDRVSheepdogState *s, Error **errp)
return fd;
}
- aio_set_fd_handler(s->aio_context, fd, AIO_CLIENT_UNSPECIFIED,
+ aio_set_fd_handler(s->aio_context, fd, AIO_CLIENT_PROTOCOL,
co_read_response, NULL, s);
return fd;
}
@@ -1213,7 +1213,7 @@ static void coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req,
qemu_co_mutex_lock(&s->lock);
s->co_send = qemu_coroutine_self();
- aio_set_fd_handler(s->aio_context, s->fd, AIO_CLIENT_UNSPECIFIED,
+ aio_set_fd_handler(s->aio_context, s->fd, AIO_CLIENT_PROTOCOL,
co_read_response, co_write_request, s);
socket_set_cork(s->fd, 1);
@@ -1232,7 +1232,7 @@ static void coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req,
}
out:
socket_set_cork(s->fd, 0);
- aio_set_fd_handler(s->aio_context, s->fd, AIO_CLIENT_UNSPECIFIED,
+ aio_set_fd_handler(s->aio_context, s->fd, AIO_CLIENT_PROTOCOL,
co_read_response, NULL, s);
s->co_send = NULL;
qemu_co_mutex_unlock(&s->lock);
@@ -1411,7 +1411,7 @@ static void sd_detach_aio_context(BlockDriverState *bs)
{
BDRVSheepdogState *s = bs->opaque;
- aio_set_fd_handler(s->aio_context, s->fd, AIO_CLIENT_UNSPECIFIED, NULL,
+ aio_set_fd_handler(s->aio_context, s->fd, AIO_CLIENT_PROTOCOL, NULL,
NULL, NULL);
}
@@ -1421,7 +1421,7 @@ static void sd_attach_aio_context(BlockDriverState *bs,
BDRVSheepdogState *s = bs->opaque;
s->aio_context = new_context;
- aio_set_fd_handler(new_context, s->fd, AIO_CLIENT_UNSPECIFIED,
+ aio_set_fd_handler(new_context, s->fd, AIO_CLIENT_PROTOCOL,
co_read_response, NULL, s);
}
@@ -1537,7 +1537,7 @@ static int sd_open(BlockDriverState *bs, QDict *options, int flags,
return 0;
out:
aio_set_fd_handler(bdrv_get_aio_context(bs), s->fd,
- AIO_CLIENT_UNSPECIFIED, NULL, NULL, NULL);
+ AIO_CLIENT_PROTOCOL, NULL, NULL, NULL);
if (s->fd >= 0) {
closesocket(s->fd);
}
@@ -1922,7 +1922,7 @@ static void sd_close(BlockDriverState *bs)
}
aio_set_fd_handler(bdrv_get_aio_context(bs), s->fd,
- AIO_CLIENT_UNSPECIFIED, NULL, NULL, NULL);
+ AIO_CLIENT_PROTOCOL, NULL, NULL, NULL);
closesocket(s->fd);
g_free(s->host_spec);
}
diff --git a/block/ssh.c b/block/ssh.c
index 71d7ffe..3e721d2 100644
--- a/block/ssh.c
+++ b/block/ssh.c
@@ -803,7 +803,7 @@ static coroutine_fn void set_fd_handler(BDRVSSHState *s, BlockDriverState *bs)
rd_handler, wr_handler);
aio_set_fd_handler(bdrv_get_aio_context(bs), s->sock,
- AIO_CLIENT_UNSPECIFIED, rd_handler, wr_handler, co);
+ AIO_CLIENT_PROTOCOL, rd_handler, wr_handler, co);
}
static coroutine_fn void clear_fd_handler(BDRVSSHState *s,
@@ -811,7 +811,7 @@ static coroutine_fn void clear_fd_handler(BDRVSSHState *s,
{
DPRINTF("s->sock=%d", s->sock);
aio_set_fd_handler(bdrv_get_aio_context(bs), s->sock,
- AIO_CLIENT_UNSPECIFIED, NULL, NULL, NULL);
+ AIO_CLIENT_PROTOCOL, NULL, NULL, NULL);
}
/* A non-blocking call returned EAGAIN, so yield, ensuring the
diff --git a/block/win32-aio.c b/block/win32-aio.c
index 0081886..57b1916 100644
--- a/block/win32-aio.c
+++ b/block/win32-aio.c
@@ -174,7 +174,7 @@ int win32_aio_attach(QEMUWin32AIOState *aio, HANDLE hfile)
void win32_aio_detach_aio_context(QEMUWin32AIOState *aio,
AioContext *old_context)
{
- aio_set_event_notifier(old_context, &aio->e, AIO_CLIENT_UNSPECIFIED, NULL);
+ aio_set_event_notifier(old_context, &aio->e, AIO_CLIENT_PROTOCOL, NULL);
aio->is_aio_context_attached = false;
}
@@ -182,7 +182,7 @@ void win32_aio_attach_aio_context(QEMUWin32AIOState *aio,
AioContext *new_context)
{
aio->is_aio_context_attached = true;
- aio_set_event_notifier(new_context, &aio->e, AIO_CLIENT_UNSPECIFIED,
+ aio_set_event_notifier(new_context, &aio->e, AIO_CLIENT_PROTOCOL,
win32_aio_completion_cb);
}
diff --git a/include/block/aio.h b/include/block/aio.h
index 7a1c63e..1895a74 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -273,6 +273,7 @@ bool aio_pending(AioContext *ctx);
bool aio_dispatch(AioContext *ctx);
#define AIO_CLIENT_UNSPECIFIED (1 << 0)
+#define AIO_CLIENT_PROTOCOL (1 << 1)
#define AIO_CLIENT_MASK_ALL -1
/* Progress in completing AIO work to occur. This can issue new pending
--
2.4.3