[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [Qemu-devel] [RFC PATCH v4 06/15] net: port socket to GSource
From: liu ping fan
Subject: Re: [Qemu-devel] [RFC PATCH v4 06/15] net: port socket to GSource
Date: Fri, 19 Apr 2013 13:58:40 +0800
On Thu, Apr 18, 2013 at 10:34 PM, Stefan Hajnoczi <address@hidden> wrote:
> On Wed, Apr 17, 2013 at 04:39:15PM +0800, Liu Ping Fan wrote:
>> @@ -160,7 +154,13 @@ static void net_socket_send(void *opaque)
>> net_socket_read_poll(s, false);
>> net_socket_write_poll(s, false);
>> if (s->listen_fd != -1) {
>> - qemu_set_fd_handler(s->listen_fd, net_socket_accept, NULL, s);
>> + nsrc = s->nsrc;
>> + new_nsrc = event_source_new(s->listen_fd,
>> net_socket_listen_handler,
>> + s);
>> + s->nsrc = new_nsrc;
>> + new_nsrc->gfd.events = G_IO_IN;
>> + g_source_destroy(&nsrc->source);
>> + s->nc.info->bind_ctx(&s->nc, NULL);
>
> The following is equivalent:
>
> event_source_release(s->nsrc);
> s->nsrc = event_source_new(s->listen_fd, net_socket_listen_handler, s);
> s->nc.info->bind_ctx(&s->nc, NULL);
>
> Then new_nsrc/nsrc can be dropped and the nsrc memory leak is avoided.
>
Applied here, and likewise to the following occurrences of the same issue.
> Note that gfd.events = G_IO_IN does not get used since prepare()
> overwrites gfd.events. Please drop and make sure read_poll == true.
>
Applied.
> I'm a little worried that we're lacking G_IO_HUP | G_IO_ERR. Perhaps
> disconnect and network errors will be ignored.
>
NetSocketState can do only limited things about these situations; perhaps
we should implement net_socket_can_receive() to export such events to the
frontend?
Pingfan
Pingfan
>> }
>> closesocket(s->fd);
>>
>> @@ -331,6 +331,14 @@ static void net_socket_cleanup(NetClientState *nc)
>> closesocket(s->listen_fd);
>> s->listen_fd = -1;
>> }
>> + event_source_release(s->nsrc);
>> +}
>> +
>> +static void net_socket_bind_ctx(NetClientState *nc, GMainContext *ctx)
>> +{
>> + NetSocketState *s = DO_UPCAST(NetSocketState, nc, nc);
>> +
>> + g_source_attach(&s->nsrc->source, ctx);
>> }
>>
>> static NetClientInfo net_dgram_socket_info = {
>> @@ -338,8 +346,22 @@ static NetClientInfo net_dgram_socket_info = {
>> .size = sizeof(NetSocketState),
>> .receive = net_socket_receive_dgram,
>> .cleanup = net_socket_cleanup,
>> + .bind_ctx = net_socket_bind_ctx,
>> };
>>
>> +static gboolean net_socket_dgram_handler(gpointer data)
>> +{
>> + EventGSource *nsrc = (EventGSource *)data;
>> + NetSocketState *s = nsrc->opaque;
>> +
>> + if (nsrc->gfd.revents & G_IO_IN) {
>> + net_socket_send_dgram(s);
>> + } else {
>> + net_socket_writable(s);
>> + }
>> + return true;
>> +}
>> +
>> static NetSocketState *net_socket_fd_init_dgram(NetClientState *peer,
>> const char *model,
>> const char *name,
>> @@ -350,6 +372,7 @@ static NetSocketState
>> *net_socket_fd_init_dgram(NetClientState *peer,
>> socklen_t saddr_len;
>> NetClientState *nc;
>> NetSocketState *s;
>> + EventGSource *nsrc;
>>
>> /* fd passed: multicast: "learn" dgram_dst address from bound address
>> and save it
>> * Because this may be "shared" socket from a "master" process,
>> datagrams would be recv()
>> @@ -393,7 +416,10 @@ static NetSocketState
>> *net_socket_fd_init_dgram(NetClientState *peer,
>>
>> s->fd = fd;
>> s->listen_fd = -1;
>> - s->send_fn = net_socket_send_dgram;
>> + nsrc = event_source_new(fd, net_socket_dgram_handler, s);
>> + s->nsrc = nsrc;
>> + nsrc->gfd.events = G_IO_IN|G_IO_OUT;
>
> Please drop.
>
>> + nc->info->bind_ctx(nc, NULL);
>> net_socket_read_poll(s, true);
>>
>> /* mcast: save bound address as dst */
>> @@ -408,20 +434,28 @@ err:
>> return NULL;
>> }
>>
>> -static void net_socket_connect(void *opaque)
>> -{
>> - NetSocketState *s = opaque;
>> - s->send_fn = net_socket_send;
>> - net_socket_read_poll(s, true);
>> -}
>> -
>> static NetClientInfo net_socket_info = {
>> .type = NET_CLIENT_OPTIONS_KIND_SOCKET,
>> .size = sizeof(NetSocketState),
>> .receive = net_socket_receive,
>> .cleanup = net_socket_cleanup,
>> + .bind_ctx = net_socket_bind_ctx,
>> };
>>
>> +static gboolean net_socket_connect_handler(gpointer data)
>> +{
>> + EventGSource *new_nsrc, *nsrc = data;
>> + NetSocketState *s = nsrc->opaque;
>> +
>> + new_nsrc = event_source_new(s->fd, net_socket_establish_handler, s);
>> + s->nsrc = new_nsrc;
>> + new_nsrc->gfd.events = G_IO_IN|G_IO_OUT;
>
> Please drop.
>
>> + g_source_destroy(&nsrc->source);
>> + s->nc.info->bind_ctx(&s->nc, NULL);
>> +
>> + return true;
>> +}
>> +
>> static NetSocketState *net_socket_fd_init_stream(NetClientState *peer,
>> const char *model,
>> const char *name,
>> @@ -429,6 +463,7 @@ static NetSocketState
>> *net_socket_fd_init_stream(NetClientState *peer,
>> {
>> NetClientState *nc;
>> NetSocketState *s;
>> + EventGSource *nsrc;
>>
>> nc = qemu_new_net_client(&net_socket_info, peer, model, name);
>>
>> @@ -440,9 +475,16 @@ static NetSocketState
>> *net_socket_fd_init_stream(NetClientState *peer,
>> s->listen_fd = -1;
>>
>> if (is_connected) {
>> - net_socket_connect(s);
>> + nsrc = event_source_new(fd, net_socket_establish_handler, s);
>> + s->nsrc = nsrc;
>> + nsrc->gfd.events = G_IO_IN|G_IO_OUT;
>
> Please drop.
>
>> + nc->info->bind_ctx(nc, NULL);
>> } else {
>> - qemu_set_fd_handler(s->fd, NULL, net_socket_connect, s);
>> + nsrc = event_source_new(fd, net_socket_connect_handler, s);
>> + s->nsrc = nsrc;
>> + nsrc->gfd.events = G_IO_IN;
>
> Please drop.
>
>> + nc->info->bind_ctx(nc, NULL);
>> +
>> }
>> return s;
>> }
>> @@ -473,30 +515,69 @@ static NetSocketState
>> *net_socket_fd_init(NetClientState *peer,
>> return NULL;
>> }
>>
>> -static void net_socket_accept(void *opaque)
>> +static gboolean net_socket_establish_handler(gpointer data)
>> +{
>> + EventGSource *nsrc = (EventGSource *)data;
>> + NetSocketState *s = nsrc->opaque;
>> +
>> + if (nsrc->gfd.revents & G_IO_IN) {
>> + net_socket_send(s);
>> + } else {
>> + net_socket_writable(s);
>> + }
>> + return true;
>> +}
>> +
>> +static bool readable(void *opaque)
>> {
>> NetSocketState *s = opaque;
>> +
>> + if (s->read_poll && net_socket_can_send(s)) {
>> + return true;
>> + }
>> + return false;
>> +}
>> +
>> +static bool writable(void *opaque)
>> +{
>> + NetSocketState *s = opaque;
>> +
>> + if (s->write_poll) {
>> + return true;
>> + }
>> + return false;
>> +}
>> +
>> +static gboolean net_socket_listen_handler(gpointer data)
>> +{
>> + EventGSource *new_nsrc, *nsrc = data;
>> + NetSocketState *s = nsrc->opaque;
>> struct sockaddr_in saddr;
>> socklen_t len;
>> int fd;
>>
>> - for(;;) {
>> - len = sizeof(saddr);
>> - fd = qemu_accept(s->listen_fd, (struct sockaddr *)&saddr, &len);
>> - if (fd < 0 && errno != EINTR) {
>> - return;
>> - } else if (fd >= 0) {
>> - qemu_set_fd_handler(s->listen_fd, NULL, NULL, NULL);
>> - break;
>> - }
>> + len = sizeof(saddr);
>> + fd = qemu_accept(s->listen_fd, (struct sockaddr *)&saddr, &len);
>> + if (fd < 0 && errno != EINTR) {
>> + return false;
>> }
>>
>> s->fd = fd;
>> s->nc.link_down = false;
>> - net_socket_connect(s);
>> + new_nsrc = event_source_new(fd, net_socket_establish_handler, s);
>> + s->nsrc = new_nsrc;
>> + new_nsrc->gfd.events = G_IO_IN|G_IO_OUT;
>
> Please drop.
>
>> + new_nsrc->readable = readable;
>> + new_nsrc->writable = writable;
>> + /* prevent more than one connect req */
>> + g_source_destroy(&nsrc->source);
>> + s->nc.info->bind_ctx(&s->nc, NULL);
>> + net_socket_read_poll(s, true);
>> snprintf(s->nc.info_str, sizeof(s->nc.info_str),
>> "socket: connection from %s:%d",
>> inet_ntoa(saddr.sin_addr), ntohs(saddr.sin_port));
>> +
>> + return true;
>> }
>>
>> static int net_socket_listen_init(NetClientState *peer,
>> @@ -508,6 +589,7 @@ static int net_socket_listen_init(NetClientState *peer,
>> NetSocketState *s;
>> struct sockaddr_in saddr;
>> int fd, val, ret;
>> + EventGSource *nsrc;
>>
>> if (parse_host_port(&saddr, host_str) < 0)
>> return -1;
>> @@ -542,7 +624,11 @@ static int net_socket_listen_init(NetClientState *peer,
>> s->listen_fd = fd;
>> s->nc.link_down = true;
>>
>> - qemu_set_fd_handler(s->listen_fd, net_socket_accept, NULL, s);
>> + nsrc = event_source_new(fd, net_socket_listen_handler, s);
>> + s->nsrc = nsrc;
>> + nsrc->gfd.events = G_IO_IN;
>
> Please drop.
- Re: [Qemu-devel] [RFC PATCH v4 01/15] util: introduce gsource event abstration, (continued)
[Qemu-devel] [RFC PATCH v4 02/15] net: introduce bind_ctx to NetClientInfo, Liu Ping Fan, 2013/04/17
[Qemu-devel] [RFC PATCH v4 03/15] net: port tap onto GSource, Liu Ping Fan, 2013/04/17
[Qemu-devel] [RFC PATCH v4 04/15] net: resolve race of tap backend and its peer, Liu Ping Fan, 2013/04/17
[Qemu-devel] [RFC PATCH v4 05/15] net: port vde onto GSource, Liu Ping Fan, 2013/04/17
[Qemu-devel] [RFC PATCH v4 06/15] net: port socket to GSource, Liu Ping Fan, 2013/04/17
[Qemu-devel] [RFC PATCH v4 07/15] net: port tap-win32 onto GSource, Liu Ping Fan, 2013/04/17
[Qemu-devel] [RFC PATCH v4 08/15] net: hub use lock to protect ports list, Liu Ping Fan, 2013/04/17
[Qemu-devel] [RFC PATCH v4 09/15] net: introduce lock to protect NetQueue, Liu Ping Fan, 2013/04/17
[Qemu-devel] [RFC PATCH v4 10/15] net: introduce lock to protect NetClientState's peer's access, Liu Ping Fan, 2013/04/17
[Qemu-devel] [RFC PATCH v4 11/15] net: make netclient re-entrant with refcnt, Liu Ping Fan, 2013/04/17
[Qemu-devel] [RFC PATCH v4 12/15] slirp: make timeout local, Liu Ping Fan, 2013/04/17
[Qemu-devel] [RFC PATCH v4 13/15] slirp: make slirp event dispatch based on slirp instance, not global, Liu Ping Fan, 2013/04/17