[RFC PATCH 1/1] gsync: Do not block the thread calling gsync_wait ()


From: Sergey Bugaev
Subject: [RFC PATCH 1/1] gsync: Do not block the thread calling gsync_wait ()
Date: Thu, 10 Jun 2021 19:46:34 +0300

Instead, delay the reply message until the corresponding
gsync_wake () call is made.

TODO: Handle the case of the reply port dying.
---
 include/mach/gnumach.defs |   4 ++
 kern/gsync.c              | 112 +++++++++++++++++++++++++++-----------
 kern/gsync.h              |   8 ++-
 3 files changed, 89 insertions(+), 35 deletions(-)
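
Usage sketch (a note for this RFC, not part of the patch): from
userspace nothing changes, because the new sreplyport argument is
supplied transparently by the MIG-generated stub.  A minimal,
hypothetical futex-style pairing of the two RPCs, assuming the usual
gsync_wait ()/gsync_wake () user stubs are linked in:

    #include <mach.h>

    static unsigned int flag;   /* the word both threads synchronize on */

    /* Waiter: sleep until FLAG becomes non-zero.  With this patch the
       kernel no longer blocks the kernel thread; the caller simply
       stays blocked in mach_msg () until the delayed reply arrives.
       If FLAG is no longer 0 at wait time, the call returns
       KERN_INVALID_ARGUMENT immediately, so we re-check in a loop.  */
    static void
    wait_for_flag (void)
    {
      while (__atomic_load_n (&flag, __ATOMIC_ACQUIRE) == 0)
        gsync_wait (mach_task_self (), (vm_address_t) &flag,
                    0, 0, 0, 0);
    }

    /* Waker: publish the new value, then wake one waiter, which is
       what releases the delayed reply queued by gsync_wait ().  */
    static void
    set_flag (void)
    {
      __atomic_store_n (&flag, 1, __ATOMIC_RELEASE);
      gsync_wake (mach_task_self (), (vm_address_t) &flag, 0, 0);
    }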

diff --git a/include/mach/gnumach.defs b/include/mach/gnumach.defs
index 531b5d4d..06859196 100644
--- a/include/mach/gnumach.defs
+++ b/include/mach/gnumach.defs
@@ -33,6 +33,9 @@ subsystem
 GNUMACH_IMPORTS
 #endif
 
+type reply_port_t = MACH_MSG_TYPE_MAKE_SEND_ONCE | polymorphic
+       ctype: mach_port_t;
+
 type vm_cache_statistics_data_t = struct[11] of integer_t;
 
 type vm_wire_t = int;
@@ -98,6 +101,7 @@ routine register_new_task_notification(
  * - GSYNC_TIMED: The call only blocks for MSEC milliseconds. */
 routine gsync_wait(
   task : task_t;
+  sreplyport reply_port: reply_port_t;
   addr : vm_address_t;
   val1 : unsigned;
   val2 : unsigned;
diff --git a/kern/gsync.c b/kern/gsync.c
index e73a6cf0..aae39ba6 100644
--- a/kern/gsync.c
+++ b/kern/gsync.c
@@ -21,9 +21,14 @@
 #include <kern/sched_prim.h>
 #include <kern/thread.h>
 #include <kern/list.h>
+#include <mach/mig_errors.h>
 #include <vm/vm_map.h>
 #include <vm/vm_kern.h>
 
+#define GSYNC_MSGH_SEQNO 0
+
+static struct kmem_cache gsync_waiter_cache;
+
 /* An entry in the global hash table. */
 struct gsync_hbucket
 {
@@ -57,12 +62,13 @@ union gsync_key
     } any;
 };
 
-/* A thread that is blocked on an address with 'gsync_wait'. */
+/* An RPC that is blocked on an address with 'gsync_wait'. */
 struct gsync_waiter
 {
   struct list link;
   union gsync_key key;
-  thread_t waiter;
+  ipc_port_t reply_port;
+  mach_msg_type_name_t reply_port_type;
 };
 
 /* Needed data for temporary mappings. */
@@ -83,6 +89,9 @@ void gsync_setup (void)
       list_init (&gsync_buckets[i].entries);
       kmutex_init (&gsync_buckets[i].lock);
     }
+
+  kmem_cache_init (&gsync_waiter_cache, "gsync_waiter",
+                   sizeof (struct gsync_waiter), 0, NULL, 0);
 }
 
 /* Convenience comparison functions for gsync_key's. */
@@ -219,24 +228,38 @@ temp_mapping (struct vm_args *vap, vm_offset_t addr, vm_prot_t prot)
   return (paddr);
 }
 
-kern_return_t gsync_wait (task_t task, vm_offset_t addr,
-  unsigned int lo, unsigned int hi, natural_t msec, int flags)
+kern_return_t
+gsync_wait (task_t task,
+            ipc_port_t reply_port, mach_msg_type_name_t reply_port_type,
+            vm_offset_t addr,
+            unsigned int lo, unsigned int hi, natural_t msec, int flags)
 {
+  struct gsync_waiter *w;
+
   if (task == 0)
     return (KERN_INVALID_TASK);
   else if (addr % sizeof (int) != 0)
     return (KERN_INVALID_ADDRESS);
 
+  if (! IP_VALID (reply_port))
+    return (KERN_INVALID_CAPABILITY);
+  if (reply_port_type == MACH_MSG_TYPE_MOVE_RECEIVE)
+    return (KERN_INVALID_RIGHT);
+
+  w = (struct gsync_waiter *) kmem_cache_alloc (&gsync_waiter_cache);
+  if (! w)
+    return (KERN_RESOURCE_SHORTAGE);
+
   vm_map_lock_read (task->map);
 
-  struct gsync_waiter w;
   struct vm_args va;
   boolean_t remote = task != current_task ();
-  int bucket = gsync_prepare_key (task, addr, flags, &w.key, &va);
+  int bucket = gsync_prepare_key (task, addr, flags, &w->key, &va);
 
   if (bucket < 0)
     {
       vm_map_unlock_read (task->map);
+      kmem_cache_free (&gsync_waiter_cache, (vm_offset_t) w);
       return (KERN_INVALID_ADDRESS);
     }
   else if (remote)
@@ -270,6 +293,7 @@ kern_return_t gsync_wait (task_t task, vm_offset_t addr,
           vm_map_unlock_read (task->map);
           /* Make sure to remove the reference we added. */
           vm_object_deallocate (va.obj);
+          kmem_cache_free (&gsync_waiter_cache, (vm_offset_t) w);
           return (KERN_MEMORY_FAILURE);
         }
 
@@ -293,6 +317,7 @@ kern_return_t gsync_wait (task_t task, vm_offset_t addr,
   if (! equal)
     {
       kmutex_unlock (&hbp->lock);
+      kmem_cache_free (&gsync_waiter_cache, (vm_offset_t) w);
       return (KERN_INVALID_ARGUMENT);
     }
 
@@ -300,37 +325,18 @@ kern_return_t gsync_wait (task_t task, vm_offset_t addr,
    * compares strictly greater than this waiter. */
   struct list *runp;
   list_for_each (&hbp->entries, runp)
-    if (gsync_key_lt (&w.key, &node_to_waiter(runp)->key))
+    if (gsync_key_lt (&w->key, &node_to_waiter(runp)->key))
       break;
 
-  /* Finally, add ourselves to the list and go to sleep. */
-  list_add (runp->prev, runp, &w.link);
-  w.waiter = current_thread ();
+  /* Finally, add ourselves to the list. */
 
-  if (flags & GSYNC_TIMED)
-    thread_will_wait_with_timeout (w.waiter, msec);
-  else
-    thread_will_wait (w.waiter);
+  list_add (runp->prev, runp, &w->link);
+  w->reply_port = reply_port;
+  w->reply_port_type = reply_port_type;
 
   kmutex_unlock (&hbp->lock);
-  thread_block (thread_no_continuation);
-
-  /* We're back. */
-  kern_return_t ret = KERN_SUCCESS;
-  if (current_thread()->wait_result != THREAD_AWAKENED)
-    {
-      /* We were interrupted or timed out. */
-      kmutex_lock (&hbp->lock, FALSE);
-      if (!list_node_unlinked (&w.link))
-        list_remove (&w.link);
-      kmutex_unlock (&hbp->lock);
-
-      /* Map the error code. */
-      ret = current_thread()->wait_result == THREAD_INTERRUPTED ?
-        KERN_INTERRUPTED : KERN_TIMEDOUT;
-    }
 
-  return (ret);
+  return (MIG_NO_REPLY);
 }
 
 /* Remove a waiter from the queue, wake it up, and
@@ -338,11 +344,51 @@ kern_return_t gsync_wait (task_t task, vm_offset_t addr,
 static inline struct list*
 dequeue_waiter (struct list *nodep)
 {
+  struct gsync_waiter *w;
   struct list *nextp = list_next (nodep);
+  ipc_kmsg_t kmsg;
+  mig_reply_header_t *reply;
+
   list_remove (nodep);
   list_node_init (nodep);
-  clear_wait (node_to_waiter(nodep)->waiter,
-    THREAD_AWAKENED, FALSE);
+  w = node_to_waiter(nodep);
+
+  kmsg = ikm_alloc (sizeof *reply);
+  if (kmsg == IKM_NULL)
+    {
+      if (w->reply_port_type == MACH_MSG_TYPE_MOVE_SEND)
+        ipc_port_release_send (w->reply_port);
+      else
+        ipc_port_release_sonce (w->reply_port);
+      kmem_cache_free (&gsync_waiter_cache, (vm_offset_t) w);
+      return (nextp);
+    }
+
+  ikm_init (kmsg, sizeof *reply);
+  reply = (mig_reply_header_t *) &kmsg->ikm_header;
+
+  reply->Head.msgh_bits = MACH_MSGH_BITS(w->reply_port_type, 0);
+  reply->Head.msgh_size = sizeof *reply;
+  reply->Head.msgh_seqno = GSYNC_MSGH_SEQNO;
+  reply->Head.msgh_local_port = MACH_PORT_NULL;
+  reply->Head.msgh_remote_port = (mach_port_t) w->reply_port;
+  reply->Head.msgh_id = 4304;
+
+  reply->RetCodeType.msgt_name = MACH_MSG_TYPE_INTEGER_32;
+  reply->RetCodeType.msgt_size = 32;
+  reply->RetCodeType.msgt_number = 1;
+  reply->RetCodeType.msgt_inline = TRUE;
+  reply->RetCodeType.msgt_longform = FALSE;
+  reply->RetCodeType.msgt_deallocate = FALSE;
+  reply->RetCodeType.msgt_unused = 0;
+
+  reply->RetCode = KERN_SUCCESS;
+
+  /* Consumes our port reference. */
+  ipc_mqueue_send_always (kmsg);
+
+  kmem_cache_free (&gsync_waiter_cache, (vm_offset_t) w);
+
   return (nextp);
 }
 
diff --git a/kern/gsync.h b/kern/gsync.h
index 8f69be32..1aac556d 100644
--- a/kern/gsync.h
+++ b/kern/gsync.h
@@ -29,8 +29,12 @@
 
 void gsync_setup (void);
 
-kern_return_t gsync_wait (task_t task, vm_offset_t addr,
-  unsigned int lo, unsigned int hi, natural_t msec, int flags);
+kern_return_t gsync_wait (task_t task,
+                          ipc_port_t reply_port,
+                          mach_msg_type_name_t reply_port_type,
+                          vm_offset_t addr,
+                          unsigned int lo, unsigned int hi,
+                          natural_t msec, int flags);
 
 kern_return_t gsync_wake (task_t task,
   vm_offset_t addr, unsigned int val, int flags);
-- 
2.31.1
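
For reference (not part of the patch): the reply that dequeue_waiter ()
now assembles by hand follows the standard simple-reply layout that
MIG-generated stubs expect, as declared in <mach/mig_errors.h>; by MIG
convention the reply msgh_id is the request id plus 100, which is where
the 4304 above comes from.

    typedef struct {
        mach_msg_header_t  Head;         /* msgh_id = request id + 100 */
        mach_msg_type_t    RetCodeType;  /* describes RetCode */
        kern_return_t      RetCode;      /* KERN_SUCCESS on wake-up */
    } mig_reply_header_t;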



