guile-commits
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Guile-commits] GNU Guile branch, master, updated. v2.1.0-17-gaac980d


From: Andy Wingo
Subject: [Guile-commits] GNU Guile branch, master, updated. v2.1.0-17-gaac980d
Date: Mon, 13 Feb 2012 14:29:41 +0000

This is an automated email from the git hooks/post-receive script. It was
generated because a ref change was pushed to the repository containing
the project "GNU Guile".

http://git.savannah.gnu.org/cgit/guile.git/commit/?id=aac980de43a0466b968a56607664f5ebbca6b751

The branch, master has been updated
       via  aac980de43a0466b968a56607664f5ebbca6b751 (commit)
      from  12087939ed06cafa3c6467ff1462d80880f4d86b (commit)

Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.

- Log -----------------------------------------------------------------
commit aac980de43a0466b968a56607664f5ebbca6b751
Author: Andy Wingo <address@hidden>
Date:   Mon Feb 13 15:29:21 2012 +0100

    weaks: move an allocation outside a critical section
    
    * libguile/weak-set.c (resize_set):
    * libguile/weak-table.c (resize_table): Drop the set/table lock while
      allocating the new vector.  Fixes a bug in which a finalizer could
      recursively try to grab the port table lock.

-----------------------------------------------------------------------

Summary of changes:
 libguile/weak-set.c   |   55 +++++++++++++++++++++++++++++++++----------------
 libguile/weak-table.c |   50 ++++++++++++++++++++++++++++++--------------
 2 files changed, 71 insertions(+), 34 deletions(-)

diff --git a/libguile/weak-set.c b/libguile/weak-set.c
index 626433d..004fedb 100644
--- a/libguile/weak-set.c
+++ b/libguile/weak-set.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011 Free Software Foundation, Inc.
+/* Copyright (C) 2011, 2012 Free Software Foundation, Inc.
  * 
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public License
@@ -281,28 +281,22 @@ static unsigned long hashset_size[] = {
 
 #define HASHSET_SIZE_N (sizeof(hashset_size)/sizeof(unsigned long))
 
-static void
-resize_set (scm_t_weak_set *set)
+static int
+compute_size_index (scm_t_weak_set *set)
 {
-  scm_t_weak_entry *old_entries, *new_entries;
-  int i;
-  unsigned long old_size, new_size, old_k;
+  int i = set->size_index;
 
-  old_entries = set->entries;
-  old_size = set->size;
-  
   if (set->n_items < set->lower)
     {
       /* rehashing is not triggered when i <= min_size */
-      i = set->size_index;
       do
        --i;
       while (i > set->min_size_index
-            && set->n_items < hashset_size[i] / 4);
+            && set->n_items < hashset_size[i] / 5);
     }
-  else
+  else if (set->n_items > set->upper)
     {
-      i = set->size_index + 1;
+      ++i;
       if (i >= HASHSET_SIZE_N)
         /* The biggest size currently is 230096423, which for a 32-bit
            machine will occupy 1.5GB of memory at a load of 80%.  There
@@ -311,14 +305,39 @@ resize_set (scm_t_weak_set *set)
         abort ();
     }
 
-  new_size = hashset_size[i];
-  new_entries = scm_gc_malloc_pointerless (new_size * sizeof(scm_t_weak_entry),
-                                           "weak set");
+  return i;
+}
+
+static void
+resize_set (scm_t_weak_set *set)
+{
+  scm_t_weak_entry *old_entries, *new_entries;
+  int new_size_index;
+  unsigned long old_size, new_size, old_k;
+
+  do 
+    {
+      new_size_index = compute_size_index (set);
+      if (new_size_index == set->size_index)
+        return;
+      new_size = hashset_size[new_size_index];
+      scm_i_pthread_mutex_unlock (&set->lock);
+      /* Allocating memory might cause finalizers to run, which could
+         run anything, so drop our lock to avoid deadlocks.  */
+      new_entries = scm_gc_malloc_pointerless (new_size * sizeof(scm_t_weak_entry),
+                                               "weak set");
+      scm_i_pthread_mutex_lock (&set->lock);
+    }
+  while (new_size_index != compute_size_index (set));
+
+  old_entries = set->entries;
+  old_size = set->size;
+
   memset (new_entries, 0, new_size * sizeof(scm_t_weak_entry));
 
-  set->size_index = i;
+  set->size_index = new_size_index;
   set->size = new_size;
-  if (i <= set->min_size_index)
+  if (new_size_index <= set->min_size_index)
     set->lower = 0;
   else
     set->lower = new_size / 5;
diff --git a/libguile/weak-table.c b/libguile/weak-table.c
index 01b0791..38fd23d 100644
--- a/libguile/weak-table.c
+++ b/libguile/weak-table.c
@@ -404,28 +404,22 @@ static unsigned long hashtable_size[] = {
 
 #define HASHTABLE_SIZE_N (sizeof(hashtable_size)/sizeof(unsigned long))
 
-static void
-resize_table (scm_t_weak_table *table)
+static int
+compute_size_index (scm_t_weak_table *table)
 {
-  scm_t_weak_entry *old_entries, *new_entries;
-  int i;
-  unsigned long old_size, new_size, old_k;
+  int i = table->size_index;
 
-  old_entries = table->entries;
-  old_size = table->size;
-  
   if (table->n_items < table->lower)
     {
       /* rehashing is not triggered when i <= min_size */
-      i = table->size_index;
       do
        --i;
       while (i > table->min_size_index
-            && table->n_items < hashtable_size[i] / 4);
+            && table->n_items < hashtable_size[i] / 5);
     }
-  else
+  else if (table->n_items > table->upper)
     {
-      i = table->size_index + 1;
+      ++i;
       if (i >= HASHTABLE_SIZE_N)
         /* The biggest size currently is 230096423, which for a 32-bit
            machine will occupy 2.3GB of memory at a load of 80%.  There
@@ -434,12 +428,36 @@ resize_table (scm_t_weak_table *table)
         abort ();
     }
 
-  new_size = hashtable_size[i];
-  new_entries = allocate_entries (new_size, table->kind);
+  return i;
+}
 
-  table->size_index = i;
+static void
+resize_table (scm_t_weak_table *table)
+{
+  scm_t_weak_entry *old_entries, *new_entries;
+  int new_size_index;
+  unsigned long old_size, new_size, old_k;
+
+  do 
+    {
+      new_size_index = compute_size_index (table);
+      if (new_size_index == table->size_index)
+        return;
+      new_size = hashtable_size[new_size_index];
+      scm_i_pthread_mutex_unlock (&table->lock);
+      /* Allocating memory might cause finalizers to run, which could
+         run anything, so drop our lock to avoid deadlocks.  */
+      new_entries = allocate_entries (new_size, table->kind);
+      scm_i_pthread_mutex_lock (&table->lock);
+    }
+  while (new_size_index != compute_size_index (table));
+
+  old_entries = table->entries;
+  old_size = table->size;
+  
+  table->size_index = new_size_index;
   table->size = new_size;
-  if (i <= table->min_size_index)
+  if (new_size_index <= table->min_size_index)
     table->lower = 0;
   else
     table->lower = new_size / 5;


hooks/post-receive
-- 
GNU Guile



reply via email to

[Prev in Thread] Current Thread [Next in Thread]