getfem-commits
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Getfem-commits] r5086 - in /trunk/getfem/src: getfem/getfem_omp.h getfe


From: andriy . andreykiv
Subject: [Getfem-commits] r5086 - in /trunk/getfem/src: getfem/getfem_omp.h getfem_models.cc getfem_omp.cc
Date: Fri, 11 Sep 2015 11:27:59 -0000

Author: andrico
Date: Fri Sep 11 13:27:59 2015
New Revision: 5086

URL: http://svn.gna.org/viewcvs/getfem?rev=5086&view=rev
Log:
added a mechanism to capture exceptions in parallel sections and pass them to 
the master thread

Modified:
    trunk/getfem/src/getfem/getfem_omp.h
    trunk/getfem/src/getfem_models.cc
    trunk/getfem/src/getfem_omp.cc

Modified: trunk/getfem/src/getfem/getfem_omp.h
URL: 
http://svn.gna.org/viewcvs/getfem/trunk/getfem/src/getfem/getfem_omp.h?rev=5086&r1=5085&r2=5086&view=diff
==============================================================================
--- trunk/getfem/src/getfem/getfem_omp.h        (original)
+++ trunk/getfem/src/getfem/getfem_omp.h        Fri Sep 11 13:27:59 2015
@@ -82,7 +82,7 @@
   };
 
   //like boost::lock_guard, but copyable
-  class local_guard 
+  class local_guard
   {
   public:
     local_guard(boost::recursive_mutex&);
@@ -93,7 +93,7 @@
     boost::shared_ptr<boost::lock_guard<boost::recursive_mutex>> plock_;
   };
 
-  //produces scoped lock on the 
+  //produces scoped lock on the
   //mutex, held in this class
   class lock_factory
   {
@@ -108,7 +108,7 @@
   };
 
 
-#else 
+#else
 
   class omp_guard{};
   class local_guard{};
@@ -119,7 +119,7 @@
 #endif
 
 
-#ifdef GETFEM_HAVE_OPENMP      
+#ifdef GETFEM_HAVE_OPENMP
   /**number of OpenMP threads*/
   inline size_t num_threads(){return omp_get_max_threads();}
 
@@ -140,10 +140,10 @@
 
 
 
-  /**use this template class for any object you want to 
+  /**use this template class for any object you want to
   distribute to open_MP threads. The creation of this
   object should happen in serial, while accessing the individual
-  thread local instances will take place in parallel. If 
+  thread local instances will take place in parallel. If
   one needs creation of thread local object, use the macro
   DEFINE_STATIC_THREAD_LOCAL
   */
@@ -160,7 +160,7 @@
     };
   public:
     omp_distribute() : thread_values(num_threads()) {}
-    omp_distribute(const T& value) : 
+    omp_distribute(const T& value) :
       thread_values(num_threads(),value) {}
     operator T& (){return thread_values[this_thread()];}
     operator const T& () const {return thread_values[this_thread()];}
@@ -168,11 +168,11 @@
     const T& thrd_cast() const {return thread_values[this_thread()];}
     T& operator()(size_type i) {
       return thread_values[i];
-    }  
+    }
     const T& operator()(size_type i) const {
       return thread_values[i];
     }
-    T& operator = (const T& x){ 
+    T& operator = (const T& x){
       return (thread_values[this_thread()]=x);
     }
 
@@ -196,19 +196,19 @@
   public:
     typedef std::vector<T> VEC;
     omp_distribute() : thread_values(num_threads()) {}
-    omp_distribute(size_t n, const T& value) : 
+    omp_distribute(size_t n, const T& value) :
       thread_values(num_threads(), std::vector<T>(n,value)){}
     operator VEC& (){return thread_values[this_thread()];}
-    operator const VEC& () const 
+    operator const VEC& () const
     {return thread_values[this_thread()];}
-    VEC& operator()(size_type i) {return thread_values[i];}    
+    VEC& operator()(size_type i) {return thread_values[i];}
     const VEC& operator()(size_type i) const {return thread_values[i];}
     VEC& thrd_cast(){return thread_values[this_thread()];}
-    const VEC& thrd_cast() const 
+    const VEC& thrd_cast() const
     {return thread_values[this_thread()];}
-    T& operator[](size_type i) 
-    {return thread_values[this_thread()][i];}  
-    const T& operator[](size_type i) const 
+    T& operator[](size_type i)
+    {return thread_values[this_thread()][i];}
+    const T& operator[](size_type i) const
     {return thread_values[this_thread()][i];}
     T& operator = (const T& value) {
       return (thread_values[this_thread()]=value);
@@ -233,7 +233,7 @@
   public:
 
     omp_distribute() : thread_values(num_threads()) {}
-    omp_distribute(const bool& value) : 
+    omp_distribute(const bool& value) :
       thread_values(num_threads(),value) {}
     operator BOOL& (){return thread_values[this_thread()];}
     operator const BOOL& () const {return thread_values[this_thread()];}
@@ -241,11 +241,11 @@
     const BOOL& thrd_cast() const {return thread_values[this_thread()];}
     BOOL& operator()(size_type i) {
       return thread_values[i];
-    }  
+    }
     const BOOL& operator()(size_type i) const {
       return thread_values[i];
     }
-    BOOL& operator = (const BOOL& x){ 
+    BOOL& operator = (const BOOL& x){
       return (thread_values[this_thread()]=x);
     }
     all_values_proxy all_threads(){return all_values_proxy(*this);}
@@ -297,14 +297,14 @@
 
 #if defined _WIN32 && !defined (__GNUC__)
   /**parallelization function for a for loop*/
-  template<class LOOP_BODY> 
-  inline void open_mp_for(int begin, int end, 
+  template<class LOOP_BODY>
+  inline void open_mp_for(int begin, int end,
     const LOOP_BODY& loop_body){
-      _configthreadlocale(_ENABLE_PER_THREAD_LOCALE); 
+      _configthreadlocale(_ENABLE_PER_THREAD_LOCALE);
       gmm::standard_locale locale;
       open_mp_is_running_properly check;
-#pragma omp parallel default(shared) 
-      { 
+#pragma omp parallel default(shared)
+      {
         _setmbcp(_MB_CP_ANSI);
 #pragma omp for schedule(static)
         for(int i=begin;i<end;i++) loop_body(i);
@@ -313,31 +313,24 @@
   }
 #else /*LINUX*/
   /**parallelization function for a for loop*/
-  template<class LOOP_BODY> 
+  template<class LOOP_BODY>
   inline void open_mp_for(
     int begin, int end, const LOOP_BODY& loop_body){
       gmm::standard_locale locale;
       open_mp_is_running_properly check;
-#pragma omp parallel default(shared) 
-      { 
+#pragma omp parallel default(shared)
+      {
 #pragma omp for schedule(static)
         for(int i=begin;i<end;i++) loop_body(i);
       }
   }
 #endif
 
-
-
-
-
-
-
-
   /**parallelization macro of a for loop*/
 #define OPEN_MP_FOR(begin,end,loop_counter,loop_body) \
   getfem::open_mp_for(begin,end,loop_body(loop_counter));
 
-  /**used to partition a mesh region so that 
+  /**used to partition a mesh region so that
   each partition can be used on a different thread. Thread safe*/
   class region_partition {
     mesh* pparent_mesh;
@@ -346,10 +339,45 @@
   public:
     region_partition(mesh* mmesh=0,size_type id=-1);
     region_partition(const region_partition& rp);
-    void operator=(const region_partition& rp);                
+    void operator=(const region_partition& rp);
     size_type thread_local_partition() const;
   };
 
+  /**Allows to re-throw exceptions, generated in OpenMP parallel section.
+  Collects exceptions from all threads and on destruction re-throws the first 
one, so that
+  it can be again caught in the master thread*/
+  class thread_exception {
+  public:
+    thread_exception();
+
+    /**re-throws the first captured exception*/
+    ~thread_exception();
+
+    /**run function f in parallel part to capture its exceptions. Possible 
syntax can be:
+    thread_exception exception;
+    #pragma omp parallel...
+    {
+      exception.run([&]
+      {
+        your code that can throw exceptions
+      });
+    }*/
+    template <typename Function, typename... Parameters>
+    void run(Function f, Parameters... params)
+    {
+      try {f(params...);} catch (...){captureException();}
+    }
+
+    /**vector of pointers to caught exceptions*/
+    std::vector<std::exception_ptr> caughtExceptions() const;
+
+  private:
+    void rethrow();
+    void captureException();
+
+    std::vector<std::exception_ptr> exceptions_;
+  };
+
 
 }
 

Modified: trunk/getfem/src/getfem_models.cc
URL: 
http://svn.gna.org/viewcvs/getfem/trunk/getfem/src/getfem_models.cc?rev=5086&r1=5085&r2=5086&view=diff
==============================================================================
--- trunk/getfem/src/getfem_models.cc   (original)
+++ trunk/getfem/src/getfem_models.cc   Fri Sep 11 13:27:59 2015
@@ -416,13 +416,17 @@
                     distro<decltype(rTM)>  distro_rTM(rTM);
                     gmm::standard_locale locale;
                     open_mp_is_running_properly check;
+                    thread_exception exception;
                     #pragma omp parallel default(shared)
                     {
-                      ga_workspace workspace(*this);
-                      for (auto &&ge : generic_expressions)
-                        workspace.add_expression(ge.expr, ge.mim, ge.region);
-                      workspace.set_assembled_matrix(distro_rTM);
-                      workspace.assembly(2);
+                      exception.run([&]
+                      {
+                        ga_workspace workspace(*this);
+                        for (auto &&ge : generic_expressions)
+                          workspace.add_expression(ge.expr, ge.mim, ge.region);
+                        workspace.set_assembled_matrix(distro_rTM);
+                        workspace.assembly(2);
+                      });
                     } //parallel
                   } //distro scope
                   gmm::add
@@ -1702,14 +1706,18 @@
         /*running the assembly in parallel*/
         gmm::standard_locale locale;
         open_mp_is_running_properly check;
+        thread_exception exception;
         #pragma omp parallel default(shared)
         {
-          brick.pbr->asm_complex_tangent_terms(*this, ib, brick.vlist,
-                                               brick.dlist, brick.mims,
-                                               cmatlist,
-                                               cveclist,
-                                               cveclist_sym,
-                                               brick.region, version);
+          exception.run([&]
+          {
+            brick.pbr->asm_complex_tangent_terms(*this, ib, brick.vlist,
+                                                 brick.dlist, brick.mims,
+                                                 cmatlist,
+                                                 cveclist,
+                                                 cveclist_sym,
+                                                 brick.region, version);
+          });
         }
 
       }
@@ -1757,15 +1765,19 @@
         /*running the assembly in parallel*/
         gmm::standard_locale locale;
         open_mp_is_running_properly check;
+        thread_exception exception;
         #pragma omp parallel default(shared)
         {
-          brick.pbr->asm_real_tangent_terms(*this, ib, brick.vlist,
-                                            brick.dlist, brick.mims,
-                                            rmatlist,
-                                            rveclist,
-                                            rveclist_sym,
-                                            brick.region,
-                                            version);
+          exception.run([&]
+          {
+            brick.pbr->asm_real_tangent_terms(*this, ib, brick.vlist,
+                                              brick.dlist, brick.mims,
+                                              rmatlist,
+                                              rveclist,
+                                              rveclist_sym,
+                                              brick.region,
+                                              version);
+          });
         }
       }
       brick.pbr->real_post_assembly_in_serial(*this, ib, brick.vlist,
@@ -2601,31 +2613,34 @@
         /*running the assembly in parallel*/
         gmm::standard_locale locale;
         open_mp_is_running_properly check;
-
+        thread_exception exception;
         #pragma omp parallel default(shared)
         {
-          GMM_TRACE2("Global generic assembly");
-          ga_workspace workspace(*this);
-
-          for (auto &&ge : generic_expressions) 
workspace.add_expression(ge.expr, ge.mim, ge.region);
-
-          if (version & BUILD_RHS) {
-            if (is_complex()) {
-              GMM_ASSERT1(false, "to be done");
-            } else {
-              workspace.set_assembled_vector(residual_distributed);
-              workspace.assembly(1);
+          exception.run([&]
+          {
+            GMM_TRACE2("Global generic assembly");
+            ga_workspace workspace(*this);
+
+            for (auto &&ge : generic_expressions) 
workspace.add_expression(ge.expr, ge.mim, ge.region);
+
+            if (version & BUILD_RHS) {
+              if (is_complex()) {
+                GMM_ASSERT1(false, "to be done");
+              } else {
+                workspace.set_assembled_vector(residual_distributed);
+                workspace.assembly(1);
+              }
             }
-          }
-
-          if (version & BUILD_MATRIX) {
-            if (is_complex()) {
-              GMM_ASSERT1(false, "to be done");
-            } else {
-              workspace.set_assembled_matrix(tangent_matrix_distributed);
-              workspace.assembly(2);
+
+            if (version & BUILD_MATRIX) {
+              if (is_complex()) {
+                GMM_ASSERT1(false, "to be done");
+              } else {
+                workspace.set_assembled_matrix(tangent_matrix_distributed);
+                workspace.assembly(2);
+              }
             }
-          }
+          });//exception.run(
         } //#pragma omp parallel
       } //end of distro scope
 

Modified: trunk/getfem/src/getfem_omp.cc
URL: 
http://svn.gna.org/viewcvs/getfem/trunk/getfem/src/getfem_omp.cc?rev=5086&r1=5085&r2=5086&view=diff
==============================================================================
--- trunk/getfem/src/getfem_omp.cc      (original)
+++ trunk/getfem/src/getfem_omp.cc      Fri Sep 11 13:27:59 2015
@@ -23,22 +23,22 @@
 #include "getfem/getfem_omp.h"
 #include "getfem/getfem_level_set_contact.h"
 
-namespace getfem{ 
+namespace getfem{
 
 #ifdef GETFEM_HAVE_OPENMP
 
   boost::recursive_mutex omp_guard::boost_mutex;
 
-  omp_guard::omp_guard() 
-    : boost::lock_guard<boost::recursive_mutex>(boost_mutex) 
+  omp_guard::omp_guard()
+    : boost::lock_guard<boost::recursive_mutex>(boost_mutex)
   {}
 
-  local_guard::local_guard(boost::recursive_mutex& m) : 
-    mutex_(m), 
+  local_guard::local_guard(boost::recursive_mutex& m) :
+    mutex_(m),
     plock_(new boost::lock_guard<boost::recursive_mutex>(m))
   { }
 
-  local_guard::local_guard(const local_guard& guard) 
+  local_guard::local_guard(const local_guard& guard)
     : mutex_(guard.mutex_), plock_(guard.plock_)
   { }
 
@@ -49,10 +49,6 @@
   }
 #endif
 
-
-
-
-
   omp_distribute<bool> open_mp_is_running_properly::answer = false;
   open_mp_is_running_properly::open_mp_is_running_properly()
   {answer.all_threads()=true;}
@@ -60,7 +56,7 @@
   {answer.all_threads()=false;}
   bool open_mp_is_running_properly::is_it(){return answer;}
 
-  region_partition::region_partition(const region_partition& rp) : 
+  region_partition::region_partition(const region_partition& rp) :
     pparent_mesh(rp.pparent_mesh),
     original_region(rp.original_region),
     partitions(rp.partitions)  {   }
@@ -95,7 +91,7 @@
       GMM_ASSERT1(pm->has_region(id),"Improper region number");
       original_region.reset(new mesh_region(pm->region(id)));
     }
-    if (me_is_multithreaded_now()) 
+    if (me_is_multithreaded_now())
       GMM_WARNING0("building partitions inside parallel region");
 
     omp_guard scoped_lock;
@@ -107,7 +103,7 @@
     mr_visitor mr(*original_region);
     for(size_type thread = 0; thread<num_threads();thread++)
     {
-      partitions[thread] = 
+      partitions[thread] =
         
getfem::mesh_region::free_region_id(*(original_region->get_parent_mesh()));
       mesh_region partition;
       for(size_type i=thread*psize;i<(thread+1)*psize && 
!mr.finished();i++,++mr)
@@ -138,4 +134,32 @@
 
   }
 
+  thread_exception::thread_exception(): exceptions_(num_threads(), nullptr)
+  {}
+
+  thread_exception::~thread_exception() {rethrow();}
+
+  std::vector<std::exception_ptr> thread_exception::caughtExceptions() const
+  {
+    std::vector<std::exception_ptr> exceptions;
+    for (auto &&pException : exceptions_)
+    {
+      if (pException != nullptr) exceptions.push_back(pException);
+    }
+    return exceptions;
+  }
+
+  void thread_exception::rethrow()
+  {
+    for (auto &&pException : exceptions_)
+    {
+      if (pException != nullptr) std::rethrow_exception(pException);
+    }
+  }
+
+  void thread_exception::captureException()
+  {
+    exceptions_[omp_get_thread_num()] = std::current_exception();
+  }
+
 }




reply via email to

[Prev in Thread] Current Thread [Next in Thread]