

From: Philippe Mathieu-Daudé
Subject: [PATCH v4 1/2] qemu/atomic: Update coding style to make checkpatch.pl happier
Date: Wed, 23 Sep 2020 17:19:00 +0200

To limit the number of checkpatch errors in the next commit,
clean up the coding style issues first.

Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
---
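(For reference, the warnings below can be reproduced by feeding the
patch to checkpatch on standard input; the exact invocation may vary:

    $ git show | scripts/checkpatch.pl -
)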
False positives:

 ERROR: Use of volatile is usually wrong, please add a comment
 #11: FILE: include/qemu/atomic.h:328:
 +#define atomic_read__nocheck(p)   (*(__typeof__(*(p)) volatile *) (p))

 ERROR: Use of volatile is usually wrong, please add a comment
 #12: FILE: include/qemu/atomic.h:329:
 +#define atomic_set__nocheck(p, i) ((*(__typeof__(*(p)) volatile *) (p)) = (i))
---
 include/qemu/atomic.h | 9 +++++----
 util/bitmap.c         | 3 ++-
 util/rcu.c            | 6 ++++--
 3 files changed, 11 insertions(+), 7 deletions(-)

diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h
index ff72db51154..1774133e5d0 100644
--- a/include/qemu/atomic.h
+++ b/include/qemu/atomic.h
@@ -325,11 +325,11 @@
 /* These will only be atomic if the processor does the fetch or store
  * in a single issue memory operation
  */
-#define atomic_read__nocheck(p)   (*(__typeof__(*(p)) volatile*) (p))
-#define atomic_set__nocheck(p, i) ((*(__typeof__(*(p)) volatile*) (p)) = (i))
+#define atomic_read__nocheck(p)   (*(__typeof__(*(p)) volatile *) (p))
+#define atomic_set__nocheck(p, i) ((*(__typeof__(*(p)) volatile *) (p)) = (i))
 
 #define atomic_read(ptr)       atomic_read__nocheck(ptr)
-#define atomic_set(ptr, i)     atomic_set__nocheck(ptr,i)
+#define atomic_set(ptr, i)     atomic_set__nocheck(ptr, i)
 
 /**
  * atomic_rcu_read - reads a RCU-protected pointer to a local variable
@@ -440,7 +440,8 @@
 #endif
 #endif
 
-/* atomic_mb_read/set semantics map Java volatile variables. They are
+/*
+ * atomic_mb_read/set semantics map Java volatile variables. They are
  * less expensive on some platforms (notably POWER) than fully
  * sequentially consistent operations.
  *
diff --git a/util/bitmap.c b/util/bitmap.c
index 1753ff7f5bd..c4fb86db72a 100644
--- a/util/bitmap.c
+++ b/util/bitmap.c
@@ -211,7 +211,8 @@ void bitmap_set_atomic(unsigned long *map, long start, long nr)
         mask_to_set &= BITMAP_LAST_WORD_MASK(size);
         atomic_or(p, mask_to_set);
     } else {
-        /* If we avoided the full barrier in atomic_or(), issue a
+        /*
+         * If we avoided the full barrier in atomic_or(), issue a
          * barrier to account for the assignments in the while loop.
          */
         smp_mb();
diff --git a/util/rcu.c b/util/rcu.c
index c4fefa9333e..b5238b8ed02 100644
--- a/util/rcu.c
+++ b/util/rcu.c
@@ -82,7 +82,8 @@ static void wait_for_readers(void)
          */
         qemu_event_reset(&rcu_gp_event);
 
-        /* Instead of using atomic_mb_set for index->waiting, and
+        /*
+         * Instead of using atomic_mb_set for index->waiting, and
          * atomic_mb_read for index->ctr, memory barriers are placed
          * manually since writes to different threads are independent.
          * qemu_event_reset has acquire semantics, so no memory barrier
@@ -151,7 +152,8 @@ void synchronize_rcu(void)
 
     QEMU_LOCK_GUARD(&rcu_registry_lock);
     if (!QLIST_EMPTY(&registry)) {
-        /* In either case, the atomic_mb_set below blocks stores that free
+        /*
+         * In either case, the atomic_mb_set below blocks stores that free
          * old RCU-protected pointers.
          */
         if (sizeof(rcu_gp_ctr) < 8) {
-- 
2.26.2



