From: Daniel Brodsky
Subject: Re: [PATCH v3 2/2] lockable: replaced locks with lock guard macros where appropriate
Date: Fri, 20 Mar 2020 05:46:18 -0700

On Fri, Mar 20, 2020 at 5:34 AM Dr. David Alan Gilbert
<address@hidden> wrote:
>
> * address@hidden (address@hidden) wrote:
> > From: Daniel Brodsky <address@hidden>
> >
> > - ran regexp "qemu_mutex_lock\(.*\).*\n.*if" to find targets
> > - replaced result with QEMU_LOCK_GUARD if all unlocks at function end
> > - replaced result with WITH_QEMU_LOCK_GUARD if unlock not at end
> >
> > Signed-off-by: Daniel Brodsky <address@hidden>
> > ---
> >  block/iscsi.c         | 11 +++-------
> >  block/nfs.c           | 51 ++++++++++++++++++++-----------------------
> >  cpus-common.c         | 13 +++++------
> >  hw/display/qxl.c      | 43 +++++++++++++++++-------------------
> >  hw/vfio/platform.c    |  5 ++---
> >  migration/migration.c |  3 +--
> >  migration/multifd.c   |  8 +++----
> >  migration/ram.c       |  3 +--
> >  monitor/misc.c        |  4 +---
> >  ui/spice-display.c    | 14 ++++++------
> >  util/log.c            |  4 ++--
> >  util/qemu-timer.c     | 17 +++++++--------
> >  util/rcu.c            |  8 +++----
> >  util/thread-pool.c    |  3 +--
> >  util/vfio-helpers.c   |  4 ++--
> >  15 files changed, 84 insertions(+), 107 deletions(-)
> >
>
> <snip>
>
> > diff --git a/migration/migration.c b/migration/migration.c
> > index c1d88ace7f..2f0bd6d8b4 100644
> > --- a/migration/migration.c
> > +++ b/migration/migration.c
> > @@ -1652,11 +1652,10 @@ static void migrate_fd_cleanup_bh(void *opaque)
> >
> >  void migrate_set_error(MigrationState *s, const Error *error)
> >  {
> > -    qemu_mutex_lock(&s->error_mutex);
> > +    QEMU_LOCK_GUARD(&s->error_mutex);
> >      if (!s->error) {
> >          s->error = error_copy(error);
> >      }
> > -    qemu_mutex_unlock(&s->error_mutex);
> >  }
>
> There are some other places in migration.c that would really benefit;
> for example, in migrate_send_rp_message, if you use a LOCK_GUARD
> there, you can remove the 'int ret' and the goto error.
> In postcopy_pause, the locks on qemu_file_lock would work well with
> WITH_QEMU_LOCK_GUARD.

I did a basic pass through for targets and that one didn't come up. I can
add more replacements later, but there are ~300 mutex locks that might be
worth replacing, and going through all of them manually in one pass would be
too tedious.
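For migrate_send_rp_message, here is a rough sketch of the conversion you
describe (untested, and based on the current shape of the function, so the
exact lines may differ):

    static int migrate_send_rp_message(MigrationIncomingState *mis,
                                       enum mig_rp_message_type message_type,
                                       uint16_t len, void *data)
    {
        trace_migrate_send_rp_message((int)message_type, len);

        /* Released automatically on every return path below */
        QEMU_LOCK_GUARD(&mis->rp_mutex);

        /*
         * The file handle may have been lost due to network failure;
         * with the guard in place this can return directly instead of
         * jumping to an error label.
         */
        if (!mis->to_src_file) {
            return -EIO;
        }

        qemu_put_be16(mis->to_src_file, (unsigned int)message_type);
        qemu_put_be16(mis->to_src_file, len);
        qemu_put_buffer(mis->to_src_file, data, len);
        qemu_fflush(mis->to_src_file);

        /* 'int ret' and the error: label are gone entirely */
        return qemu_file_get_error(mis->to_src_file);
    }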

> >  void migrate_fd_error(MigrationState *s, const Error *error)
> > diff --git a/migration/multifd.c b/migration/multifd.c
> > index cb6a4a3ab8..9123c111a3 100644
> > --- a/migration/multifd.c
> > +++ b/migration/multifd.c
> > @@ -894,11 +894,11 @@ void multifd_recv_sync_main(void)
> >      for (i = 0; i < migrate_multifd_channels(); i++) {
> >          MultiFDRecvParams *p = &multifd_recv_state->params[i];
> >
> > -        qemu_mutex_lock(&p->mutex);
> > -        if (multifd_recv_state->packet_num < p->packet_num) {
> > -            multifd_recv_state->packet_num = p->packet_num;
> > +        WITH_QEMU_LOCK_GUARD(&p->mutex) {
> > +            if (multifd_recv_state->packet_num < p->packet_num) {
> > +                multifd_recv_state->packet_num = p->packet_num;
> > +            }
> >          }
> > -        qemu_mutex_unlock(&p->mutex);
> >          trace_multifd_recv_sync_main_signal(p->id);
> >          qemu_sem_post(&p->sem_sync);
> >      }
>
> > diff --git a/migration/ram.c b/migration/ram.c
> > index c12cfdbe26..87a670cfbf 100644
> > --- a/migration/ram.c
> > +++ b/migration/ram.c
> > @@ -1368,7 +1368,7 @@ static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
> >          return NULL;
> >      }
> >
> > -    qemu_mutex_lock(&rs->src_page_req_mutex);
> > +    QEMU_LOCK_GUARD(&rs->src_page_req_mutex);
> >      if (!QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
> >          struct RAMSrcPageRequest *entry =
> >                                  QSIMPLEQ_FIRST(&rs->src_page_requests);
> > @@ -1385,7 +1385,6 @@ static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
> >              migration_consume_urgent_request();
> >          }
> >      }
> > -    qemu_mutex_unlock(&rs->src_page_req_mutex);
> >
> >      return block;
> >  }
>
> Is there a reason you didn't do the matching pair at the bottom of
> ram_save_queue_pages?
>
> Dave
>

According to https://wiki.qemu.org/ToDo/LockGuards, trivial cases (those with
no conditional logic) shouldn't be replaced.
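The pair at the bottom of ram_save_queue_pages guards a straight-line
critical section with no branches or early exits, roughly (quoting from
memory, so the exact lines may differ):

    memory_region_ref(ramblock->mr);
    qemu_mutex_lock(&rs->src_page_req_mutex);
    QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req);
    migration_make_urgent_request();
    qemu_mutex_unlock(&rs->src_page_req_mutex);

A plain lock/unlock around straight-line code like that is already clear,
so per the wiki it stays as is.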

> > diff --git a/monitor/misc.c b/monitor/misc.c
> > index 6c45fa490f..9723b466cd 100644
> > --- a/monitor/misc.c
> > +++ b/monitor/misc.c
> > @@ -1473,7 +1473,7 @@ AddfdInfo *monitor_fdset_add_fd(int fd, bool has_fdset_id, int64_t fdset_id,
> >      MonFdsetFd *mon_fdset_fd;
> >      AddfdInfo *fdinfo;
> >
> > -    qemu_mutex_lock(&mon_fdsets_lock);
> > +    QEMU_LOCK_GUARD(&mon_fdsets_lock);
> >      if (has_fdset_id) {
> >          QLIST_FOREACH(mon_fdset, &mon_fdsets, next) {
> >              /* Break if match found or match impossible due to ordering by ID */
> > @@ -1494,7 +1494,6 @@ AddfdInfo *monitor_fdset_add_fd(int fd, bool has_fdset_id, int64_t fdset_id,
> >              if (fdset_id < 0) {
> >                  error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "fdset-id",
> >                             "a non-negative value");
> > -                qemu_mutex_unlock(&mon_fdsets_lock);
> >                  return NULL;
> >              }
> >              /* Use specified fdset ID */
> > @@ -1545,7 +1544,6 @@ AddfdInfo *monitor_fdset_add_fd(int fd, bool has_fdset_id, int64_t fdset_id,
> >      fdinfo->fdset_id = mon_fdset->id;
> >      fdinfo->fd = mon_fdset_fd->fd;
> >
> > -    qemu_mutex_unlock(&mon_fdsets_lock);
> >      return fdinfo;
> >  }
> >
> > diff --git a/ui/spice-display.c b/ui/spice-display.c
> > index 6babe24909..19632fdf6c 100644
> > --- a/ui/spice-display.c
> > +++ b/ui/spice-display.c
> > @@ -18,6 +18,7 @@
> >  #include "qemu/osdep.h"
> >  #include "ui/qemu-spice.h"
> >  #include "qemu/timer.h"
> > +#include "qemu/lockable.h"
> >  #include "qemu/main-loop.h"
> >  #include "qemu/option.h"
> >  #include "qemu/queue.h"
> > @@ -483,12 +484,12 @@ void qemu_spice_display_refresh(SimpleSpiceDisplay *ssd)
> >  {
> >      graphic_hw_update(ssd->dcl.con);
> >
> > -    qemu_mutex_lock(&ssd->lock);
> > -    if (QTAILQ_EMPTY(&ssd->updates) && ssd->ds) {
> > -        qemu_spice_create_update(ssd);
> > -        ssd->notify++;
> > +    WITH_QEMU_LOCK_GUARD(&ssd->lock) {
> > +        if (QTAILQ_EMPTY(&ssd->updates) && ssd->ds) {
> > +            qemu_spice_create_update(ssd);
> > +            ssd->notify++;
> > +        }
> >      }
> > -    qemu_mutex_unlock(&ssd->lock);
> >
> >      trace_qemu_spice_display_refresh(ssd->qxl.id, ssd->notify);
> >      if (ssd->notify) {
> > @@ -580,7 +581,7 @@ static int interface_get_cursor_command(QXLInstance *sin, QXLCommandExt *ext)
> >      SimpleSpiceDisplay *ssd = container_of(sin, SimpleSpiceDisplay, qxl);
> >      int ret;
> >
> > -    qemu_mutex_lock(&ssd->lock);
> > +    QEMU_LOCK_GUARD(&ssd->lock);
> >      if (ssd->ptr_define) {
> >          *ext = ssd->ptr_define->ext;
> >          ssd->ptr_define = NULL;
> > @@ -592,7 +593,6 @@ static int interface_get_cursor_command(QXLInstance *sin, QXLCommandExt *ext)
> >      } else {
> >          ret = false;
> >      }
> > -    qemu_mutex_unlock(&ssd->lock);
> >      return ret;
> >  }
> >
> > diff --git a/util/log.c b/util/log.c
> > index 2da6cb31dc..bdb3d712e8 100644
> > --- a/util/log.c
> > +++ b/util/log.c
> > @@ -25,6 +25,7 @@
> >  #include "qemu/cutils.h"
> >  #include "trace/control.h"
> >  #include "qemu/thread.h"
> > +#include "qemu/lockable.h"
> >
> >  static char *logfilename;
> >  static QemuMutex qemu_logfile_mutex;
> > @@ -94,7 +95,7 @@ void qemu_set_log(int log_flags)
> >      if (qemu_loglevel && (!is_daemonized() || logfilename)) {
> >          need_to_open_file = true;
> >      }
> > -    qemu_mutex_lock(&qemu_logfile_mutex);
> > +    QEMU_LOCK_GUARD(&qemu_logfile_mutex);
> >      if (qemu_logfile && !need_to_open_file) {
> >          logfile = qemu_logfile;
> >          atomic_rcu_set(&qemu_logfile, NULL);
> > @@ -136,7 +137,6 @@ void qemu_set_log(int log_flags)
> >          }
> >          atomic_rcu_set(&qemu_logfile, logfile);
> >      }
> > -    qemu_mutex_unlock(&qemu_logfile_mutex);
> >  }
> >
> >  void qemu_log_needs_buffers(void)
> > diff --git a/util/qemu-timer.c b/util/qemu-timer.c
> > index d548d3c1ad..b6575a2cd5 100644
> > --- a/util/qemu-timer.c
> > +++ b/util/qemu-timer.c
> > @@ -459,17 +459,16 @@ void timer_mod_anticipate_ns(QEMUTimer *ts, int64_t expire_time)
> >      QEMUTimerList *timer_list = ts->timer_list;
> >      bool rearm;
> >
> > -    qemu_mutex_lock(&timer_list->active_timers_lock);
> > -    if (ts->expire_time == -1 || ts->expire_time > expire_time) {
> > -        if (ts->expire_time != -1) {
> > -            timer_del_locked(timer_list, ts);
> > +    WITH_QEMU_LOCK_GUARD(&timer_list->active_timers_lock) {
> > +        if (ts->expire_time == -1 || ts->expire_time > expire_time) {
> > +            if (ts->expire_time != -1) {
> > +                timer_del_locked(timer_list, ts);
> > +            }
> > +            rearm = timer_mod_ns_locked(timer_list, ts, expire_time);
> > +        } else {
> > +            rearm = false;
> >          }
> > -        rearm = timer_mod_ns_locked(timer_list, ts, expire_time);
> > -    } else {
> > -        rearm = false;
> >      }
> > -    qemu_mutex_unlock(&timer_list->active_timers_lock);
> > -
> >      if (rearm) {
> >          timerlist_rearm(timer_list);
> >      }
> > diff --git a/util/rcu.c b/util/rcu.c
> > index 177a675619..60a37f72c3 100644
> > --- a/util/rcu.c
> > +++ b/util/rcu.c
> > @@ -31,6 +31,7 @@
> >  #include "qemu/atomic.h"
> >  #include "qemu/thread.h"
> >  #include "qemu/main-loop.h"
> > +#include "qemu/lockable.h"
> >  #if defined(CONFIG_MALLOC_TRIM)
> >  #include <malloc.h>
> >  #endif
> > @@ -141,14 +142,14 @@ static void wait_for_readers(void)
> >
> >  void synchronize_rcu(void)
> >  {
> > -    qemu_mutex_lock(&rcu_sync_lock);
> > +    QEMU_LOCK_GUARD(&rcu_sync_lock);
> >
> >      /* Write RCU-protected pointers before reading p_rcu_reader->ctr.
> >       * Pairs with smp_mb_placeholder() in rcu_read_lock().
> >       */
> >      smp_mb_global();
> >
> > -    qemu_mutex_lock(&rcu_registry_lock);
> > +    QEMU_LOCK_GUARD(&rcu_registry_lock);
> >      if (!QLIST_EMPTY(&registry)) {
> >          /* In either case, the atomic_mb_set below blocks stores that free
> >           * old RCU-protected pointers.
> > @@ -169,9 +170,6 @@ void synchronize_rcu(void)
> >
> >          wait_for_readers();
> >      }
> > -
> > -    qemu_mutex_unlock(&rcu_registry_lock);
> > -    qemu_mutex_unlock(&rcu_sync_lock);
> >  }
> >
> >
> > diff --git a/util/thread-pool.c b/util/thread-pool.c
> > index 4ed9b89ab2..d763cea505 100644
> > --- a/util/thread-pool.c
> > +++ b/util/thread-pool.c
> > @@ -210,7 +210,7 @@ static void thread_pool_cancel(BlockAIOCB *acb)
> >
> >      trace_thread_pool_cancel(elem, elem->common.opaque);
> >
> > -    qemu_mutex_lock(&pool->lock);
> > +    QEMU_LOCK_GUARD(&pool->lock);
> >      if (elem->state == THREAD_QUEUED &&
> >          /* No thread has yet started working on elem. we can try to "steal"
> >           * the item from the worker if we can get a signal from the
> > @@ -225,7 +225,6 @@ static void thread_pool_cancel(BlockAIOCB *acb)
> >          elem->ret = -ECANCELED;
> >      }
> >
> > -    qemu_mutex_unlock(&pool->lock);
> >  }
> >
> >  static AioContext *thread_pool_get_aio_context(BlockAIOCB *acb)
> > diff --git a/util/vfio-helpers.c b/util/vfio-helpers.c
> > index ddd9a96e76..b310b23003 100644
> > --- a/util/vfio-helpers.c
> > +++ b/util/vfio-helpers.c
> > @@ -21,6 +21,7 @@
> >  #include "standard-headers/linux/pci_regs.h"
> >  #include "qemu/event_notifier.h"
> >  #include "qemu/vfio-helpers.h"
> > +#include "qemu/lockable.h"
> >  #include "trace.h"
> >
> >  #define QEMU_VFIO_DEBUG 0
> > @@ -667,14 +668,12 @@ int qemu_vfio_dma_reset_temporary(QEMUVFIOState *s)
> >          .size = QEMU_VFIO_IOVA_MAX - s->high_water_mark,
> >      };
> >      trace_qemu_vfio_dma_reset_temporary(s);
> > -    qemu_mutex_lock(&s->lock);
> > +    QEMU_LOCK_GUARD(&s->lock);
> >      if (ioctl(s->container, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
> >          error_report("VFIO_UNMAP_DMA failed: %s", strerror(errno));
> > -        qemu_mutex_unlock(&s->lock);
> >          return -errno;
> >      }
> >      s->high_water_mark = QEMU_VFIO_IOVA_MAX;
> > -    qemu_mutex_unlock(&s->lock);
> >      return 0;
> >  }
> >
> > --
> > 2.25.1
> >
> --
> Dr. David Alan Gilbert / address@hidden / Manchester, UK
>

Daniel


