From: Vladimir Sementsov-Ogievskiy
Subject: Re: [Qemu-block] [PATCH 06/13] HBitmap: Introduce "meta" bitmap to track bit changes
Date: Mon, 11 Jan 2016 18:40:24 +0300
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:31.0) Gecko/20100101 Thunderbird/31.8.0

On 04.01.2016 13:27, Fam Zheng wrote:
Upon each bit toggle, the corresponding bit in the meta bitmap will be
set.

Signed-off-by: Fam Zheng <address@hidden>
---
  include/qemu/hbitmap.h |  8 +++++++
  util/hbitmap.c         | 61 +++++++++++++++++++++++++++++++++++++-------------
  2 files changed, 54 insertions(+), 15 deletions(-)
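
(Before diving in: here is how I understand the new API is meant to be
used; this is my own sketch, not code from this series.)

    /* A meta bitmap records *which* parts of another HBitmap changed,
     * so a consumer can find and handle just the changed chunks. */
    HBitmap *bitmap = hbitmap_alloc(1 << 20, 0);        /* tracked bitmap */
    HBitmap *meta   = hbitmap_create_meta(bitmap, 16);  /* 1 meta bit per
                                                           16 bitmap bits */

    hbitmap_set(bitmap, 100, 10);
    /* bits 100..109 flipped 0 -> 1, so the meta bits covering them are
     * now set; iterate @meta to find the changed regions of @bitmap,
     * then hbitmap_reset() the meta bits that have been handled. */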

diff --git a/include/qemu/hbitmap.h b/include/qemu/hbitmap.h
index bb94a00..ed672e7 100644
--- a/include/qemu/hbitmap.h
+++ b/include/qemu/hbitmap.h
@@ -181,6 +181,14 @@ void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first);
   */
  unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi);
+/* hbitmap_create_meta
+ * Create a "meta" hbitmap to track dirtiness of the bits in this HBitmap.
+ *
+ * @hb: The HBitmap to operate on.
+ * @chunk_size: How many bits in @hb does one bit in the meta track.
+ */
+HBitmap *hbitmap_create_meta(HBitmap *hb, int chunk_size);
+
  /**
   * hbitmap_iter_next:
   * @hbi: HBitmapIter to operate on.
diff --git a/util/hbitmap.c b/util/hbitmap.c
index 50b888f..55d3182 100644
--- a/util/hbitmap.c
+++ b/util/hbitmap.c
@@ -81,6 +81,9 @@ struct HBitmap {
       */
      int granularity;

+    /* A meta dirty bitmap to track the dirtiness of bits in this HBitmap. */
+    HBitmap *meta;
+
      /* A number of progressively less coarse bitmaps (i.e. level 0 is the
       * coarsest).  Each bit in level N represents a word in level N+1 that
       * has a set bit, except the last level where each bit represents the
@@ -212,25 +215,27 @@ static uint64_t hb_count_between(HBitmap *hb, uint64_t start, uint64_t last)
  }
  /* Setting starts at the last layer and propagates up if an element
- * changes from zero to non-zero.
+ * changes.
   */
  static inline bool hb_set_elem(unsigned long *elem, uint64_t start, uint64_t last)
  {
      unsigned long mask;
-    bool changed;
+    unsigned long old;
      assert((last >> BITS_PER_LEVEL) == (start >> BITS_PER_LEVEL));
      assert(start <= last);

      mask = 2UL << (last & (BITS_PER_LONG - 1));
      mask -= 1UL << (start & (BITS_PER_LONG - 1));
-    changed = (*elem == 0);
+    old = *elem;
      *elem |= mask;
-    return changed;
+    return old != *elem;
  }
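
The return-value change matters for words that are already partially
set; a worked example of mine:

    /* old   = 0b0101   (bits 0 and 2 already set)
     * mask  = 0b0110   (setting bits 1..2)
     * *elem = 0b0111 after the OR
     * The old test ("was *elem == 0?") says nothing changed, but bit 1
     * did flip 0 -> 1, which the meta bitmap must see;
     * old != *elem (0b0101 != 0b0111) reports it correctly. */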
-/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)... */
-static void hb_set_between(HBitmap *hb, int level, uint64_t start, uint64_t last)
+/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)...
+ * Returns true if at least one bit is changed. */
+static bool hb_set_between(HBitmap *hb, int level, uint64_t start,
+                           uint64_t last)
  {
      size_t pos = start >> BITS_PER_LEVEL;
      size_t lastpos = last >> BITS_PER_LEVEL;
@@ -259,22 +264,27 @@ static void hb_set_between(HBitmap *hb, int level, uint64_t start, uint64_t last
      if (level > 0 && changed) {
          hb_set_between(hb, level - 1, pos, lastpos);
      }
+    return changed;
  }
  void hbitmap_set(HBitmap *hb, uint64_t start, uint64_t count)
  {
      /* Compute range in the last layer.  */
+    uint64_t first, n;
      uint64_t last = start + count - 1;
      trace_hbitmap_set(hb, start, count,
                        start >> hb->granularity, last >> hb->granularity);

-    start >>= hb->granularity;
+    first = start >> hb->granularity;
      last >>= hb->granularity;
-    count = last - start + 1;
+    n = last - first + 1;

-    hb->count += count - hb_count_between(hb, start, last);
-    hb_set_between(hb, HBITMAP_LEVELS - 1, start, last);
+    hb->count += n - hb_count_between(hb, first, last);
+    if (hb_set_between(hb, HBITMAP_LEVELS - 1, first, last) &&
+        hb->meta) {

I don't know what the optimizer makes of it, but definitely

+    if (hb->meta &&
+        hb_set_between(hb, HBITMAP_LEVELS - 1, first, last))

should work faster in the common case, where hb->meta == NULL.


+        hbitmap_set(hb->meta, start, count);
+    }
  }
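
A detail worth spelling out (my reading of the patch): @meta is
addressed with the caller's original, un-shifted @start/@count, which
works because of how hbitmap_create_meta() sizes it further down:

    /* The meta bitmap covers the same "virtual" coordinate space as
     * @hb, only at a coarser granularity, so the un-shifted start and
     * count are the right arguments here: */
    hb->meta = hbitmap_alloc(hb->size << hb->granularity,
                             hb->granularity + ctz32(chunk_size));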
  /* Resetting works the other way round: propagate up if the new
@@ -295,8 +305,10 @@ static inline bool hb_reset_elem(unsigned long *elem, uint64_t start, uint64_t l
      return blanked;
  }
-/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)... */
-static void hb_reset_between(HBitmap *hb, int level, uint64_t start, uint64_t last)
+/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)...
+ * Returns true if at least one bit is changed. */
+static bool hb_reset_between(HBitmap *hb, int level, uint64_t start,
+                             uint64_t last)
  {
      size_t pos = start >> BITS_PER_LEVEL;
      size_t lastpos = last >> BITS_PER_LEVEL;
@@ -339,21 +351,28 @@ static void hb_reset_between(HBitmap *hb, int level, uint64_t start, uint64_t la
      if (level > 0 && changed) {
          hb_reset_between(hb, level - 1, pos, lastpos);
      }
+
+    return changed;
+
  }
  void hbitmap_reset(HBitmap *hb, uint64_t start, uint64_t count)
  {
      /* Compute range in the last layer.  */
+    uint64_t first;
      uint64_t last = start + count - 1;
      trace_hbitmap_reset(hb, start, count,
                          start >> hb->granularity, last >> hb->granularity);

-    start >>= hb->granularity;
+    first = start >> hb->granularity;
      last >>= hb->granularity;

-    hb->count -= hb_count_between(hb, start, last);
-    hb_reset_between(hb, HBITMAP_LEVELS - 1, start, last);
+    hb->count -= hb_count_between(hb, first, last);
+    if (hb_reset_between(hb, HBITMAP_LEVELS - 1, first, last) &&
+        hb->meta) {

and the same applies here.

+        hbitmap_set(hb->meta, start, count);
+    }
  }
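
Note in passing, for anyone skimming: the reset path also calls
hbitmap_set() on @meta; the meta bitmap records that bits *changed*,
not their new value, so clearing bits dirties it just like setting
them does.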
  void hbitmap_reset_all(HBitmap *hb)
@@ -384,6 +403,9 @@ void hbitmap_free(HBitmap *hb)
      for (i = HBITMAP_LEVELS; i-- > 0; ) {
          g_free(hb->levels[i]);
      }
+    if (hb->meta) {
+        hbitmap_free(hb->meta);
+    }

hmm, not obvious to me... why not "the one who creates must then destroy"?
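
i.e. something like the following on the API side (just my sketch of
the alternative; the function name is made up):

    /* Hypothetical explicit counterpart to hbitmap_create_meta(), so
     * whoever created the meta bitmap also destroys it: */
    void hbitmap_free_meta(HBitmap *hb)
    {
        assert(hb->meta);
        hbitmap_free(hb->meta);
        hb->meta = NULL;
    }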

      g_free(hb);
  }
@@ -493,3 +515,12 @@ bool hbitmap_merge(HBitmap *a, const HBitmap *b)
       return true;
  }
+
+HBitmap *hbitmap_create_meta(HBitmap *hb, int chunk_size)
+{
+    assert(!(chunk_size & (chunk_size - 1)));
+    assert(!hb->meta);
+    hb->meta = hbitmap_alloc(hb->size << hb->granularity,
+                             hb->granularity + ctz32(chunk_size));
+    return hb->meta;
+}
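
For other readers, the arithmetic works out as follows (my own worked
example, not from the patch):

    /* hb = hbitmap_alloc(1 << 20, 4) stores (1 << 20) >> 4 = 65536 bits.
     * hbitmap_create_meta(hb, 8) then does
     *     hbitmap_alloc(65536 << 4, 4 + ctz32(8)) == hbitmap_alloc(1 << 20, 7)
     * i.e. (1 << 20) >> 7 = 8192 meta bits, one per chunk of 8 bits of
     * @hb; the power-of-two assert on chunk_size keeps ctz32() exact. */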


--
Best regards,
Vladimir
* now, @virtuozzo.com instead of @parallels.com. Sorry for this inconvenience.



