qemu-devel

[Qemu-devel] [RFC PATCH 02/20] Introduce cpu_physical_memory_get_dirty_range().


From: Yoshiaki Tamura
Subject: [Qemu-devel] [RFC PATCH 02/20] Introduce cpu_physical_memory_get_dirty_range().
Date: Wed, 21 Apr 2010 14:57:07 +0900

It checks the first row and puts the dirty addrs in the array.  If the
first row is empty, it skips ahead to the first dirty row or the end
addr, and puts the skipped length in the first entry of the array.

Signed-off-by: Yoshiaki Tamura <address@hidden>
Signed-off-by: OHMURA Kei <address@hidden>
---
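A minimal sketch of how a caller is expected to consume the return value
described above (not part of this patch; start/end, the buffer size and
the use of MIGRATION_DIRTY_FLAG here are only illustrative):

    ram_addr_t bufs[HOST_LONG_BITS];
    ram_addr_t addr = start;

    while (addr < end) {
        int n = cpu_physical_memory_get_dirty_range(addr, end, bufs,
                                                    HOST_LONG_BITS,
                                                    MIGRATION_DIRTY_FLAG);
        if (n == 0) {
            /* No dirty page found in this row; bufs[0] holds the number
               of bytes that were skipped. */
            addr += bufs[0];
        } else {
            int i;
            for (i = 0; i < n; i++) {
                /* bufs[i] is the ram_addr_t of a dirty page. */
            }
            addr = bufs[n - 1] + TARGET_PAGE_SIZE;
        }
    }
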
 cpu-all.h |    4 +++
 exec.c    |   67 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 71 insertions(+), 0 deletions(-)
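
Also, a worked example of the row/bit arithmetic used in the new function
below, assuming 4 KiB target pages and 64-bit host longs (both depend on
the build configuration):

    page   = start >> TARGET_PAGE_BITS;   /* 0x204000 >> 12 = 0x204 (516) */
    s_idx  = page / HOST_LONG_BITS;       /* 516 / 64 = 8  -> bitmap row 8 */
    offset = page % HOST_LONG_BITS;       /* 516 % 64 = 4  -> bit 4 of row 8 */

so the ~((1UL << offset) - 1) mask clears the four low bits, i.e. the pages
below the start addr, before the row is scanned.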

diff --git a/cpu-all.h b/cpu-all.h
index 3f8762d..27187d4 100644
--- a/cpu-all.h
+++ b/cpu-all.h
@@ -1007,6 +1007,10 @@ static inline void cpu_physical_memory_mask_dirty_range(ram_addr_t start,
     }
 }
 
+int cpu_physical_memory_get_dirty_range(ram_addr_t start, ram_addr_t end,
+                                        ram_addr_t *dirty_rams, int length,
+                                        int dirty_flag);
+
 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                      int dirty_flags);
 void cpu_tlb_update_dirty(CPUState *env);
diff --git a/exec.c b/exec.c
index bf8d703..d5c2a05 100644
--- a/exec.c
+++ b/exec.c
@@ -1962,6 +1962,73 @@ static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
     }
 }
 
+/* Check the first row and put the dirty addrs in the array.  If the
+   first row is empty, skip ahead to the first dirty row or the end
+   addr, and put the skipped length in the first entry of the array. */
+int cpu_physical_memory_get_dirty_range(ram_addr_t start, ram_addr_t end, 
+                                        ram_addr_t *dirty_rams, int length,
+                                        int dirty_flag)
+{
+    unsigned long p = 0, page_number;
+    ram_addr_t addr;
+    ram_addr_t s_idx = (start >> TARGET_PAGE_BITS) / HOST_LONG_BITS;
+    ram_addr_t e_idx = (end >> TARGET_PAGE_BITS) / HOST_LONG_BITS;
+    int i, j, offset, dirty_idx = dirty_flag_to_idx(dirty_flag);
+
+    /* mask bits before the start addr */
+    offset = (start >> TARGET_PAGE_BITS) & (HOST_LONG_BITS - 1);
+    cpu_physical_memory_sync_master(s_idx);
+    p |= phys_ram_dirty[dirty_idx][s_idx] & ~((1UL << offset) - 1);
+
+    if (s_idx == e_idx) {
+        /* mask bits after the end addr */
+        offset = (end >> TARGET_PAGE_BITS) & (HOST_LONG_BITS - 1);
+        p &= (1UL << offset) - 1;
+    }
+
+    if (p == 0) {
+        /* when the row is empty */
+        ram_addr_t skip;
+        if (s_idx == e_idx) {
+            skip = end;
+        } else {
+            /* skip empty rows */
+            while (s_idx < e_idx) {
+                s_idx++;
+                cpu_physical_memory_sync_master(s_idx);
+
+                if (phys_ram_dirty[dirty_idx][s_idx] != 0) {
+                    break;
+                }
+            }
+            skip = (s_idx * HOST_LONG_BITS * TARGET_PAGE_SIZE);
+        }
+        dirty_rams[0] = skip - start;
+        i = 0;
+
+    } else if (p == ~0UL) {
+        /* when the row is fully dirtied */
+        addr = start;
+        for (i = 0; i < length; i++) {
+            dirty_rams[i] = addr;
+            addr += TARGET_PAGE_SIZE;
+        }
+    } else {
+        /* when the row is partially dirtied */
+        i = 0;
+        do {
+            j = ffsl(p) - 1;
+            p &= ~(1UL << j);
+            page_number = s_idx * HOST_LONG_BITS + j;
+            addr = page_number * TARGET_PAGE_SIZE;
+            dirty_rams[i] = addr;
+            i++;
+        } while (p != 0 && i < length);
+    }
+
+    return i;
+}
+
 /* Note: start and end must be within the same ram block.  */
 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                      int dirty_flags)
-- 
1.7.0.31.g1df487




