From: alvise rigo
Subject: Re: [Qemu-devel] [RFC v7 01/16] exec.c: Add new exclusive bitmap to ram_list
Date: Thu, 11 Feb 2016 14:21:18 +0100

You are right: the for loop with i < DIRTY_MEMORY_NUM works just fine.

Thank you,
alvise
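
For reference, the simplification being agreed on above folds the DIRTY_MEMORY_EXCLUSIVE special case back into the existing loop in ram_block_add(), since the exclusive bitmap is now page-granular like the other dirty-memory clients. A minimal sketch of the resulting hunk (illustrative only, not the actual follow-up patch):

    /* All dirty bitmaps, including DIRTY_MEMORY_EXCLUSIVE, now use the same
     * page granularity, so a single loop can grow them all. */
    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        ram_list.dirty_memory[i] =
            bitmap_zero_extend(ram_list.dirty_memory[i],
                               old_ram_size, new_ram_size);
    }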

On Thu, Feb 11, 2016 at 2:00 PM, Alex Bennée <address@hidden> wrote:
>
> Alvise Rigo <address@hidden> writes:
>
>> The purpose of this new bitmap is to flag the memory pages that are in
>> the middle of LL/SC operations (after a LL, before a SC). For all these
>> pages, the corresponding TLB entries will be generated in such a way to
>> force the slow-path for all the VCPUs (see the following patches).
>>
>> When the system starts, the whole memory is set to dirty.
>>
>> Suggested-by: Jani Kokkonen <address@hidden>
>> Suggested-by: Claudio Fontana <address@hidden>
>> Signed-off-by: Alvise Rigo <address@hidden>
>> ---
>>  exec.c                  |  7 +++++--
>>  include/exec/memory.h   |  3 ++-
>>  include/exec/ram_addr.h | 31 +++++++++++++++++++++++++++++++
>>  3 files changed, 38 insertions(+), 3 deletions(-)
>>
>> diff --git a/exec.c b/exec.c
>> index 7115403..51f366d 100644
>> --- a/exec.c
>> +++ b/exec.c
>> @@ -1575,11 +1575,14 @@ static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
>>          int i;
>>
>>          /* ram_list.dirty_memory[] is protected by the iothread lock.  */
>> -        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
>> +        for (i = 0; i < DIRTY_MEMORY_EXCLUSIVE; i++) {
>>              ram_list.dirty_memory[i] =
>>                  bitmap_zero_extend(ram_list.dirty_memory[i],
>>                                     old_ram_size, new_ram_size);
>> -       }
>> +        }
>> +        ram_list.dirty_memory[DIRTY_MEMORY_EXCLUSIVE] =
>> +            bitmap_zero_extend(ram_list.dirty_memory[DIRTY_MEMORY_EXCLUSIVE],
>> +                               old_ram_size, new_ram_size);
>
> In the previous patch you moved this out of the loop as
> ram_list.dirty_memory[DIRTY_MEMORY_EXCLUSIVE] was a different size to
> the other dirty bitmaps. This no longer seems to be the case, so this
> seems pointless.
>
>>      }
>>      cpu_physical_memory_set_dirty_range(new_block->offset,
>>                                          new_block->used_length,
>> diff --git a/include/exec/memory.h b/include/exec/memory.h
>> index c92734a..71e0480 100644
>> --- a/include/exec/memory.h
>> +++ b/include/exec/memory.h
>> @@ -19,7 +19,8 @@
>>  #define DIRTY_MEMORY_VGA       0
>>  #define DIRTY_MEMORY_CODE      1
>>  #define DIRTY_MEMORY_MIGRATION 2
>> -#define DIRTY_MEMORY_NUM       3        /* num of dirty bits */
>> +#define DIRTY_MEMORY_EXCLUSIVE 3
>> +#define DIRTY_MEMORY_NUM       4        /* num of dirty bits */
>>
>>  #include <stdint.h>
>>  #include <stdbool.h>
>> diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
>> index ef1489d..19789fc 100644
>> --- a/include/exec/ram_addr.h
>> +++ b/include/exec/ram_addr.h
>> @@ -21,6 +21,7 @@
>>
>>  #ifndef CONFIG_USER_ONLY
>>  #include "hw/xen/xen.h"
>> +#include "sysemu/sysemu.h"
>>
>>  struct RAMBlock {
>>      struct rcu_head rcu;
>> @@ -172,6 +173,9 @@ static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
>>      if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
>>          bitmap_set_atomic(d[DIRTY_MEMORY_CODE], page, end - page);
>>      }
>> +    if (unlikely(mask & (1 << DIRTY_MEMORY_EXCLUSIVE))) {
>> +        bitmap_set_atomic(d[DIRTY_MEMORY_EXCLUSIVE], page, end - page);
>> +    }
>>      xen_modified_memory(start, length);
>>  }
>>
>> @@ -287,5 +291,32 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
>>  }
>>
>>  void migration_bitmap_extend(ram_addr_t old, ram_addr_t new);
>> +
>> +/* Exclusive bitmap support. */
>> +#define EXCL_BITMAP_GET_OFFSET(addr) (addr >> TARGET_PAGE_BITS)
>> +
>> +/* Make the page of @addr not exclusive. */
>> +static inline void cpu_physical_memory_unset_excl(ram_addr_t addr)
>> +{
>> +    set_bit_atomic(EXCL_BITMAP_GET_OFFSET(addr),
>> +                   ram_list.dirty_memory[DIRTY_MEMORY_EXCLUSIVE]);
>> +}
>> +
>> +/* Return true if the page of @addr is exclusive, i.e. the EXCL bit is set. */
>> +static inline int cpu_physical_memory_is_excl(ram_addr_t addr)
>> +{
>> +    return !test_bit(EXCL_BITMAP_GET_OFFSET(addr),
>> +                     ram_list.dirty_memory[DIRTY_MEMORY_EXCLUSIVE]);
>> +}
>> +
>> +/* Set the page of @addr as exclusive clearing its EXCL bit and return the
>> + * previous bit's state. */
>> +static inline int cpu_physical_memory_set_excl(ram_addr_t addr)
>> +{
>> +    return bitmap_test_and_clear_atomic(
>> +                                ram_list.dirty_memory[DIRTY_MEMORY_EXCLUSIVE],
>> +                                EXCL_BITMAP_GET_OFFSET(addr), 1);
>> +}
>> +
>>  #endif
>>  #endif
>
>
> --
> Alex Bennée
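
To make the intent of the new cpu_physical_memory_*_excl helpers quoted above concrete, here is a hedged sketch of how they are meant to be used, going by the commit message (the actual TLB and slow-path wiring lives in later patches of the series; the function names helper_ll_example() and helper_store_example() and the surrounding control flow are illustrative assumptions, not part of the patch):

    #include "exec/ram_addr.h"   /* cpu_physical_memory_*_excl helpers */

    /* Illustrative only: on a load-link, flag the page as exclusive so that
     * stores from any vCPU are forced through the slow path. */
    static void helper_ll_example(ram_addr_t addr)
    {
        /* Returns the previous state: non-zero means the page was not
         * exclusive before, i.e. other vCPUs may still hold fast-path TLB
         * entries that would have to be pushed to the slow path. */
        if (cpu_physical_memory_set_excl(addr)) {
            /* ... update the other vCPUs' TLB entries here ... */
        }
    }

    /* Illustrative only: a store hitting an exclusive page takes the slow
     * path and clears the exclusive state, breaking a pending LL/SC pair. */
    static void helper_store_example(ram_addr_t addr)
    {
        if (cpu_physical_memory_is_excl(addr)) {
            cpu_physical_memory_unset_excl(addr);
            /* ... perform the store via the slow path ... */
        }
    }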


