From: Alvise Rigo
Subject: [Qemu-devel] [RFC v6 13/14] softmmu: Include MMIO/invalid exclusive accesses
Date: Mon, 14 Dec 2015 09:41:37 +0100

Enable exclusive accesses when the MMIO/invalid flag is set in the TLB
entry.

When an LL access is made to MMIO memory, we treat it differently from
a RAM access in that we do not rely on the EXCL bitmap to flag the page
as exclusive. In fact, we do not even need the TLB_EXCL flag to force
the slow path, since for MMIO it is always forced anyway.
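
In code terms, the LL helper ends up with the following shape (a
simplified sketch of the softmmu_llsc_template.h hunk below, with the
cross-vCPU flush loop omitted):

    if (env->tlb_table[mmu_idx][index].addr_read & TLB_MMIO) {
        /* MMIO: no EXCL bitmap update and no remote TLB flush; every
         * access to this page already takes the slow path. */
    } else {
        /* RAM: mark the page as exclusive for this vCPU and, if some
         * vCPU has not flagged it yet, flush it so that its stores
         * start trapping on TLB_EXCL. */
        cpu_physical_memory_set_excl(hw_addr, this->cpu_index);
        excl_history_put_addr(this, hw_addr);
    }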

This commit does not take care of invalidating an MMIO exclusive range
on other, non-exclusive accesses (e.g. CPU1 does a LoadLink to MMIO
address X and CPU2 then writes to X); this will be addressed in the
following commit.

Suggested-by: Jani Kokkonen <address@hidden>
Suggested-by: Claudio Fontana <address@hidden>
Signed-off-by: Alvise Rigo <address@hidden>
---
 cputlb.c                | 20 +++++++++++---------
 softmmu_llsc_template.h | 25 ++++++++++++++-----------
 softmmu_template.h      | 38 ++++++++++++++++++++------------------
 3 files changed, 45 insertions(+), 38 deletions(-)

diff --git a/cputlb.c b/cputlb.c
index 372877e..7c2669c 100644
--- a/cputlb.c
+++ b/cputlb.c
@@ -413,22 +413,24 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
         if ((memory_region_is_ram(section->mr) && section->readonly)
             || memory_region_is_romd(section->mr)) {
             /* Write access calls the I/O callback.  */
-            te->addr_write = address | TLB_MMIO;
+            address |= TLB_MMIO;
         } else if (memory_region_is_ram(section->mr)
                    && cpu_physical_memory_is_clean(section->mr->ram_addr
                                                    + xlat)) {
-            te->addr_write = address | TLB_NOTDIRTY;
-        } else {
-            if (!(address & TLB_MMIO) &&
-                cpu_physical_memory_atleast_one_excl(section->mr->ram_addr
-                                                           + xlat)) {
+            address |= TLB_NOTDIRTY;
+        }
+
+        /* Since MMIO accesses always take the slow path, we do not need
+         * to set any flag to trap the access. */
+        if (!(address & TLB_MMIO)) {
+            if (cpu_physical_memory_atleast_one_excl(
+                                        section->mr->ram_addr + xlat)) {
                 /* There is at least one vCPU that has flagged the address as
                  * exclusive. */
-                te->addr_write = address | TLB_EXCL;
-            } else {
-                te->addr_write = address;
+                address |= TLB_EXCL;
             }
         }
+        te->addr_write = address;
     } else {
         te->addr_write = -1;
     }
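
For reference, the reason a single te->addr_write assignment can carry
all of this state: the TLB_MMIO/TLB_NOTDIRTY/TLB_EXCL flags live in the
address bits below the page size, so any flagged entry fails the
fast-path tag comparison and falls into the helpers. A standalone
sketch of that encoding (made-up flag values and address, not QEMU
code):

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    #define PAGE_BITS  12
    #define PAGE_MASK  (~(((uint64_t)1 << PAGE_BITS) - 1))
    /* Stand-ins for the real TLB_MMIO/TLB_NOTDIRTY/TLB_EXCL bits. */
    #define F_MMIO     ((uint64_t)1 << 0)
    #define F_NOTDIRTY ((uint64_t)1 << 1)
    #define F_EXCL     ((uint64_t)1 << 2)

    int main(void)
    {
        uint64_t addr_write = 0x7f2a40000000ULL; /* page-aligned */

        addr_write |= F_EXCL;          /* tag the entry as exclusive */

        if (addr_write & ~PAGE_MASK) { /* any flag forces the slow path */
            printf("slow path: flags 0x%" PRIx64 " on page 0x%" PRIx64 "\n",
                   addr_write & ~PAGE_MASK, addr_write & PAGE_MASK);
        }
        return 0;
    }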
diff --git a/softmmu_llsc_template.h b/softmmu_llsc_template.h
index becb90b..bbc820e 100644
--- a/softmmu_llsc_template.h
+++ b/softmmu_llsc_template.h
@@ -71,17 +71,20 @@ WORD_TYPE helper_ldlink_name(CPUArchState *env, target_ulong addr,
      * plus the offset (i.e. addr & ~TARGET_PAGE_MASK) */
     hw_addr = (env->iotlb[mmu_idx][index].addr & TARGET_PAGE_MASK) + addr;
 
-    cpu_physical_memory_set_excl(hw_addr, this->cpu_index);
-    excl_history_put_addr(this, hw_addr);
-    /* If all the vCPUs have the EXCL bit set for this page there is no need
-     * to request any flush. */
-    if (cpu_physical_memory_not_excl(hw_addr, smp_cpus)) {
-        CPU_FOREACH(cpu) {
-            if (current_cpu != cpu) {
-                if (cpu_physical_memory_not_excl(hw_addr, cpu->cpu_index)) {
-                    cpu_physical_memory_set_excl(hw_addr, cpu->cpu_index);
-                    tlb_flush(cpu, 1);
-                    excl_history_put_addr(cpu, hw_addr);
+    /* No need to flush for MMIO addresses: the slow path is always used */
+    if (likely(!(env->tlb_table[mmu_idx][index].addr_read & TLB_MMIO))) {
+        cpu_physical_memory_set_excl(hw_addr, this->cpu_index);
+        excl_history_put_addr(this, hw_addr);
+        /* If all the vCPUs have the EXCL bit set for this page there is no need
+         * to request any flush. */
+        if (cpu_physical_memory_not_excl(hw_addr, smp_cpus)) {
+            CPU_FOREACH(cpu) {
+                if (current_cpu != cpu) {
+                if (cpu_physical_memory_not_excl(hw_addr, cpu->cpu_index)) {
+                        cpu_physical_memory_set_excl(hw_addr, cpu->cpu_index);
+                        tlb_flush(cpu, 1);
+                        excl_history_put_addr(cpu, hw_addr);
+                    }
                 }
             }
         }
diff --git a/softmmu_template.h b/softmmu_template.h
index 262c95f..196beec 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -476,9 +476,8 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
 
     /* Handle an IO access or exclusive access.  */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
-        CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
-
-        if ((tlb_addr & ~TARGET_PAGE_MASK) == TLB_EXCL) {
+        if (tlb_addr & TLB_EXCL) {
+            CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
             CPUState *cpu = ENV_GET_CPU(env);
             /* The slow-path has been forced since we are writing to
              * exclusive-protected memory. */
@@ -500,12 +499,14 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                 cpu_physical_memory_unset_excl(hw_addr, cpu->cpu_index);
             }
 
-            haddr = addr + env->tlb_table[mmu_idx][index].addend;
-        #if DATA_SIZE == 1
-            glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
-        #else
-            glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
-        #endif
+            if (tlb_addr & ~(TARGET_PAGE_MASK | TLB_EXCL)) { /* MMIO access */
+                glue(helper_le_st_name, _do_mmio_access)(env, val, addr, oi,
+                                                         mmu_idx, index,
+                                                         retaddr);
+            } else {
+                glue(helper_le_st_name, _do_ram_access)(env, val, addr, oi,
+                                                        mmu_idx, index, retaddr);
+            }
 
             lookup_and_reset_cpus_ll_addr(hw_addr, DATA_SIZE);
 
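Note the guard change above, from an exact match on TLB_EXCL to a bit
test: a TLB entry can now carry TLB_EXCL together with TLB_MMIO, so the
exclusive branch must be taken whenever the bit is set, and the residual
flags then select how the store is performed. A minimal sketch of that
decision (assuming the _do_mmio_access/_do_ram_access helpers factored
out earlier in this series; the big-endian helper below is identical):

    /* tlb_addr is already known to have TLB_EXCL set at this point. */
    if (tlb_addr & ~(TARGET_PAGE_MASK | TLB_EXCL)) {
        /* Some other flag (e.g. TLB_MMIO) remains: the write must go
         * through the memory region's I/O callback. */
        glue(helper_le_st_name, _do_mmio_access)(env, val, addr, oi,
                                                 mmu_idx, index, retaddr);
    } else {
        /* Plain RAM page: direct store through the host address. */
        glue(helper_le_st_name, _do_ram_access)(env, val, addr, oi,
                                                mmu_idx, index, retaddr);
    }
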
@@ -624,9 +625,8 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
 
     /* Handle an IO access or exclusive access.  */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
-        CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
-
-        if ((tlb_addr & ~TARGET_PAGE_MASK) == TLB_EXCL) {
+        if (tlb_addr & TLB_EXCL) {
+            CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
             CPUState *cpu = ENV_GET_CPU(env);
             /* The slow-path has been forced since we are writing to
              * exclusive-protected memory. */
@@ -648,12 +648,14 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                 cpu_physical_memory_unset_excl(hw_addr, cpu->cpu_index);
             }
 
-            haddr = addr + env->tlb_table[mmu_idx][index].addend;
-        #if DATA_SIZE == 1
-            glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
-        #else
-            glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
-        #endif
+            if (tlb_addr & ~(TARGET_PAGE_MASK | TLB_EXCL)) { /* MMIO access */
+                glue(helper_be_st_name, _do_mmio_access)(env, val, addr, oi,
+                                                         mmu_idx, index,
+                                                         retaddr);
+            } else {
+                glue(helper_be_st_name, _do_ram_access)(env, val, addr, oi,
+                                                        mmu_idx, index, retaddr);
+            }
 
             lookup_and_reset_cpus_ll_addr(hw_addr, DATA_SIZE);
 
-- 
2.6.4



