From: Anton Johansson
Subject: [PATCH v2 11/27] accel/tcg: Move jmp-cache `CF_PCREL` checks to caller
Date: Tue, 21 Feb 2023 23:18:02 +0100

tb-jmp-cache.h contains a few small functions that exist only to hide a
CF_PCREL check; however, the caller often already performs such a check.

This patch moves the CF_PCREL checks from the callee to the caller and
removes these functions, which now do nothing but hide an access to the
jmp-cache.

Signed-off-by: Anton Johansson <anjo@rev.ng>
---
 accel/tcg/cpu-exec.c     | 56 +++++++++++++++++++++++++++++-----------
 accel/tcg/tb-jmp-cache.h | 36 --------------------------
 2 files changed, 41 insertions(+), 51 deletions(-)
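
A note on the memory ordering in the CF_PCREL path of the tb_lookup()
hunk below: the pc field is written before the tb pointer is published
with a store-release, and the reader pairs that with a load-acquire on
tb before trusting pc.  The following is a minimal standalone sketch of
that publish/consume pattern using plain C11 atomics instead of the
qatomic_* wrappers; the Entry, publish and lookup names are illustrative
only and not QEMU APIs.

/*
 * Sketch of the pc/tb publish pattern used for the CF_PCREL jmp-cache
 * path.  Illustrative only; not QEMU code.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    _Atomic(void *) tb;   /* published pointer, like jc->array[hash].tb */
    uintptr_t pc;         /* plain field, like jc->array[hash].pc       */
} Entry;

/* Writer: store pc first, then publish tb with release ordering. */
static void publish(Entry *e, uintptr_t pc, void *tb)
{
    e->pc = pc;
    atomic_store_explicit(&e->tb, tb, memory_order_release);
}

/* Reader: acquire-load tb; if non-NULL, the pc stored before it is visible. */
static void *lookup(Entry *e, uintptr_t pc)
{
    void *tb = atomic_load_explicit(&e->tb, memory_order_acquire);

    if (tb && e->pc == pc) {
        return tb;
    }
    return NULL;
}

int main(void)
{
    static Entry e;
    int dummy_tb;

    publish(&e, 0x1000, &dummy_tb);
    printf("hit:  %p\n", lookup(&e, 0x1000));   /* &dummy_tb */
    printf("miss: %p\n", lookup(&e, 0x2000));   /* NULL      */
    return 0;
}

The release/acquire pair is what allows pc to stay a plain, non-atomic
field: a reader that observes the newly published tb is guaranteed to
also observe the pc that was stored before it.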

diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index 92b833adcf..5efa8bf42a 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -256,21 +256,46 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
 
     hash = tb_jmp_cache_hash_func(pc);
     jc = cpu->tb_jmp_cache;
-    tb = tb_jmp_cache_get_tb(jc, cflags, hash);
-
-    if (likely(tb &&
-               tb_jmp_cache_get_pc(jc, hash, tb) == pc &&
-               tb->cs_base == cs_base &&
-               tb->flags == flags &&
-               tb->trace_vcpu_dstate == *cpu->trace_dstate &&
-               tb_cflags(tb) == cflags)) {
-        return tb;
-    }
-    tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
-    if (tb == NULL) {
-        return NULL;
+
+    if (cflags & CF_PCREL) {
+        /* Use acquire to ensure current load of pc from jc. */
+        tb = qatomic_load_acquire(&jc->array[hash].tb);
+
+        if (likely(tb &&
+                   jc->array[hash].pc == pc &&
+                   tb->cs_base == cs_base &&
+                   tb->flags == flags &&
+                   tb->trace_vcpu_dstate == *cpu->trace_dstate &&
+                   tb_cflags(tb) == cflags)) {
+            return tb;
+        }
+        tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
+        if (tb == NULL) {
+            return NULL;
+        }
+        jc->array[hash].pc = pc;
+        /* Use store_release on tb to ensure pc is written first. */
+        qatomic_store_release(&jc->array[hash].tb, tb);
+    } else {
+        /* Use rcu_read to ensure current load of pc from *tb. */
+        tb = qatomic_rcu_read(&jc->array[hash].tb);
+
+        if (likely(tb &&
+                   tb_pc(tb) == pc &&
+                   tb->cs_base == cs_base &&
+                   tb->flags == flags &&
+                   tb->trace_vcpu_dstate == *cpu->trace_dstate &&
+                   tb_cflags(tb) == cflags)) {
+            return tb;
+        }
+        tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
+        if (tb == NULL) {
+            return NULL;
+        }
+        /* Use the pc value already stored in tb->pc. */
+        qatomic_set(&jc->array[hash].tb, tb);
     }
-    tb_jmp_cache_set(jc, hash, tb, pc);
+
     return tb;
 }
 
@@ -959,7 +984,8 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
                  * for the fast lookup
                  */
                 h = tb_jmp_cache_hash_func(pc);
-                tb_jmp_cache_set(cpu->tb_jmp_cache, h, tb, pc);
+                /* Use the pc value already stored in tb->pc. */
+                qatomic_set(&cpu->tb_jmp_cache->array[h].tb, tb);
             }
 
 #ifndef CONFIG_USER_ONLY
diff --git a/accel/tcg/tb-jmp-cache.h b/accel/tcg/tb-jmp-cache.h
index 083939b302..bee87eb840 100644
--- a/accel/tcg/tb-jmp-cache.h
+++ b/accel/tcg/tb-jmp-cache.h
@@ -25,40 +25,4 @@ struct CPUJumpCache {
     } array[TB_JMP_CACHE_SIZE];
 };
 
-static inline TranslationBlock *
-tb_jmp_cache_get_tb(CPUJumpCache *jc, uint32_t cflags, uint32_t hash)
-{
-    if (cflags & CF_PCREL) {
-        /* Use acquire to ensure current load of pc from jc. */
-        return qatomic_load_acquire(&jc->array[hash].tb);
-    } else {
-        /* Use rcu_read to ensure current load of pc from *tb. */
-        return qatomic_rcu_read(&jc->array[hash].tb);
-    }
-}
-
-static inline target_ulong
-tb_jmp_cache_get_pc(CPUJumpCache *jc, uint32_t hash, TranslationBlock *tb)
-{
-    if (tb_cflags(tb) & CF_PCREL) {
-        return jc->array[hash].pc;
-    } else {
-        return tb_pc(tb);
-    }
-}
-
-static inline void
-tb_jmp_cache_set(CPUJumpCache *jc, uint32_t hash,
-                 TranslationBlock *tb, target_ulong pc)
-{
-    if (tb_cflags(tb) & CF_PCREL) {
-        jc->array[hash].pc = pc;
-        /* Use store_release on tb to ensure pc is written first. */
-        qatomic_store_release(&jc->array[hash].tb, tb);
-    } else{
-        /* Use the pc value already stored in tb->pc. */
-        qatomic_set(&jc->array[hash].tb, tb);
-    }
-}
-
 #endif /* ACCEL_TCG_TB_JMP_CACHE_H */
-- 
2.39.1