From: Alex Bennée
Subject: [Qemu-devel] [PATCH v4 23/54] tcg: let plugins instrument virtual memory accesses
Date: Wed, 31 Jul 2019 17:06:48 +0100
From: "Emilio G. Cota" <address@hidden>
To capture all memory accesses we need to hook into all the various
helper functions that are involved in memory operations as well as the
injected inline helper calls. A later commit will allow us to resolve
the actual guest HW addresses by replaying the lookup.
Signed-off-by: Emilio G. Cota <address@hidden>
[AJB: drop haddr handling, just deal in vaddr]
Signed-off-by: Alex Bennée <address@hidden>
---
v3
- fixes for cpu_neg()
v4
- rebase fixups, moved cpu_neg() fixes down the stack
- drop haddr support - will be restored with later helper
- reword commit
---
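As an illustration of what these hooks enable, here is a minimal sketch
of a plugin-side consumer, written against the qemu-plugin.h API as it
appears later in this series. The exact names used (qemu_plugin_register_vcpu_mem_cb,
qemu_plugin_mem_is_store, and friends) reflect the API as merged and are
an assumption at this point in the series. The sketch registers a memory
callback on every instruction at translation time and counts guest loads
and stores by virtual access:

#include <inttypes.h>
#include <stdio.h>
#include <qemu-plugin.h>

QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

/* Not vcpu-safe; a real plugin would keep per-vcpu counters. */
static uint64_t loads, stores;

/* Called for each instrumented guest access with its virtual address. */
static void vcpu_mem(unsigned int vcpu_index, qemu_plugin_meminfo_t info,
                     uint64_t vaddr, void *udata)
{
    if (qemu_plugin_mem_is_store(info)) {
        stores++;
    } else {
        loads++;
    }
}

/* At translation time, request a memory callback for every insn. */
static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
{
    size_t n = qemu_plugin_tb_n_insns(tb);
    for (size_t i = 0; i < n; i++) {
        struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
        qemu_plugin_register_vcpu_mem_cb(insn, vcpu_mem,
                                         QEMU_PLUGIN_CB_NO_REGS,
                                         QEMU_PLUGIN_MEM_RW, NULL);
    }
}

static void plugin_exit(qemu_plugin_id_t id, void *p)
{
    printf("loads: %" PRIu64 " stores: %" PRIu64 "\n", loads, stores);
}

QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
                                           const qemu_info_t *info,
                                           int argc, char **argv)
{
    qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
    qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
    return 0;
}

Built as a shared object, such a plugin would be loaded with e.g.
"qemu-x86_64 -plugin ./libmemcount.so <binary>" (libmemcount.so being a
hypothetical name) once the rest of the series is in place.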
accel/tcg/atomic_common.inc.c | 4 +++
accel/tcg/atomic_template.h | 1 +
accel/tcg/cpu-exec.c | 3 ++
accel/tcg/cputlb.c | 14 ++++----
include/exec/cpu-defs.h | 1 +
include/exec/cpu_ldst_template.h | 28 +++++++++-------
include/exec/cpu_ldst_useronly_template.h | 29 ++++++++--------
tcg/tcg-op.c | 40 ++++++++++++++++++-----
tcg/tcg.h | 1 +
9 files changed, 79 insertions(+), 42 deletions(-)
diff --git a/accel/tcg/atomic_common.inc.c b/accel/tcg/atomic_common.inc.c
index a86098fb2de..344525b0bb3 100644
--- a/accel/tcg/atomic_common.inc.c
+++ b/accel/tcg/atomic_common.inc.c
@@ -25,6 +25,8 @@ void atomic_trace_rmw_pre(CPUArchState *env, target_ulong addr, uint16_t info)
static inline void
atomic_trace_rmw_post(CPUArchState *env, target_ulong addr, uint16_t info)
{
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info);
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info | TRACE_MEM_ST);
}
static inline
@@ -36,6 +38,7 @@ void atomic_trace_ld_pre(CPUArchState *env, target_ulong addr, uint16_t info)
static inline
void atomic_trace_ld_post(CPUArchState *env, target_ulong addr, uint16_t info)
{
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info);
}
static inline
@@ -47,4 +50,5 @@ void atomic_trace_st_pre(CPUArchState *env, target_ulong addr, uint16_t info)
static inline
void atomic_trace_st_post(CPUArchState *env, target_ulong addr, uint16_t info)
{
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info);
}
diff --git a/accel/tcg/atomic_template.h b/accel/tcg/atomic_template.h
index 34f891d4a62..4f209c8e18c 100644
--- a/accel/tcg/atomic_template.h
+++ b/accel/tcg/atomic_template.h
@@ -18,6 +18,7 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
+#include "qemu/plugin.h"
#include "trace/mem.h"
#if DATA_SIZE == 16
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index ab9dfd4f908..81be72a373d 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -269,6 +269,7 @@ void cpu_exec_step_atomic(CPUState *cpu)
qemu_mutex_unlock_iothread();
}
assert_no_pages_locked();
+ qemu_plugin_disable_mem_helpers(cpu);
}
if (cpu_in_exclusive_context(cpu)) {
@@ -702,6 +703,8 @@ int cpu_exec(CPUState *cpu)
if (qemu_mutex_iothread_locked()) {
qemu_mutex_unlock_iothread();
}
+ qemu_plugin_disable_mem_helpers(cpu);
+
assert_no_pages_locked();
}
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index e4ac06041a2..f7c0290639c 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -881,7 +881,7 @@ static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
int mmu_idx, target_ulong addr, uintptr_t retaddr,
- MMUAccessType access_type, int size)
+ TCGMemOp mo, MMUAccessType access_type, int size)
{
CPUState *cpu = env_cpu(env);
hwaddr mr_offset;
@@ -925,7 +925,7 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
int mmu_idx, uint64_t val, target_ulong addr,
- uintptr_t retaddr, int size)
+ uintptr_t retaddr, TCGMemOp mo, int size)
{
CPUState *cpu = env_cpu(env);
hwaddr mr_offset;
@@ -1264,7 +1264,8 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
const MMUAccessType access_type =
code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
- unsigned a_bits = get_alignment_bits(get_memop(oi));
+ TCGMemOp mo = get_memop(oi);
+ unsigned a_bits = get_alignment_bits(mo);
void *haddr;
uint64_t res;
@@ -1313,7 +1314,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
}
res = io_readx(env, &env_tlb(env)->d[mmu_idx].iotlb[index],
- mmu_idx, addr, retaddr, access_type, size);
+ mmu_idx, addr, retaddr, mo, access_type, size);
return handle_bswap(res, size, big_endian);
}
@@ -1513,7 +1514,8 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
target_ulong tlb_addr = tlb_addr_write(entry);
const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
- unsigned a_bits = get_alignment_bits(get_memop(oi));
+ TCGMemOp mo = get_memop(oi);
+ unsigned a_bits = get_alignment_bits(mo);
void *haddr;
/* Handle CPU specific unaligned behaviour */
@@ -1562,7 +1564,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
io_writex(env, &env_tlb(env)->d[mmu_idx].iotlb[index], mmu_idx,
handle_bswap(val, size, big_endian),
- addr, retaddr, size);
+ addr, retaddr, mo, size);
return;
}
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index 9bc713a70b2..31deca369ea 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -215,6 +215,7 @@ typedef struct CPUTLBCommon {
* Since this is placed within CPUNegativeOffsetState, the smallest
* negative offsets are at the end of the struct.
*/
+
typedef struct CPUTLB {
CPUTLBCommon c;
CPUTLBDesc d[NB_MMU_MODES];
diff --git a/include/exec/cpu_ldst_template.h b/include/exec/cpu_ldst_template.h
index 5750a26b9ec..6f0d3407979 100644
--- a/include/exec/cpu_ldst_template.h
+++ b/include/exec/cpu_ldst_template.h
@@ -28,6 +28,7 @@
#include "trace-root.h"
#endif
+#include "qemu/plugin.h"
#include "trace/mem.h"
#if DATA_SIZE == 8
@@ -86,11 +87,9 @@ glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
target_ulong addr;
int mmu_idx = CPU_MMU_INDEX;
TCGMemOpIdx oi;
-
#if !defined(SOFTMMU_CODE_ACCESS)
- trace_guest_mem_before_exec(
- env_cpu(env), ptr,
- trace_mem_build_info(SHIFT, false, MO_TE, false, mmu_idx));
+ uint16_t meminfo = trace_mem_build_info(SHIFT, false, MO_TE, false, mmu_idx);
+ trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
#endif
addr = ptr;
@@ -104,6 +103,9 @@ glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
uintptr_t hostaddr = addr + entry->addend;
res = glue(glue(ld, USUFFIX), _p)((uint8_t *)hostaddr);
}
+#ifndef SOFTMMU_CODE_ACCESS
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
+#endif
return res;
}
@@ -124,11 +126,9 @@ glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
target_ulong addr;
int mmu_idx = CPU_MMU_INDEX;
TCGMemOpIdx oi;
-
#if !defined(SOFTMMU_CODE_ACCESS)
- trace_guest_mem_before_exec(
- env_cpu(env), ptr,
- trace_mem_build_info(SHIFT, true, MO_TE, false, mmu_idx));
+ uint16_t meminfo = trace_mem_build_info(SHIFT, true, MO_TE, false, mmu_idx);
+ trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
#endif
addr = ptr;
@@ -142,6 +142,9 @@ glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
uintptr_t hostaddr = addr + entry->addend;
res = glue(glue(lds, SUFFIX), _p)((uint8_t *)hostaddr);
}
+#ifndef SOFTMMU_CODE_ACCESS
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
+#endif
return res;
}
@@ -165,11 +168,9 @@ glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
target_ulong addr;
int mmu_idx = CPU_MMU_INDEX;
TCGMemOpIdx oi;
-
#if !defined(SOFTMMU_CODE_ACCESS)
- trace_guest_mem_before_exec(
- env_cpu(env), ptr,
- trace_mem_build_info(SHIFT, false, MO_TE, true, mmu_idx));
+ uint16_t meminfo = trace_mem_build_info(SHIFT, false, MO_TE, true, mmu_idx);
+ trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
#endif
addr = ptr;
@@ -183,6 +184,9 @@ glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
uintptr_t hostaddr = addr + entry->addend;
glue(glue(st, SUFFIX), _p)((uint8_t *)hostaddr, v);
}
+#ifndef SOFTMMU_CODE_ACCESS
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
+#endif
}
static inline void
diff --git a/include/exec/cpu_ldst_useronly_template.h b/include/exec/cpu_ldst_useronly_template.h
index 8f7f117ad44..5cd0fa3b99d 100644
--- a/include/exec/cpu_ldst_useronly_template.h
+++ b/include/exec/cpu_ldst_useronly_template.h
@@ -64,18 +64,17 @@
static inline RES_TYPE
glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr)
{
-#ifdef CODE_ACCESS
RES_TYPE ret;
+#ifdef CODE_ACCESS
set_helper_retaddr(1);
ret = glue(glue(ld, USUFFIX), _p)(g2h(ptr));
clear_helper_retaddr();
- return ret;
#else
- trace_guest_mem_before_exec(
- env_cpu(env), ptr,
- trace_mem_build_info(SHIFT, false, MO_TE, false, 0));
- return glue(glue(ld, USUFFIX), _p)(g2h(ptr));
+ uint16_t meminfo = trace_mem_build_info(SHIFT, false, MO_TE, false, 0);
+ trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
+ ret = glue(glue(ld, USUFFIX), _p)(g2h(ptr));
#endif
+ return ret;
}
#ifndef CODE_ACCESS
@@ -96,18 +95,18 @@ glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
static inline int
glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr)
{
-#ifdef CODE_ACCESS
int ret;
+#ifdef CODE_ACCESS
set_helper_retaddr(1);
ret = glue(glue(lds, SUFFIX), _p)(g2h(ptr));
clear_helper_retaddr();
- return ret;
#else
- trace_guest_mem_before_exec(
- env_cpu(env), ptr,
- trace_mem_build_info(SHIFT, true, MO_TE, false, 0));
- return glue(glue(lds, SUFFIX), _p)(g2h(ptr));
+ uint16_t meminfo = trace_mem_build_info(SHIFT, true, MO_TE, false, 0);
+ trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
+ ret = glue(glue(lds, SUFFIX), _p)(g2h(ptr));
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
#endif
+ return ret;
}
#ifndef CODE_ACCESS
@@ -130,10 +129,10 @@ static inline void
glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr,
RES_TYPE v)
{
- trace_guest_mem_before_exec(
- env_cpu(env), ptr,
- trace_mem_build_info(SHIFT, false, MO_TE, true, 0));
+ uint16_t meminfo = trace_mem_build_info(SHIFT, false, MO_TE, true, 0);
+ trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
glue(glue(st, SUFFIX), _p)(g2h(ptr), v);
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
}
static inline void
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index 38e62dcba97..b60b42e6410 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -30,6 +30,7 @@
#include "tcg-mo.h"
#include "trace-tcg.h"
#include "trace/mem.h"
+#include "exec/plugin-gen.h"
/* Reduce the number of ifdefs below. This assumes that all uses of
TCGV_HIGH and TCGV_LOW are properly protected by a conditional that
@@ -2684,6 +2685,7 @@ void tcg_gen_exit_tb(TranslationBlock *tb, unsigned idx)
tcg_debug_assert(idx == TB_EXIT_REQUESTED);
}
+ plugin_gen_disable_mem_helpers();
tcg_gen_op1i(INDEX_op_exit_tb, val);
}
@@ -2696,6 +2698,7 @@ void tcg_gen_goto_tb(unsigned idx)
tcg_debug_assert((tcg_ctx->goto_tb_issue_mask & (1 << idx)) == 0);
tcg_ctx->goto_tb_issue_mask |= 1 << idx;
#endif
+ plugin_gen_disable_mem_helpers();
/* When not chaining, we simply fall through to the "fallback" exit. */
if (!qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
tcg_gen_op1i(INDEX_op_goto_tb, idx);
@@ -2705,7 +2708,10 @@ void tcg_gen_goto_tb(unsigned idx)
void tcg_gen_lookup_and_goto_ptr(void)
{
if (TCG_TARGET_HAS_goto_ptr && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
- TCGv_ptr ptr = tcg_temp_new_ptr();
+ TCGv_ptr ptr;
+
+ plugin_gen_disable_mem_helpers();
+ ptr = tcg_temp_new_ptr();
gen_helper_lookup_tb_ptr(ptr, cpu_env);
tcg_gen_op1i(INDEX_op_goto_ptr, tcgv_ptr_arg(ptr));
tcg_temp_free_ptr(ptr);
@@ -2788,14 +2794,24 @@ static void tcg_gen_req_mo(TCGBar type)
}
}
+static inline void plugin_gen_mem_callbacks(TCGv vaddr, uint16_t info)
+{
+#ifdef CONFIG_PLUGIN
+ if (tcg_ctx->plugin_insn == NULL) {
+ return;
+ }
+ plugin_gen_empty_mem_callback(vaddr, info);
+#endif
+}
+
void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
TCGMemOp orig_memop;
+ uint16_t info = trace_mem_get_info(memop, idx, 0);
tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
memop = tcg_canonicalize_memop(memop, 0, 0);
- trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env,
- addr, trace_mem_get_info(memop, idx, 0));
+ trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info);
orig_memop = memop;
if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
@@ -2807,6 +2823,7 @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
}
gen_ldst_i32(INDEX_op_qemu_ld_i32, val, addr, memop, idx);
+ plugin_gen_mem_callbacks(addr, info);
if ((orig_memop ^ memop) & MO_BSWAP) {
switch (orig_memop & MO_SIZE) {
@@ -2828,11 +2845,11 @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
TCGv_i32 swap = NULL;
+ uint16_t info = trace_mem_get_info(memop, idx, 1);
tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
memop = tcg_canonicalize_memop(memop, 0, 1);
- trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env,
- addr, trace_mem_get_info(memop, idx, 1));
+ trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info);
if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
swap = tcg_temp_new_i32();
@@ -2852,6 +2869,7 @@ void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
}
gen_ldst_i32(INDEX_op_qemu_st_i32, val, addr, memop, idx);
+ plugin_gen_mem_callbacks(addr, info);
if (swap) {
tcg_temp_free_i32(swap);
@@ -2861,6 +2879,7 @@ void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
TCGMemOp orig_memop;
+ uint16_t info;
if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop);
@@ -2874,8 +2893,8 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
memop = tcg_canonicalize_memop(memop, 1, 0);
- trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env,
- addr, trace_mem_get_info(memop, idx, 0));
+ info = trace_mem_get_info(memop, idx, 0);
+ trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info);
orig_memop = memop;
if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
@@ -2887,6 +2906,7 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
}
gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, memop, idx);
+ plugin_gen_mem_callbacks(addr, info);
if ((orig_memop ^ memop) & MO_BSWAP) {
switch (orig_memop & MO_SIZE) {
@@ -2914,6 +2934,7 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
TCGv_i64 swap = NULL;
+ uint16_t info;
if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
tcg_gen_qemu_st_i32(TCGV_LOW(val), addr, idx, memop);
@@ -2922,8 +2943,8 @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
memop = tcg_canonicalize_memop(memop, 1, 1);
- trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env,
- addr, trace_mem_get_info(memop, idx, 1));
+ info = trace_mem_get_info(memop, idx, 1);
+ trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info);
if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
swap = tcg_temp_new_i64();
@@ -2947,6 +2968,7 @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
}
gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, memop, idx);
+ plugin_gen_mem_callbacks(addr, info);
if (swap) {
tcg_temp_free_i64(swap);
diff --git a/tcg/tcg.h b/tcg/tcg.h
index 2385e758e55..93f52f4ca93 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -28,6 +28,7 @@
#include "cpu.h"
#include "exec/tb-context.h"
#include "qemu/bitops.h"
+#include "qemu/plugin.h"
#include "qemu/queue.h"
#include "tcg-mo.h"
#include "tcg-target.h"
--
2.20.1