[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH v11 07/14] accel/tcg: convert profiling of code generation to TBStats
From: |
Fei Wu |
Subject: |
[PATCH v11 07/14] accel/tcg: convert profiling of code generation to TBStats |
Date: |
Fri, 21 Apr 2023 21:24:14 +0800 |
From: Alex Bennée <alex.bennee@linaro.org>
We continue the conversion of CONFIG_PROFILER data to TBStats by
measuring the time it takes to generate code. Instead of calculating
elapsed time as we go, we simply store key timestamps in the profiler
structure and then calculate the totals and add them to TBStats under
lock.
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
---
accel/tcg/tb-stats.c | 33 -------------------
accel/tcg/translate-all.c | 69 ++++++++++++++++++++++-----------------
include/exec/tb-stats.h | 7 ++++
include/tcg/tcg.h | 14 ++++----
tcg/tcg.c | 17 ++++------
5 files changed, 59 insertions(+), 81 deletions(-)
diff --git a/accel/tcg/tb-stats.c b/accel/tcg/tb-stats.c
index a2438a1f51..01adbac2a0 100644
--- a/accel/tcg/tb-stats.c
+++ b/accel/tcg/tb-stats.c
@@ -88,39 +88,6 @@ void dump_jit_profile_info(TCGProfile *s, GString *buf)
jpi->host / (double) jpi->translations);
g_string_append_printf(buf, "avg search data/TB %0.1f\n",
jpi->search_data / (double) jpi->translations);
-
- if (s) {
- int64_t tot = s->interm_time + s->code_time;
- g_string_append_printf(buf, "JIT cycles %" PRId64
- " (%0.3f s at 2.4 GHz)\n",
- tot, tot / 2.4e9);
- g_string_append_printf(buf, "cycles/op %0.1f\n",
- jpi->ops ? (double)tot / jpi->ops : 0);
- g_string_append_printf(buf, "cycles/in byte %0.1f\n",
- jpi->guest ? (double)tot / jpi->guest : 0);
- g_string_append_printf(buf, "cycles/out byte %0.1f\n",
- jpi->host ? (double)tot / jpi->host : 0);
- g_string_append_printf(buf, "cycles/search byte %0.1f\n",
- jpi->search_data ? (double)tot / jpi->search_data : 0);
- if (tot == 0) {
- tot = 1;
- }
- g_string_append_printf(buf, " gen_interm time %0.1f%%\n",
- (double)s->interm_time / tot * 100.0);
- g_string_append_printf(buf, " gen_code time %0.1f%%\n",
- (double)s->code_time / tot * 100.0);
- g_string_append_printf(buf, "optim./code time %0.1f%%\n",
- (double)s->opt_time / (s->code_time ? s->code_time : 1)
- * 100.0);
- g_string_append_printf(buf, "liveness/code time %0.1f%%\n",
- (double)s->la_time / (s->code_time ? s->code_time : 1)
- * 100.0);
- g_string_append_printf(buf, "cpu_restore count %" PRId64 "\n",
- s->restore_count);
- g_string_append_printf(buf, " avg cycles %0.1f\n",
- s->restore_count ?
- (double)s->restore_time / s->restore_count : 0);
- }
}
g_free(jpi);
}
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index bf10987450..92285d0add 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -278,8 +278,9 @@ void page_init(void)
*/
static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
target_ulong pc, void *host_pc,
- int *max_insns, int64_t *ti)
+ int *max_insns)
{
+ TCGProfile *prof = &tcg_ctx->prof;
int ret = sigsetjmp(tcg_ctx->jmp_trans, 0);
if (unlikely(ret != 0)) {
return ret;
@@ -293,11 +294,9 @@ static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
tcg_ctx->cpu = NULL;
*max_insns = tb->icount;
-#ifdef CONFIG_PROFILER
- qatomic_set(&tcg_ctx->prof.interm_time,
- tcg_ctx->prof.interm_time + profile_getclock() - *ti);
- *ti = profile_getclock();
-#endif
+ if (tb_stats_enabled(tb, TB_JIT_TIME)) {
+ prof->gen_ir_done_time = profile_getclock();
+ }
return tcg_gen_code(tcg_ctx, tb, pc);
}
@@ -348,7 +347,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
tcg_insn_unit *gen_code_buf;
int gen_code_size, search_size, max_insns;
TCGProfile *prof = &tcg_ctx->prof;
- int64_t ti;
void *host_pc;
assert_memory_lock();
@@ -392,10 +390,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
tcg_ctx->gen_tb = tb;
tb_overflow:
-#ifdef CONFIG_PROFILER
- ti = profile_getclock();
-#endif
-
trace_translate_block(tb, pc, tb->tc.ptr);
/*
@@ -407,11 +401,14 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
if (tb_stats_collection_enabled() &&
qemu_log_in_addr_range(tb->pc)) {
tb->tb_stats = tb_get_stats(phys_pc, pc, cs_base, flags);
+ if (tb_stats_enabled(tb, TB_JIT_TIME)) {
+ prof->gen_start_time = profile_getclock();
+ }
} else {
tb->tb_stats = NULL;
}
- gen_code_size = setjmp_gen_code(env, tb, pc, host_pc, &max_insns, &ti);
+ gen_code_size = setjmp_gen_code(env, tb, pc, host_pc, &max_insns);
if (unlikely(gen_code_size < 0)) {
switch (gen_code_size) {
case -1:
@@ -463,9 +460,9 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
*/
perf_report_code(pc, tb, tcg_splitwx_to_rx(gen_code_buf));
-#ifdef CONFIG_PROFILER
- qatomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
-#endif
+ if (tb_stats_enabled(tb, TB_JIT_TIME)) {
+ prof->gen_code_done_time = profile_getclock();
+ }
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
@@ -575,26 +572,38 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
* Collect JIT stats when enabled. We batch them all up here to
* avoid spamming the cache with atomic accesses
*/
- if (tb_stats_enabled(tb, TB_JIT_STATS)) {
+ if (tb_stats_enabled(tb, (TB_JIT_STATS | TB_JIT_TIME))) {
TBStatistics *ts = tb->tb_stats;
qemu_mutex_lock(&ts->jit_stats_lock);
- ts->code.num_guest_inst += prof->translation.nb_guest_insns;
- ts->code.num_tcg_ops += prof->translation.nb_ops_pre_opt;
- ts->code.num_tcg_ops_opt += tcg_ctx->nb_ops;
- ts->code.spills += prof->translation.nb_spills;
- ts->code.temps += prof->translation.temp_count;
- ts->code.deleted_ops += prof->translation.del_op_count;
- ts->code.in_len += tb->size;
- ts->code.out_len += tb->tc.size;
- ts->code.search_out_len += search_size;
-
- ts->translations.total++;
- if (tb_page_addr1(tb) != -1) {
- ts->translations.spanning++;
+ if (tb_stats_enabled(tb, TB_JIT_STATS)) {
+ ts->code.num_guest_inst += prof->translation.nb_guest_insns;
+ ts->code.num_tcg_ops += prof->translation.nb_ops_pre_opt;
+ ts->code.num_tcg_ops_opt += tcg_ctx->nb_ops;
+ ts->code.spills += prof->translation.nb_spills;
+ ts->code.temps += prof->translation.temp_count;
+ ts->code.deleted_ops += prof->translation.del_op_count;
+ ts->code.in_len += tb->size;
+ ts->code.out_len += tb->tc.size;
+ ts->code.search_out_len += search_size;
+
+ ts->translations.total++;
+ if (tb_page_addr1(tb) != -1) {
+ ts->translations.spanning++;
+ }
+
+ g_ptr_array_add(ts->tbs, tb);
}
- g_ptr_array_add(ts->tbs, tb);
+ if (tb_stats_enabled(tb, TB_JIT_TIME)) {
+ ts->gen_times.ir += prof->gen_ir_done_time - prof->gen_start_time;
+ ts->gen_times.ir_opt +=
+ prof->gen_opt_done_time - prof->gen_ir_done_time;
+ ts->gen_times.la +=
+ prof->gen_la_done_time - prof->gen_opt_done_time;
+ ts->gen_times.code +=
+ prof->gen_code_done_time - prof->gen_la_done_time;
+ }
qemu_mutex_unlock(&ts->jit_stats_lock);
}
diff --git a/include/exec/tb-stats.h b/include/exec/tb-stats.h
index 80314c50f9..a23b6320bd 100644
--- a/include/exec/tb-stats.h
+++ b/include/exec/tb-stats.h
@@ -96,6 +96,12 @@ struct TBStatistics {
uint64_t tb_restore_time;
uint64_t tb_restore_count;
+ struct {
+ uint64_t ir;
+ uint64_t ir_opt;
+ uint64_t la;
+ uint64_t code;
+ } gen_times;
};
bool tb_stats_cmp(const void *ap, const void *bp);
@@ -103,6 +109,7 @@ bool tb_stats_cmp(const void *ap, const void *bp);
void init_tb_stats_htable(void);
void dump_jit_profile_info(TCGProfile *s, GString *buf);
+void dump_jit_exec_time_info(uint64_t dev_time);
#define TB_NOTHING (1 << 0)
#define TB_EXEC_STATS (1 << 1)
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
index 645dbaa563..abad5d6a70 100644
--- a/include/tcg/tcg.h
+++ b/include/tcg/tcg.h
@@ -557,13 +557,13 @@ typedef struct TCGProfile {
int64_t code_in_len;
int64_t code_out_len;
int64_t search_out_len;
- int64_t interm_time;
- int64_t code_time;
- int64_t la_time;
- int64_t opt_time;
- int64_t restore_count;
- int64_t restore_time;
- int64_t table_op_count[NB_OPS];
+
+ /* Timestamps during translation */
+ uint64_t gen_start_time;
+ uint64_t gen_ir_done_time;
+ uint64_t gen_opt_done_time;
+ uint64_t gen_la_done_time;
+ uint64_t gen_code_done_time;
} TCGProfile;
struct TCGContext {
diff --git a/tcg/tcg.c b/tcg/tcg.c
index 6f46c87dc1..716afbd980 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -4957,18 +4957,13 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start)
}
#endif
-#ifdef CONFIG_PROFILER
- qatomic_set(&prof->opt_time, prof->opt_time - profile_getclock());
-#endif
-
#ifdef USE_TCG_OPTIMIZATIONS
tcg_optimize(s);
#endif
-#ifdef CONFIG_PROFILER
- qatomic_set(&prof->opt_time, prof->opt_time + profile_getclock());
- qatomic_set(&prof->la_time, prof->la_time - profile_getclock());
-#endif
+ if (tb_stats_enabled(tb, TB_JIT_TIME)) {
+ prof->gen_opt_done_time = profile_getclock();
+ }
reachable_code_pass(s);
liveness_pass_0(s);
@@ -4994,9 +4989,9 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start)
}
}
-#ifdef CONFIG_PROFILER
- qatomic_set(&prof->la_time, prof->la_time + profile_getclock());
-#endif
+ if (tb_stats_enabled(tb, TB_JIT_TIME)) {
+ prof->gen_la_done_time = profile_getclock();
+ }
#ifdef DEBUG_DISAS
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
--
2.25.1
- [PATCH v11 00/14] TCG code quality tracking, Fei Wu, 2023/04/21
- [PATCH v11 01/14] accel/tcg: introduce TBStatistics structure, Fei Wu, 2023/04/21
- [PATCH v11 02/14] accel: collecting TB execution count, Fei Wu, 2023/04/21
- [PATCH v11 03/14] accel: collecting JIT statistics, Fei Wu, 2023/04/21
- [PATCH v11 04/14] accel: replacing part of CONFIG_PROFILER with TBStats, Fei Wu, 2023/04/21
- [PATCH v11 05/14] accel/tcg: move profiler dev_time to tb_stats, Fei Wu, 2023/04/21
- [PATCH v11 06/14] accel/tcg: convert profiling of restore operations to TBStats, Fei Wu, 2023/04/21
- [PATCH v11 07/14] accel/tcg: convert profiling of code generation to TBStats, Fei Wu <=
- [PATCH v11 08/14] accel: adding TB_JIT_TIME and full replacing CONFIG_PROFILER, Fei Wu, 2023/04/21
- [PATCH v11 09/14] debug: add -d tb_stats to control TBStatistics collection:, Fei Wu, 2023/04/21
- [PATCH v11 10/14] monitor: adding tb_stats hmp command, Fei Wu, 2023/04/21
- [PATCH v11 11/14] tb-stats: reset the tracked TBs on a tb_flush, Fei Wu, 2023/04/21
- [PATCH v11 12/14] Adding info [tb-list|tb] commands to HMP (WIP), Fei Wu, 2023/04/21
- [PATCH v11 13/14] tb-stats: dump hot TBs at the end of the execution, Fei Wu, 2023/04/21
- [PATCH v11 14/14] configure: remove the final bits of --profiler support, Fei Wu, 2023/04/21
- Re: [PATCH v11 00/14] TCG code quality tracking, Alex Bennée, 2023/04/21