From: Vanderson Martins do Rosario
Subject: Re: [Qemu-devel] [PATCH v5 08/10] Adding info [tbs|tb|coverset] commands to HMP. These commands allow the exploration of TBs generated by the TCG: understand which ones are hotter, which have more guest/host instructions, and so on, and examine their guest, host and IR code.
Date: Wed, 21 Aug 2019 11:16:08 -0300

Do you think that "info tb-list 15" as a command to list 15 TBs makes more
sense than "info tbs 15"?

It would then be "info tb id" to investigate a specific TB and "info tb-list
number" to list the TBs.

Vanderson M. Rosario


On Thu, Aug 15, 2019 at 6:00 AM Dr. David Alan Gilbert <address@hidden>
wrote:

> * vandersonmr (address@hidden) wrote:
> > The goal of this command is to allow the dynamic exploration
> > of TCG behavior and code quality. Therefore, for now, a
> > corresponding QMP command is not worthwhile.
> >
> > Signed-off-by: Vanderson M. do Rosario <address@hidden>
> > ---
> >  accel/tcg/tb-stats.c         | 398 ++++++++++++++++++++++++++++++++++-
> >  accel/tcg/translate-all.c    |   2 +-
> >  disas.c                      |  31 ++-
> >  hmp-commands-info.hx         |  24 +++
> >  include/exec/tb-stats.h      |  43 +++-
> >  include/qemu/log-for-trace.h |   4 +
> >  include/qemu/log.h           |   2 +
> >  monitor/misc.c               |  74 +++++++
> >  util/log.c                   |  52 ++++-
> >  9 files changed, 609 insertions(+), 21 deletions(-)
> >
> > diff --git a/accel/tcg/tb-stats.c b/accel/tcg/tb-stats.c
> > index f28fd7b434..f5e519bdb7 100644
> > --- a/accel/tcg/tb-stats.c
> > +++ b/accel/tcg/tb-stats.c
> > @@ -11,9 +11,36 @@
> >
> >  /* only accessed in safe work */
> >  static GList *last_search;
> > -
> > +int id = 1; /* display_id increment counter */
> >  uint64_t dev_time;
> >
> > +static TBStatistics *get_tbstats_by_id(int id)
> > +{
> > +    GList *iter;
> > +
> > +    for (iter = last_search; iter; iter = g_list_next(iter)) {
> > +        TBStatistics *tbs = iter->data;
> > +        if (tbs && tbs->display_id == id) {
> > +            return tbs;
> > +        }
> > +    }
> > +    return NULL;
> > +}
> > +
> > +static TBStatistics *get_tbstats_by_addr(target_ulong pc)
> > +{
> > +    GList *iter;
> > +    for (iter = last_search; iter; iter = g_list_next(iter)) {
> > +        TBStatistics *tbs = iter->data;
> > +        if (tbs && tbs->pc == pc) {
> > +            return tbs;
> > +        }
> > +    }
> > +    return NULL;
> > +}
> > +
> >  struct jit_profile_info {
> >      uint64_t translations;
> >      uint64_t aborted;
> > @@ -155,6 +182,7 @@ static void clean_tbstats(void)
> >      qht_destroy(&tb_ctx.tb_stats);
> >  }
> >
> > +
> >  void do_hmp_tbstats_safe(CPUState *cpu, run_on_cpu_data icmd)
> >  {
> >      struct TbstatsCommand *cmdinfo = icmd.host_ptr;
> > @@ -242,6 +270,374 @@ void init_tb_stats_htable_if_not(void)
> >      }
> >  }
> >
> > +static void collect_tb_stats(void *p, uint32_t hash, void *userp)
> > +{
> > +    last_search = g_list_prepend(last_search, p);
> > +}
> > +
> > +static void dump_tb_targets(TBStatistics *tbs)
> > +{
> > +    if (tbs && tbs->tb) {
> > +        uintptr_t dst1 = atomic_read(tbs->tb->jmp_dest);
> > +        uintptr_t dst2 = atomic_read(tbs->tb->jmp_dest + 1);
> > +        TranslationBlock *tb_dst1 = dst1 > 1 ? (TranslationBlock *) dst1 : 0;
> > +        TranslationBlock *tb_dst2 = dst2 > 1 ? (TranslationBlock *) dst2 : 0;
> > +        target_ulong pc1 = tb_dst1 ? tb_dst1->pc : 0;
> > +        target_ulong pc2 = tb_dst2 ? tb_dst2->pc : 0;
> > +
> > +        /* if there is no display id from the last_search, then create one */
> > +        TBStatistics *tbstats_pc1 = get_tbstats_by_addr(pc1);
> > +        TBStatistics *tbstats_pc2 = get_tbstats_by_addr(pc2);
> > +
> > +        if (!tbstats_pc1 && tb_dst1 && tb_dst1->tb_stats) {
> > +            last_search = g_list_append(last_search, tb_dst1->tb_stats);
> > +            tbstats_pc1 = tb_dst1->tb_stats;
> > +        }
> > +
> > +        if (!tbstats_pc2 && tb_dst2 && tb_dst2->tb_stats) {
> > +            last_search = g_list_append(last_search, tb_dst2->tb_stats);
> > +            tbstats_pc2 = tb_dst2->tb_stats;
> > +        }
> > +
> > +        if (tbstats_pc1 && tbstats_pc1->display_id == 0) {
> > +            tbstats_pc1->display_id = id++;
> > +        }
> > +
> > +        if (tbstats_pc2 && tbstats_pc2->display_id == 0) {
> > +            tbstats_pc2->display_id = id++;
> > +        }
> > +
> > +        if (pc1 && !pc2) {
> > +            qemu_log("\t| targets: 0x"TARGET_FMT_lx" (id:%d)\n",
> > +                    pc1, tb_dst1 ? tbstats_pc1->display_id : -1);
> > +        } else if (pc1 && pc2) {
> > +            qemu_log("\t| targets: 0x"TARGET_FMT_lx" (id:%d), "
> > +                     "0x"TARGET_FMT_lx" (id:%d)\n",
> > +                    pc1, tb_dst1 ? tbstats_pc1->display_id : -1,
> > +                    pc2, tb_dst2 ? tbstats_pc2->display_id : -1);
> > +        } else {
> > +            qemu_log("\t| targets: no direct target\n");
> > +        }
> > +    }
> > +}
> > +
> > +static void dump_tb_header(TBStatistics *tbs)
> > +{
> > +    unsigned g = stat_per_translation(tbs, code.num_guest_inst);
> > +    unsigned ops = stat_per_translation(tbs, code.num_tcg_ops);
> > +    unsigned ops_opt = stat_per_translation(tbs, code.num_tcg_ops_opt);
> > +    unsigned spills = stat_per_translation(tbs, code.spills);
> > +    unsigned h = stat_per_translation(tbs, code.out_len);
> > +
> > +    float guest_host_prop = g ? ((float) h / g) : 0;
> > +
> > +    qemu_log("TB id:%d | phys:0x"TB_PAGE_ADDR_FMT" virt:0x"TARGET_FMT_lx
> > +             " flags:%#08x\n", tbs->display_id, tbs->phys_pc, tbs->pc,
> tbs->flags);
> > +
> > +    if (tbs_stats_enabled(tbs, TB_EXEC_STATS)) {
> > +        qemu_log("\t| exec:%lu/%lu\n", tbs->executions.normal,
> tbs->executions.atomic);
> > +    }
> > +
> > +    if (tbs_stats_enabled(tbs, TB_JIT_STATS)) {
> > +        qemu_log("\t| trans:%lu ints: g:%u op:%u op_opt:%u spills:%d"
> > +             "\n\t| h/g (host bytes / guest insts): %f\n",
> > +             tbs->translations.total, g, ops, ops_opt, spills, guest_host_prop);
> > +    }
> > +
> > +    if (tbs_stats_enabled(tbs, TB_JIT_TIME)) {
> > +        qemu_log("\t| time to gen at 2.4GHz => code:%0.2lf(ns)
> IR:%0.2lf(ns)\n",
> > +             tbs->time.code / 2.4, tbs->time.interm / 2.4);
> > +    }
> > +
> > +    dump_tb_targets(tbs);
> > +    qemu_log("\n");
> > +}
> > +
> > +static gint
> > +inverse_sort_tbs(gconstpointer p1, gconstpointer p2, gpointer psort_by)
> > +{
> > +    const TBStatistics *tbs1 = (TBStatistics *) p1;
> > +    const TBStatistics *tbs2 = (TBStatistics *) p2;
> > +    int sort_by = *((int *) psort_by);
> > +    unsigned long c1 = 0;
> > +    unsigned long c2 = 0;
> > +
> > +    if (likely(sort_by == SORT_BY_SPILLS)) {
> > +        c1 = stat_per_translation(tbs1, code.spills);
> > +        c2 = stat_per_translation(tbs2, code.spills);
> > +    } else if (likely(sort_by == SORT_BY_HOTNESS)) {
> > +        c1 = stat_per_translation(tbs1, executions.normal);
> > +        c2 = stat_per_translation(tbs2, executions.normal);
> > +    } else if (likely(sort_by == SORT_BY_HG)) {
> > +        if (tbs1->code.num_guest_inst == 0) {
> > +            return -1;
> > +        }
> > +        if (tbs2->code.num_guest_inst == 0) {
> > +            return 1;
> > +        }
> > +
> > +        float a =
> > +            (float) stat_per_translation(tbs1, code.out_len) / tbs1->code.num_guest_inst;
> > +        float b =
> > +            (float) stat_per_translation(tbs2, code.out_len) / tbs2->code.num_guest_inst;
> > +        c1 = a <= b ? 0 : 1;
> > +        c2 = a <= b ? 1 : 0;
> > +    }
> > +
> > +    return c1 < c2 ? 1 : c1 == c2 ? 0 : -1;
> > +}
> > +
> > +static void dump_last_search_headers(int count)
> > +{
> > +    if (!last_search) {
> > +        qemu_log("No data collected yet\n");
> > +        return;
> > +    }
> > +
> > +    GList *i;
> > +    for (i = last_search; i && count--; i = i->next) {
> > +        TBStatistics *tbs = (TBStatistics *) i->data;
> > +        dump_tb_header(tbs);
> > +    }
> > +}
> > +
> > +static void do_dump_coverset_info(int percentage)
> > +{
> > +    uint64_t total_exec_count = 0;
> > +    uint64_t covered_exec_count = 0;
> > +    unsigned coverset_size = 0;
> > +    id = 1;
> > +    GList *i;
> > +
> > +    g_list_free(last_search);
> > +    last_search = NULL;
> > +
> > +    qht_iter(&tb_ctx.tb_stats, collect_tb_stats, NULL);
> > +
> > +    int sort_by = SORT_BY_HOTNESS;
> > +    last_search = g_list_sort_with_data(last_search, inverse_sort_tbs, &sort_by);
> > +
> > +    if (!last_search) {
> > +        qemu_log("No data collected yet\n");
> > +        return;
> > +    }
> > +
> > +    /* Compute total execution count for all tbs */
> > +    for (i = last_search; i; i = i->next) {
> > +        TBStatistics *tbs = (TBStatistics *) i->data;
> > +        total_exec_count += tbs->executions.normal * tbs->code.num_guest_inst;
> > +    }
> > +
> > +    for (i = last_search; i; i = i->next) {
> > +        TBStatistics *tbs = (TBStatistics *) i->data;
> > +        covered_exec_count += tbs->executions.normal * tbs->code.num_guest_inst;
> > +        tbs->display_id = id++;
> > +        coverset_size++;
> > +
> > +        /* Iterate and display TBs until we reach the requested coverage percentage */
> > +        if ((covered_exec_count * 100) / total_exec_count > percentage) {
> > +            break;
> > +        }
> > +    }
> > +
> > +    qemu_log("\n------------------------------\n");
> > +    qemu_log("# of TBs to reach %d%% of the total of guest insts exec:
> %u\t",
> > +                percentage, coverset_size);
> > +    qemu_log("Total of guest insts exec: %lu\n", total_exec_count);
> > +    qemu_log("\n------------------------------\n");
> > +
> > +    /* free the unused bits */
> > +    if (i) {
> > +        if (i->next) {
> > +            i->next->prev = NULL;
> > +        }
> > +        g_list_free(i->next);
> > +        i->next = NULL;
> > +    }
> > +
> > +    dump_last_search_headers(coverset_size);
> > +}
> > +
> > +static void do_dump_tbs_info(int total, int sort_by)
> > +{
> > +    id = 1;
> > +    GList *i;
> > +    int count = total;
> > +
> > +    g_list_free(last_search);
> > +    last_search = NULL;
> > +
> > +    qht_iter(&tb_ctx.tb_stats, collect_tb_stats, NULL);
> > +
> > +    last_search = g_list_sort_with_data(last_search, inverse_sort_tbs,
> > +                                        &sort_by);
> > +
> > +
> > +    if (!last_search) {
> > +        qemu_printf("No data collected yet!\n");
> > +        return;
> > +    }
> > +
> > +    for (i = last_search; i && count--; i = i->next) {
> > +        TBStatistics *tbs = (TBStatistics *) i->data;
> > +        tbs->display_id = id++;
> > +    }
> > +
> > +    /* free the unused bits */
> > +    if (i) {
> > +        if (i->next) {
> > +            i->next->prev = NULL;
> > +        }
> > +        g_list_free(i->next);
> > +        i->next = NULL;
> > +    }
> > +
> > +    dump_last_search_headers(total);
> > +}
> > +
> > +static void
> > +do_dump_coverset_info_safe(CPUState *cpu, run_on_cpu_data percentage)
> > +{
> > +    qemu_log_to_monitor(true);
> > +    do_dump_coverset_info(percentage.host_int);
> > +    qemu_log_to_monitor(false);
> > +}
> > +
> > +struct tbs_dump_info {
> > +    int count;
> > +    int sort_by;
> > +};
> > +
> > +static void do_dump_tbs_info_safe(CPUState *cpu, run_on_cpu_data tbdi)
> > +{
> > +    struct tbs_dump_info *info = tbdi.host_ptr;
> > +    qemu_log_to_monitor(true);
> > +    do_dump_tbs_info(info->count, info->sort_by);
> > +    qemu_log_to_monitor(false);
> > +    g_free(info);
> > +}
> > +
> > +/*
> > + * When we dump_tbs_info on a live system via the HMP we want to
> > + * ensure the system is quiescent before we start outputting stuff.
> > + * Otherwise we could pollute the output with other logging output.
> > + */
> > +void dump_coverset_info(int percentage, bool use_monitor)
> > +{
> > +    if (use_monitor) {
> > +        async_safe_run_on_cpu(first_cpu, do_dump_coverset_info_safe,
> > +                              RUN_ON_CPU_HOST_INT(percentage));
> > +    } else {
> > +        do_dump_coverset_info(percentage);
> > +    }
> > +}
> > +
> > +void dump_tbs_info(int count, int sort_by, bool use_monitor)
> > +{
> > +    if (use_monitor) {
> > +        struct tbs_dump_info *tbdi = g_new(struct tbs_dump_info, 1);
> > +        tbdi->count = count;
> > +        tbdi->sort_by = sort_by;
> > +        async_safe_run_on_cpu(first_cpu, do_dump_tbs_info_safe,
> > +                              RUN_ON_CPU_HOST_PTR(tbdi));
> > +    } else {
> > +        do_dump_tbs_info(count, sort_by);
> > +    }
> > +}
> > +
> > +static GString *get_code_string(TBStatistics *tbs, int log_flags)
> > +{
> > +    int old_log_flags = qemu_loglevel;
> > +
> > +    CPUState *cpu = first_cpu;
> > +    uint32_t cflags = curr_cflags() | CF_NOCACHE;
> > +    TranslationBlock *tb = NULL;
> > +
> > +    GString *code_s = g_string_new(NULL);
> > +    qemu_log_to_string(true, code_s);
> > +
> > +    qemu_set_log(log_flags);
> > +
> > +    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
> > +        mmap_lock();
> > +        tb = tb_gen_code(cpu, tbs->pc, tbs->cs_base, tbs->flags, cflags);
> > +        tb_phys_invalidate(tb, -1);
> > +        mmap_unlock();
> > +    } else {
> > +        /*
> > +         * The mmap_lock is dropped by tb_gen_code if it runs out of
> > +         * memory.
> > +         */
> > +        fprintf(stderr, "%s: dbg failed!\n", __func__);
> > +        qemu_log("\ncould not gen code for this TB\n");
> > +        assert_no_pages_locked();
> > +    }
> > +
> > +    qemu_set_log(old_log_flags);
> > +    qemu_log_to_string(false, NULL);
> > +
> > +    if (tb) {
> > +        tcg_tb_remove(tb);
> > +    }
> > +
> > +    return code_s;
> > +}
> > +
> > +static void do_tb_dump_with_statistics(TBStatistics *tbs, int log_flags)
> > +{
> > +    qemu_log("\n------------------------------\n\n");
> > +    dump_tb_header(tbs);
> > +
> > +    GString *code_s = get_code_string(tbs, log_flags);
> > +    qemu_log("%s", code_s->str);
> > +    g_string_free(code_s, true);
> > +    qemu_log("------------------------------\n");
> > +}
> > +
> > +struct tb_dump_info {
> > +    int id;
> > +    int log_flags;
> > +    bool use_monitor;
> > +};
> > +
> > +static void do_dump_tb_info_safe(CPUState *cpu, run_on_cpu_data info)
> > +{
> > +    struct tb_dump_info *tbdi = (struct tb_dump_info *) info.host_ptr;
> > +
> > +    if (!last_search) {
> > +        qemu_log("no search on record\n");
> > +        return;
> > +    }
> > +
> > +    qemu_log_to_monitor(tbdi->use_monitor);
> > +
> > +    TBStatistics *tbs = get_tbstats_by_id(tbdi->id);
> > +    if (tbs) {
> > +        do_tb_dump_with_statistics(tbs, tbdi->log_flags);
> > +    } else {
> > +        qemu_log("no TB statitics found with id %d\n", tbdi->id);
> > +    }
> > +
> > +    qemu_log_to_monitor(false);
> > +
> > +    g_free(tbdi);
> > +}
> > +
> > +void dump_tb_info(int id, int log_mask, bool use_monitor)
> > +{
> > +    struct tb_dump_info *tbdi = g_new(struct tb_dump_info, 1);
> > +
> > +    tbdi->id = id;
> > +    tbdi->log_flags = log_mask;
> > +    tbdi->use_monitor = use_monitor;
> > +
> > +    async_safe_run_on_cpu(first_cpu, do_dump_tb_info_safe,
> > +                          RUN_ON_CPU_HOST_PTR(tbdi));
> > +
> > +    /* tbdi free'd by do_dump_tb_info_safe */
> > +}
> > +
> > +
> >  void enable_collect_tb_stats(void)
> >  {
> >      init_tb_stats_htable_if_not();
> > diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
> > index b94a1d67b6..0807411a8c 100644
> > --- a/accel/tcg/translate-all.c
> > +++ b/accel/tcg/translate-all.c
> > @@ -1779,7 +1779,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
> >       * generation so we can count interesting things about this
> >       * generation.
> >       */
> > -    if (tb_stats_collection_enabled()) {
> > +    if (tb_stats_collection_enabled() && !(tb->cflags & CF_NOCACHE)) {
> >          tb->tb_stats = tb_get_stats(phys_pc, pc, cs_base, flags, tb);
> >          uint32_t flag = get_default_tbstats_flag();
> >
> > diff --git a/disas.c b/disas.c
> > index 3e2bfa572b..d5292d4246 100644
> > --- a/disas.c
> > +++ b/disas.c
> > @@ -8,6 +8,8 @@
> >  #include "disas/disas.h"
> >  #include "disas/capstone.h"
> >
> > +#include "qemu/log-for-trace.h"
> > +
> >  typedef struct CPUDebug {
> >      struct disassemble_info info;
> >      CPUState *cpu;
> > @@ -420,6 +422,22 @@ static bool cap_disas_monitor(disassemble_info *info, uint64_t pc, int count)
> >  # define cap_disas_monitor(i, p, c)  false
> >  #endif /* CONFIG_CAPSTONE */
> >
> > +static int fprintf_log(struct _IO_FILE *a, const char *b, ...)
> > +{
> > +    va_list ap;
> > +    va_start(ap, b);
> > +
> > +    if (!to_string) {
> > +        vfprintf(a, b, ap);
> > +    } else {
> > +        qemu_vlog(b, ap);
> > +    }
> > +
> > +    va_end(ap);
> > +
> > +    return 1;
> > +}
> > +
> >  /* Disassemble this for me please... (debugging).  */
> >  void target_disas(FILE *out, CPUState *cpu, target_ulong code,
> >                    target_ulong size)
> > @@ -429,7 +447,7 @@ void target_disas(FILE *out, CPUState *cpu, target_ulong code,
> >      int count;
> >      CPUDebug s;
> >
> > -    INIT_DISASSEMBLE_INFO(s.info, out, fprintf);
> > +    INIT_DISASSEMBLE_INFO(s.info, out, fprintf_log);
> >
> >      s.cpu = cpu;
> >      s.info.read_memory_func = target_read_memory;
> > @@ -460,11 +478,12 @@ void target_disas(FILE *out, CPUState *cpu, target_ulong code,
> >      }
> >
> >      for (pc = code; size > 0; pc += count, size -= count) {
> > -     fprintf(out, "0x" TARGET_FMT_lx ":  ", pc);
> > -     count = s.info.print_insn(pc, &s.info);
> > -     fprintf(out, "\n");
> > -     if (count < 0)
> > -         break;
> > +        fprintf_log(out, "0x" TARGET_FMT_lx ":  ", pc);
> > +        count = s.info.print_insn(pc, &s.info);
> > +        fprintf_log(out, "\n");
> > +        if (count < 0) {
> > +            break;
> > +        }
> >          if (size < count) {
> >              fprintf(out,
> >                      "Disassembler disagrees with translator over
> instruction "
> > diff --git a/hmp-commands-info.hx b/hmp-commands-info.hx
> > index c59444c461..6691303c59 100644
> > --- a/hmp-commands-info.hx
> > +++ b/hmp-commands-info.hx
> > @@ -289,6 +289,30 @@ ETEXI
> >          .help       = "show dynamic compiler info",
> >          .cmd        = hmp_info_jit,
> >      },
> > +    {
> > +        .name       = "tbs",
> > +        .args_type  = "number:i?,sortedby:s?",
> > +        .params     = "[number sortedby]",
> > +        .help       = "show a [number] translated blocks sorted by
> [sortedby]"
> > +                      "sortedby opts: hotness hg spills",
> > +        .cmd        = hmp_info_tbs,
> > +    },
> > +    {
> > +        .name       = "tb",
>
> Having two commands 'tb' and 'tbs' is pretty confusing.
>
> > +        .args_type  = "id:i,flags:s?",
> > +        .params     = "id [log1[,...] flags]",
>
> What's that '[,...]' represent? That's just asking for an integer isn't
> it?
>
> > +        .help       = "show information about one translated block by
> id."
> > +                      "a dump flag can be used to set dump code level:
> out_asm in_asm op",
> > +        .cmd        = hmp_info_tb,
> > +    },
> > +    {
> > +        .name       = "coverset",
> > +        .args_type  = "coverage:i?",
> > +        .params     = "[coverage]",
> > +        .help       = "show hottest translated blocks neccesary to
> cover"
> > +                      "[coverage]% of the execution count",
> > +        .cmd        = hmp_info_coverset,
> > +    },
> >  #endif
> >
> >  STEXI
> > diff --git a/include/exec/tb-stats.h b/include/exec/tb-stats.h
> > index 9271b90924..dc2a8155a0 100644
> > --- a/include/exec/tb-stats.h
> > +++ b/include/exec/tb-stats.h
> > @@ -9,8 +9,11 @@
> >  enum SortBy { SORT_BY_HOTNESS, SORT_BY_HG /* Host/Guest */, SORT_BY_SPILLS };
> >  enum TbstatsCmd { START, PAUSE, STOP, FILTER };
> >
> > +#define tbs_stats_enabled(tbs, JIT_STATS) \
> > +    (tbs && (tbs->stats_enabled & JIT_STATS))
> > +
> >  #define tb_stats_enabled(tb, JIT_STATS) \
> > -    (tb && tb->tb_stats && (tb->tb_stats->stats_enabled & JIT_STATS))
> > +    (tb && tb->tb_stats && tbs_stats_enabled(tb->tb_stats, JIT_STATS))
> >
> >  #define stat_per_translation(stat, name) \
> >      (stat->translations.total ? stat->name / stat->translations.total : 0)
> > @@ -56,7 +59,6 @@ struct TBStatistics {
> >
> >      struct {
> >          unsigned long total;
> > -        unsigned long uncached;
> >          unsigned long spanning;
> >      } translations;
> >
> > @@ -69,6 +71,9 @@ struct TBStatistics {
> >          uint64_t la;
> >      } time;
> >
> > +    /* HMP information - used for referring to previous search */
> > +    int display_id;
> > +
> >      /* current TB linked to this TBStatistics */
> >      TranslationBlock *tb;
> >  };
> > @@ -89,6 +94,40 @@ struct TbstatsCommand {
> >
> >  void do_hmp_tbstats_safe(CPUState *cpu, run_on_cpu_data icmd);
> >
> > +/**
> > + * dump_coverset_info: report the hottest blocks to cover n% of execution
> > + *
> > + * @percentage: cover set percentage
> > + * @use_monitor: redirect output to monitor
> > + *
> > + * Report the hottest blocks to either the log or monitor
> > + */
> > +void dump_coverset_info(int percentage, bool use_monitor);
> > +
> > +
> > +/**
> > + * dump_tbs_info: report the hottest blocks
> > + *
> > + * @count: the limit of hotblocks
> > + * @sort_by: property in which the dump will be sorted
> > + * @use_monitor: redirect output to monitor
> > + *
> > + * Report the hottest blocks to either the log or monitor
> > + */
> > +void dump_tbs_info(int count, int sort_by, bool use_monitor);
> > +
> > +/**
> > + * dump_tb_info: dump information about one TB
> > + *
> > + * @id: the display id of the block (from previous search)
> > + * @log_mask: the temporary logging mask
> > + * @use_monitor: redirect output to monitor
> > + *
> > + * Re-run a translation of the block for the purposes of debug output
> > + */
> > +void dump_tb_info(int id, int log_mask, bool use_monitor);
> > +
> > +
> >  /* TBStatistic collection controls */
> >  void enable_collect_tb_stats(void);
> >  void disable_collect_tb_stats(void);
> > diff --git a/include/qemu/log-for-trace.h b/include/qemu/log-for-trace.h
> > index 2f0a5b080e..3de88484cb 100644
> > --- a/include/qemu/log-for-trace.h
> > +++ b/include/qemu/log-for-trace.h
> > @@ -20,6 +20,9 @@
> >
> >  /* Private global variable, don't use */
> >  extern int qemu_loglevel;
> > +extern bool to_string;
> > +
> > +extern int32_t max_num_hot_tbs_to_dump;
> >
> >  #define LOG_TRACE          (1 << 15)
> >
> > @@ -31,5 +34,6 @@ static inline bool qemu_loglevel_mask(int mask)
> >
> >  /* main logging function */
> >  int GCC_FMT_ATTR(1, 2) qemu_log(const char *fmt, ...);
> > +int qemu_vlog(const char *fmt, va_list va);
> >
> >  #endif
> > diff --git a/include/qemu/log.h b/include/qemu/log.h
> > index a8d1997cde..804cf90f0f 100644
> > --- a/include/qemu/log.h
> > +++ b/include/qemu/log.h
> > @@ -114,6 +114,8 @@ typedef struct QEMULogItem {
> >  extern const QEMULogItem qemu_log_items[];
> >
> >  void qemu_set_log(int log_flags);
> > +void qemu_log_to_monitor(bool enable);
> > +void qemu_log_to_string(bool enable, GString *s);
> >  void qemu_log_needs_buffers(void);
> >  void qemu_set_log_filename(const char *filename, Error **errp);
> >  void qemu_set_dfilter_ranges(const char *ranges, Error **errp);
> > diff --git a/monitor/misc.c b/monitor/misc.c
> > index 6902e8addb..4af70dba92 100644
> > --- a/monitor/misc.c
> > +++ b/monitor/misc.c
> > @@ -504,6 +504,80 @@ static void hmp_tbstats(Monitor *mon, const QDict *qdict)
> >
> >  }
> >
> > +static void hmp_info_tbs(Monitor *mon, const QDict *qdict)
> > +{
> > +    int number_int;
> > +    const char *sortedby_str = NULL;
> > +    if (!tcg_enabled()) {
> > +        error_report("TB information is only available with accel=tcg");
> > +        return;
> > +    }
> > +    if (!tb_ctx.tb_stats.map) {
> > +        error_report("no TB information recorded");
> > +        return;
> > +    }
> > +
> > +    number_int = qdict_get_try_int(qdict, "number", 10);
> > +    sortedby_str = qdict_get_try_str(qdict, "sortedby");
> > +
> > +    int sortedby = SORT_BY_HOTNESS;
> > +    if (sortedby_str == NULL || strcmp(sortedby_str, "hotness") == 0) {
> > +        sortedby = SORT_BY_HOTNESS;
> > +    } else if (strcmp(sortedby_str, "hg") == 0) {
> > +        sortedby = SORT_BY_HG;
> > +    } else if (strcmp(sortedby_str, "spills") == 0) {
> > +        sortedby = SORT_BY_SPILLS;
> > +    } else {
> > +        error_report("valid sort options are: hotness hg spills");
> > +        return;
> > +    }
> > +
> > +    dump_tbs_info(number_int, sortedby, true);
> > +}
> > +
> > +static void hmp_info_tb(Monitor *mon, const QDict *qdict)
> > +{
> > +    const int id = qdict_get_int(qdict, "id");
> > +    const char *flags = qdict_get_try_str(qdict, "flags");
> > +    int mask;
> > +
> > +    if (!tcg_enabled()) {
> > +        error_report("TB information is only available with accel=tcg");
> > +        return;
> > +    }
> > +
> > +    mask = flags ? qemu_str_to_log_mask(flags) : CPU_LOG_TB_IN_ASM;
> > +
> > +    if (!mask) {
> > +        error_report("Unable to parse log flags, see 'help log'");
> > +        return;
> > +    }
> > +
> > +    dump_tb_info(id, mask, true);
> > +}
> > +
> > +static void hmp_info_coverset(Monitor *mon, const QDict *qdict)
> > +{
> > +    int coverage;
> > +    if (!tcg_enabled()) {
> > +        error_report("TB information is only available with accel=tcg");
> > +        return;
> > +    }
> > +    if (!tb_stats_collection_enabled()) {
> > +        error_report("TB information not being recorded");
> > +        return;
> > +    }
> > +
> > +    coverage = qdict_get_try_int(qdict, "coverage", 90);
> > +
> > +    if (coverage < 0 || coverage > 100) {
> > +        error_report("Coverset percentage should be between 0 and 100");
> > +        return;
> > +    }
> > +
> > +    dump_coverset_info(coverage, true);
> > +}
> > +
> >  static void hmp_info_jit(Monitor *mon, const QDict *qdict)
> >  {
> >      if (!tcg_enabled()) {
> > diff --git a/util/log.c b/util/log.c
> > index 09cfb13b45..7d28a844c1 100644
> > --- a/util/log.c
> > +++ b/util/log.c
> > @@ -33,28 +33,58 @@ int qemu_loglevel;
> >  static int log_append = 0;
> >  static GArray *debug_regions;
> >  int32_t max_num_hot_tbs_to_dump;
> > +static bool to_monitor;
> > +bool to_string;
> >
> >  int tcg_collect_tb_stats;
> >  uint32_t default_tbstats_flag;
> >
> > -/* Return the number of characters emitted.  */
> > -int qemu_log(const char *fmt, ...)
> > +GString *string;
> > +
> > +int qemu_vlog(const char *fmt, va_list va)
> >  {
> >      int ret = 0;
> > -    if (qemu_logfile) {
> > -        va_list ap;
> > -        va_start(ap, fmt);
> > -        ret = vfprintf(qemu_logfile, fmt, ap);
> > -        va_end(ap);
> > -
> > -        /* Don't pass back error results.  */
> > -        if (ret < 0) {
> > -            ret = 0;
> > +    if (to_string) {
> > +        if (string) {
> > +            g_string_append_vprintf(string, fmt, va);
> >          }
> > +    } else if (to_monitor) {
> > +        ret = qemu_vprintf(fmt, va);
> > +    } else if (qemu_logfile) {
> > +        ret = vfprintf(qemu_logfile, fmt, va);
> > +    }
> > +
> > +    /* Don't pass back error results.  */
> > +    if (ret < 0) {
> > +        ret = 0;
> >      }
> >      return ret;
> >  }
> >
> > +/* Return the number of characters emitted.  */
> > +int qemu_log(const char *fmt, ...)
> > +{
> > +    int ret = 0;
> > +    va_list ap;
> > +    va_start(ap, fmt);
> > +
> > +    ret = qemu_vlog(fmt, ap);
> > +
> > +    va_end(ap);
> > +    return ret;
> > +}
> > +
> > +void qemu_log_to_monitor(bool enable)
> > +{
> > +    to_monitor = enable;
> > +}
> > +
> > +void qemu_log_to_string(bool enable, GString *s)
> > +{
> > +    to_string = enable;
> > +    string = s;
> > +}
> > +
> >  static bool log_uses_own_buffers;
> >
> >  /* enable or disable low levels log */
> > --
> > 2.22.0
> >
> --
> Dr. David Alan Gilbert / address@hidden / Manchester, UK
>
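
If we do go with the rename, the HMP side would basically just change the
.name and help text of the entries above in hmp-commands-info.hx -- something
like this sketch, keeping the current args_type and handlers:

    {
        .name       = "tb-list",
        .args_type  = "number:i?,sortedby:s?",
        .params     = "[number sortedby]",
        .help       = "show [number] translated blocks sorted by [sortedby]; "
                      "sortedby opts: hotness hg spills",
        .cmd        = hmp_info_tbs,
    },
    {
        .name       = "tb",
        .args_type  = "id:i,flags:s?",
        .params     = "id [log1[,...] flags]",
        .help       = "show information about one translated block by id; "
                      "a dump flag can be used to set the dump code level: out_asm in_asm op",
        .cmd        = hmp_info_tb,
    },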

