[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH v3 21/28] tcg: Allocate code_gen_buffer into struct tcg_region_state
From: |
Richard Henderson |
Subject: |
[PATCH v3 21/28] tcg: Allocate code_gen_buffer into struct tcg_region_state |
Date: |
Sun, 2 May 2021 16:18:37 -0700 |
Do not mess around with setting values within tcg_init_ctx.
Put the values into 'region' directly, which is where they
will live for the lifetime of the program.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/region.c | 64 ++++++++++++++++++++++------------------------------
1 file changed, 27 insertions(+), 37 deletions(-)
diff --git a/tcg/region.c b/tcg/region.c
index 893256f9f4..d6499f7d98 100644
--- a/tcg/region.c
+++ b/tcg/region.c
@@ -70,13 +70,12 @@ static size_t tree_size;
bool in_code_gen_buffer(const void *p)
{
- const TCGContext *s = &tcg_init_ctx;
/*
* Much like it is valid to have a pointer to the byte past the
* end of an array (so long as you don't dereference it), allow
* a pointer to the byte past the end of the code gen buffer.
*/
- return (size_t)(p - s->code_gen_buffer) <= s->code_gen_buffer_size;
+ return (size_t)(p - region.start_aligned) <= region.total_size;
}
#ifdef CONFIG_DEBUG_TCG
@@ -557,8 +556,8 @@ static bool alloc_code_gen_buffer(size_t tb_size, int
splitwx, Error **errp)
}
qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
- tcg_ctx->code_gen_buffer = buf;
- tcg_ctx->code_gen_buffer_size = size;
+ region.start_aligned = buf;
+ region.total_size = size;
return true;
}
#elif defined(_WIN32)
@@ -579,8 +578,8 @@ static bool alloc_code_gen_buffer(size_t size, int splitwx,
Error **errp)
return false;
}
- tcg_ctx->code_gen_buffer = buf;
- tcg_ctx->code_gen_buffer_size = size;
+ region.start_aligned = buf;
+ region.total_size = size;
return true;
}
#else
@@ -595,7 +594,6 @@ static bool alloc_code_gen_buffer_anon(size_t size, int
prot,
"allocate %zu bytes for jit buffer", size);
return false;
}
- tcg_ctx->code_gen_buffer_size = size;
#ifdef __mips__
if (cross_256mb(buf, size)) {
@@ -633,7 +631,8 @@ static bool alloc_code_gen_buffer_anon(size_t size, int
prot,
/* Request large pages for the buffer. */
qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
- tcg_ctx->code_gen_buffer = buf;
+ region.start_aligned = buf;
+ region.total_size = size;
return true;
}
@@ -654,8 +653,8 @@ static bool alloc_code_gen_buffer_splitwx_memfd(size_t
size, Error **errp)
return false;
}
/* The size of the mapping may have been adjusted. */
- size = tcg_ctx->code_gen_buffer_size;
- buf_rx = tcg_ctx->code_gen_buffer;
+ buf_rx = region.start_aligned;
+ size = region.total_size;
#endif
buf_rw = qemu_memfd_alloc("tcg-jit", size, 0, &fd, errp);
@@ -677,8 +676,8 @@ static bool alloc_code_gen_buffer_splitwx_memfd(size_t
size, Error **errp)
#endif
close(fd);
- tcg_ctx->code_gen_buffer = buf_rw;
- tcg_ctx->code_gen_buffer_size = size;
+ region.start_aligned = buf_rw;
+ region.total_size = size;
tcg_splitwx_diff = buf_rx - buf_rw;
/* Request large pages for the buffer and the splitwx. */
@@ -729,7 +728,7 @@ static bool alloc_code_gen_buffer_splitwx_vmremap(size_t
size, Error **errp)
return false;
}
- buf_rw = (mach_vm_address_t)tcg_ctx->code_gen_buffer;
+ buf_rw = region.start_aligned;
buf_rx = 0;
ret = mach_vm_remap(mach_task_self(),
&buf_rx,
@@ -841,11 +840,8 @@ static bool alloc_code_gen_buffer(size_t size, int
splitwx, Error **errp)
*/
void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
{
- void *buf, *aligned, *end;
- size_t total_size;
size_t page_size;
size_t region_size;
- size_t n_regions;
size_t i;
bool ok;
@@ -853,39 +849,33 @@ void tcg_region_init(size_t tb_size, int splitwx,
unsigned max_cpus)
splitwx, &error_fatal);
assert(ok);
- buf = tcg_init_ctx.code_gen_buffer;
- total_size = tcg_init_ctx.code_gen_buffer_size;
- page_size = qemu_real_host_page_size;
- n_regions = tcg_n_regions(total_size, max_cpus);
-
- /* The first region will be 'aligned - buf' bytes larger than the others */
- aligned = QEMU_ALIGN_PTR_UP(buf, page_size);
- g_assert(aligned < tcg_init_ctx.code_gen_buffer + total_size);
-
/*
* Make region_size a multiple of page_size, using aligned as the start.
* As a result of this we might end up with a few extra pages at the end of
* the buffer; we will assign those to the last region.
*/
- region_size = (total_size - (aligned - buf)) / n_regions;
+ region.n = tcg_n_regions(region.total_size, max_cpus);
+ page_size = qemu_real_host_page_size;
+ region_size = region.total_size / region.n;
region_size = QEMU_ALIGN_DOWN(region_size, page_size);
/* A region must have at least 2 pages; one code, one guard */
g_assert(region_size >= 2 * page_size);
+ region.stride = region_size;
+
+ /* Reserve space for guard pages. */
+ region.size = region_size - page_size;
+ region.total_size -= page_size;
+
+ /*
+ * The first region will be smaller than the others, via the prologue,
+ * which has yet to be allocated. For now, the first region begins at
+ * the page boundary.
+ */
+ region.after_prologue = region.start_aligned;
/* init the region struct */
qemu_mutex_init(&region.lock);
- region.n = n_regions;
- region.size = region_size - page_size;
- region.stride = region_size;
- region.after_prologue = buf;
- region.start_aligned = aligned;
- /* page-align the end, since its last page will be a guard page */
- end = QEMU_ALIGN_PTR_DOWN(buf + total_size, page_size);
- /* account for that last guard page */
- end -= page_size;
- total_size = end - aligned;
- region.total_size = total_size;
/*
* Set guard pages in the rw buffer, as that's the one into which
--
2.25.1
- [PATCH v3 08/28] accel/tcg: Inline cpu_gen_init, (continued)
- [PATCH v3 08/28] accel/tcg: Inline cpu_gen_init, Richard Henderson, 2021/05/02
- [PATCH v3 10/28] accel/tcg: Rename tcg_init to tcg_init_machine, Richard Henderson, 2021/05/02
- [PATCH v3 07/28] tcg: Split out region.c, Richard Henderson, 2021/05/02
- [PATCH v3 15/28] tcg: Move MAX_CODE_GEN_BUFFER_SIZE to tcg-target.h, Richard Henderson, 2021/05/02
- [PATCH v3 11/28] tcg: Create tcg_init, Richard Henderson, 2021/05/02
- [PATCH v3 18/28] tcg: Tidy tcg_n_regions, Richard Henderson, 2021/05/02
- [PATCH v3 16/28] tcg: Replace region.end with region.total_size, Richard Henderson, 2021/05/02
- [PATCH v3 24/28] util/osdep: Add qemu_mprotect_rw, Richard Henderson, 2021/05/02
- [PATCH v3 17/28] tcg: Rename region.start to region.after_prologue, Richard Henderson, 2021/05/02
- [PATCH v3 22/28] tcg: Return the map protection from alloc_code_gen_buffer, Richard Henderson, 2021/05/02
- [PATCH v3 21/28] tcg: Allocate code_gen_buffer into struct tcg_region_state,
Richard Henderson <=
- [PATCH v3 20/28] tcg: Move in_code_gen_buffer and tests to region.c, Richard Henderson, 2021/05/02
- [PATCH v3 23/28] tcg: Sink qemu_madvise call to common code, Richard Henderson, 2021/05/02
- [PATCH v3 27/28] tcg: When allocating for !splitwx, begin with PROT_NONE, Richard Henderson, 2021/05/02
- [PATCH v3 28/28] tcg: Move tcg_init_ctx and tcg_ctx from accel/tcg/, Richard Henderson, 2021/05/02
- [PATCH v3 19/28] tcg: Tidy split_cross_256mb, Richard Henderson, 2021/05/02
- [PATCH v3 26/28] tcg: Merge buffer protection and guard page protection, Richard Henderson, 2021/05/02
- [PATCH v3 09/28] accel/tcg: Move alloc_code_gen_buffer to tcg/region.c, Richard Henderson, 2021/05/02
- [PATCH v3 12/28] accel/tcg: Merge tcg_exec_init into tcg_init_machine, Richard Henderson, 2021/05/02
- [PATCH v3 13/28] accel/tcg: Pass down max_cpus to tcg_init, Richard Henderson, 2021/05/02
- [PATCH v3 14/28] tcg: Introduce tcg_max_ctxs, Richard Henderson, 2021/05/02