[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH v3 13/41] tcg: Use Error with alloc_code_gen_buffer
From: Richard Henderson
Subject: [PATCH v3 13/41] tcg: Use Error with alloc_code_gen_buffer
Date: Thu, 5 Nov 2020 19:28:53 -0800
Report better error messages than just "could not allocate".
Let alloc_code_gen_buffer set ctx->code_gen_buffer_size
and ctx->code_gen_buffer, and simply return bool.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/translate-all.c | 60 ++++++++++++++++++++++-----------------
1 file changed, 34 insertions(+), 26 deletions(-)
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index 7b85ddacd2..2824b3e387 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -59,6 +59,7 @@
#include "sysemu/cpus.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/tcg.h"
+#include "qapi/error.h"
/* #define DEBUG_TB_INVALIDATE */
/* #define DEBUG_TB_FLUSH */
@@ -963,7 +964,7 @@ static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
(DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
-static inline size_t size_code_gen_buffer(size_t tb_size)
+static size_t size_code_gen_buffer(size_t tb_size)
{
/* Size the buffer. */
if (tb_size == 0) {
@@ -1014,7 +1015,7 @@ static inline void *split_cross_256mb(void *buf1, size_t size1)
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
__attribute__((aligned(CODE_GEN_ALIGN)));
-static inline void *alloc_code_gen_buffer(void)
+static bool alloc_code_gen_buffer(size_t tb_size, Error **errp)
{
void *buf = static_code_gen_buffer;
void *end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
@@ -1027,9 +1028,8 @@ static inline void *alloc_code_gen_buffer(void)
size = end - buf;
/* Honor a command-line option limiting the size of the buffer. */
- if (size > tcg_ctx->code_gen_buffer_size) {
- size = QEMU_ALIGN_DOWN(tcg_ctx->code_gen_buffer_size,
- qemu_real_host_page_size);
+ if (size > tb_size) {
+ size = QEMU_ALIGN_DOWN(tb_size, qemu_real_host_page_size);
}
tcg_ctx->code_gen_buffer_size = size;
@@ -1041,31 +1041,43 @@ static inline void *alloc_code_gen_buffer(void)
#endif
if (qemu_mprotect_rwx(buf, size)) {
- abort();
+ error_setg_errno(errp, errno, "mprotect of jit buffer");
+ return false;
}
qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
- return buf;
+ tcg_ctx->code_gen_buffer = buf;
+ return true;
}
#elif defined(_WIN32)
-static inline void *alloc_code_gen_buffer(void)
+static bool alloc_code_gen_buffer(size_t size, Error **errp)
{
- size_t size = tcg_ctx->code_gen_buffer_size;
- return VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
- PAGE_EXECUTE_READWRITE);
+ void *buf = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
+ PAGE_EXECUTE_READWRITE);
+ if (buf == NULL) {
+ error_setg_win32(errp, GetLastError(),
+ "allocate %zu bytes for jit buffer", size);
+ return false;
+ }
+
+ tcg_ctx->code_gen_buffer = buf;
+ tcg_ctx->code_gen_buffer_size = size;
+ return true;
}
#else
-static inline void *alloc_code_gen_buffer(void)
+static bool alloc_code_gen_buffer(size_t size, Error **errp)
{
int prot = PROT_WRITE | PROT_READ | PROT_EXEC;
int flags = MAP_PRIVATE | MAP_ANONYMOUS;
- size_t size = tcg_ctx->code_gen_buffer_size;
void *buf;
buf = mmap(NULL, size, prot, flags, -1, 0);
if (buf == MAP_FAILED) {
- return NULL;
+ error_setg_errno(errp, errno,
+ "allocate %zu bytes for jit buffer", size);
+ return false;
}
+ tcg_ctx->code_gen_buffer_size = size;
#ifdef __mips__
if (cross_256mb(buf, size)) {
@@ -1104,20 +1116,11 @@ static inline void *alloc_code_gen_buffer(void)
/* Request large pages for the buffer. */
qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
- return buf;
+ tcg_ctx->code_gen_buffer = buf;
+ return true;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
-static inline void code_gen_alloc(size_t tb_size)
-{
- tcg_ctx->code_gen_buffer_size = size_code_gen_buffer(tb_size);
- tcg_ctx->code_gen_buffer = alloc_code_gen_buffer();
- if (tcg_ctx->code_gen_buffer == NULL) {
- fprintf(stderr, "Could not allocate dynamic translator buffer\n");
- exit(1);
- }
-}
-
static bool tb_cmp(const void *ap, const void *bp)
{
const TranslationBlock *a = ap;
@@ -1144,11 +1147,16 @@ static void tb_htable_init(void)
size. */
void tcg_exec_init(unsigned long tb_size)
{
+ bool ok;
+
tcg_allowed = true;
cpu_gen_init();
page_init();
tb_htable_init();
- code_gen_alloc(tb_size);
+
+ ok = alloc_code_gen_buffer(size_code_gen_buffer(tb_size), &error_fatal);
+ assert(ok);
+
#if defined(CONFIG_SOFTMMU)
/* There's no guest base to take into account, so go ahead and
initialize the prologue now. */
--
2.25.1
- [PATCH v3 02/41] tcg: Move tcg prologue pointer out of TCGContext, (continued)
- [PATCH v3 02/41] tcg: Move tcg prologue pointer out of TCGContext, Richard Henderson, 2020/11/05
- [PATCH v3 05/41] tcg: Introduce tcg_splitwx_to_{rx,rw}, Richard Henderson, 2020/11/05
- [PATCH v3 06/41] tcg: Adjust TCGLabel for const, Richard Henderson, 2020/11/05
- [PATCH v3 07/41] tcg: Adjust tcg_out_call for const, Richard Henderson, 2020/11/05
- [PATCH v3 08/41] tcg: Adjust tcg_out_label for const, Richard Henderson, 2020/11/05
- [PATCH v3 09/41] tcg: Adjust tcg_register_jit for const, Richard Henderson, 2020/11/05
- [PATCH v3 10/41] tcg: Adjust tb_target_set_jmp_target for split-wx, Richard Henderson, 2020/11/05
- [PATCH v3 11/41] tcg: Make DisasContextBase.tb const, Richard Henderson, 2020/11/05
- [PATCH v3 12/41] tcg: Make tb arg to synchronize_from_tb const, Richard Henderson, 2020/11/05
- [PATCH v3 13/41] tcg: Use Error with alloc_code_gen_buffer,
Richard Henderson <=
- [PATCH v3 14/41] tcg: Add --accel tcg,split-wx property, Richard Henderson, 2020/11/05
- [PATCH v3 15/41] accel/tcg: Support split-wx for linux with memfd, Richard Henderson, 2020/11/05
- [PATCH v3 18/41] tcg/i386: Support split-wx code generation, Richard Henderson, 2020/11/05
- [PATCH v3 16/41] accel/tcg: Support split-wx for darwin/iOS with vm_remap, Richard Henderson, 2020/11/05
- [PATCH v3 19/41] tcg/aarch64: Use B not BL for tcg_out_goto_long, Richard Henderson, 2020/11/05