From: Claudio Fontana
Subject: [PATCH v27 18/20] target/i386: gdbstub: introduce aux functions to read/write CS64 regs
Date: Tue, 16 Mar 2021 19:30:33 +0100
A number of registers are read as 64-bit under the condition

  (hflags & HF_CS64_MASK) || GDB_FORCE_64

(where GDB_FORCE_64 is 1 on a TARGET_X86_64 build), and a number of
registers are written as 64-bit only under the condition

  (hflags & HF_CS64_MASK).

Provide some auxiliary functions that implement these two checks.
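
For readers skimming the patch, a minimal standalone sketch of the
size-selection logic follows. It is illustrative only: HF_CS64_MASK's
value, GDB_FORCE_64's definition, the plain memcpy buffer handling and
the main() driver are simplified stand-ins, not QEMU's definitions; the
real helpers use gdb_get_reg64/gdb_get_reg32 and ldq_p/ldl_p, which also
take care of target endianness.

/*
 * Standalone sketch, not QEMU code: illustrates only the read/write
 * size selection described above.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HF_CS64_MASK (1u << 15)   /* placeholder "CS is 64-bit" flag */
#define GDB_FORCE_64 1            /* 1 on a TARGET_X86_64 build */

/* Read side: report 8 bytes whenever CS is 64-bit or the build forces it. */
static int sketch_read_reg_cs64(uint32_t hflags, uint8_t *buf, uint64_t val)
{
    if ((hflags & HF_CS64_MASK) || GDB_FORCE_64) {
        memcpy(buf, &val, 8);
        return 8;
    }
    uint32_t v32 = (uint32_t)val;
    memcpy(buf, &v32, 4);
    return 4;
}

/* Write side: only a 64-bit code segment consumes an 8-byte value. */
static int sketch_write_reg_cs64(uint32_t hflags, const uint8_t *buf,
                                 uint64_t *val)
{
    if (hflags & HF_CS64_MASK) {
        memcpy(val, buf, 8);
        return 8;
    }
    uint32_t v32;
    memcpy(&v32, buf, 4);
    *val = v32;
    return 4;
}

int main(void)
{
    uint8_t buf[8] = { 0 };
    uint64_t reg = 0;

    /* hflags == 0 models a 32-bit code segment: the read is still sent
       as 64-bit because of GDB_FORCE_64, while the write consumes only
       4 bytes of the incoming buffer. */
    printf("read length:  %d\n", sketch_read_reg_cs64(0, buf, 0x123456789aULL));
    printf("write length: %d\n", sketch_write_reg_cs64(0, buf, &reg));
    return 0;
}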
Signed-off-by: Claudio Fontana <cfontana@suse.de>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
target/i386/gdbstub.c | 155 ++++++++++++++----------------------------
1 file changed, 51 insertions(+), 104 deletions(-)
diff --git a/target/i386/gdbstub.c b/target/i386/gdbstub.c
index 41e265fc67..4ad1295425 100644
--- a/target/i386/gdbstub.c
+++ b/target/i386/gdbstub.c
@@ -78,6 +78,23 @@ static const int gpr_map32[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
#define GDB_FORCE_64 0
#endif
+static int gdb_read_reg_cs64(uint32_t hflags, GByteArray *buf, target_ulong val)
+{
+ if ((hflags & HF_CS64_MASK) || GDB_FORCE_64) {
+ return gdb_get_reg64(buf, val);
+ }
+ return gdb_get_reg32(buf, val);
+}
+
+static int gdb_write_reg_cs64(uint32_t hflags, uint8_t *buf, target_ulong *val)
+{
+ if (hflags & HF_CS64_MASK) {
+ *val = ldq_p(buf);
+ return 8;
+ }
+ *val = ldl_p(buf);
+ return 4;
+}
int x86_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
{
@@ -142,25 +159,14 @@ int x86_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
return gdb_get_reg32(mem_buf, env->segs[R_FS].selector);
case IDX_SEG_REGS + 5:
return gdb_get_reg32(mem_buf, env->segs[R_GS].selector);
-
case IDX_SEG_REGS + 6:
- if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
- return gdb_get_reg64(mem_buf, env->segs[R_FS].base);
- }
- return gdb_get_reg32(mem_buf, env->segs[R_FS].base);
-
+ return gdb_read_reg_cs64(env->hflags, mem_buf, env->segs[R_FS].base);
case IDX_SEG_REGS + 7:
- if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
- return gdb_get_reg64(mem_buf, env->segs[R_GS].base);
- }
- return gdb_get_reg32(mem_buf, env->segs[R_GS].base);
+ return gdb_read_reg_cs64(env->hflags, mem_buf, env->segs[R_GS].base);
case IDX_SEG_REGS + 8:
#ifdef TARGET_X86_64
- if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
- return gdb_get_reg64(mem_buf, env->kernelgsbase);
- }
- return gdb_get_reg32(mem_buf, env->kernelgsbase);
+ return gdb_read_reg_cs64(env->hflags, mem_buf, env->kernelgsbase);
#else
return gdb_get_reg32(mem_buf, 0);
#endif
@@ -188,45 +194,23 @@ int x86_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
return gdb_get_reg32(mem_buf, env->mxcsr);
case IDX_CTL_CR0_REG:
- if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
- return gdb_get_reg64(mem_buf, env->cr[0]);
- }
- return gdb_get_reg32(mem_buf, env->cr[0]);
-
+ return gdb_read_reg_cs64(env->hflags, mem_buf, env->cr[0]);
case IDX_CTL_CR2_REG:
- if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
- return gdb_get_reg64(mem_buf, env->cr[2]);
- }
- return gdb_get_reg32(mem_buf, env->cr[2]);
-
+ return gdb_read_reg_cs64(env->hflags, mem_buf, env->cr[2]);
case IDX_CTL_CR3_REG:
- if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
- return gdb_get_reg64(mem_buf, env->cr[3]);
- }
- return gdb_get_reg32(mem_buf, env->cr[3]);
-
+ return gdb_read_reg_cs64(env->hflags, mem_buf, env->cr[3]);
case IDX_CTL_CR4_REG:
- if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
- return gdb_get_reg64(mem_buf, env->cr[4]);
- }
- return gdb_get_reg32(mem_buf, env->cr[4]);
-
+ return gdb_read_reg_cs64(env->hflags, mem_buf, env->cr[4]);
case IDX_CTL_CR8_REG:
-#ifdef CONFIG_SOFTMMU
+#ifndef CONFIG_USER_ONLY
tpr = cpu_get_apic_tpr(cpu->apic_state);
#else
tpr = 0;
#endif
- if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
- return gdb_get_reg64(mem_buf, tpr);
- }
- return gdb_get_reg32(mem_buf, tpr);
+ return gdb_read_reg_cs64(env->hflags, mem_buf, tpr);
case IDX_CTL_EFER_REG:
- if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
- return gdb_get_reg64(mem_buf, env->efer);
- }
- return gdb_get_reg32(mem_buf, env->efer);
+ return gdb_read_reg_cs64(env->hflags, mem_buf, env->efer);
}
}
return 0;
@@ -266,7 +250,8 @@ int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
- uint32_t tmp;
+ target_ulong tmp;
+ int len;
/* N.B. GDB can't deal with changes in registers or sizes in the middle
of a session. So if we're in 32-bit mode on a 64-bit cpu, still act
@@ -329,30 +314,13 @@ int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
return x86_cpu_gdb_load_seg(cpu, R_FS, mem_buf);
case IDX_SEG_REGS + 5:
return x86_cpu_gdb_load_seg(cpu, R_GS, mem_buf);
-
case IDX_SEG_REGS + 6:
- if (env->hflags & HF_CS64_MASK) {
- env->segs[R_FS].base = ldq_p(mem_buf);
- return 8;
- }
- env->segs[R_FS].base = ldl_p(mem_buf);
- return 4;
-
+ return gdb_write_reg_cs64(env->hflags, mem_buf, &env->segs[R_FS].base);
case IDX_SEG_REGS + 7:
- if (env->hflags & HF_CS64_MASK) {
- env->segs[R_GS].base = ldq_p(mem_buf);
- return 8;
- }
- env->segs[R_GS].base = ldl_p(mem_buf);
- return 4;
-
+ return gdb_write_reg_cs64(env->hflags, mem_buf, &env->segs[R_GS].base);
case IDX_SEG_REGS + 8:
#ifdef TARGET_X86_64
- if (env->hflags & HF_CS64_MASK) {
- env->kernelgsbase = ldq_p(mem_buf);
- return 8;
- }
- env->kernelgsbase = ldl_p(mem_buf);
+ return gdb_write_reg_cs64(env->hflags, mem_buf, &env->kernelgsbase);
#endif
return 4;
@@ -382,57 +350,36 @@ int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
return 4;
case IDX_CTL_CR0_REG:
- if (env->hflags & HF_CS64_MASK) {
- cpu_x86_update_cr0(env, ldq_p(mem_buf));
- return 8;
- }
- cpu_x86_update_cr0(env, ldl_p(mem_buf));
- return 4;
+ len = gdb_write_reg_cs64(env->hflags, mem_buf, &tmp);
+ cpu_x86_update_cr0(env, tmp);
+ return len;
case IDX_CTL_CR2_REG:
- if (env->hflags & HF_CS64_MASK) {
- env->cr[2] = ldq_p(mem_buf);
- return 8;
- }
- env->cr[2] = ldl_p(mem_buf);
- return 4;
+ len = gdb_write_reg_cs64(env->hflags, mem_buf, &tmp);
+ env->cr[2] = tmp;
+ return len;
case IDX_CTL_CR3_REG:
- if (env->hflags & HF_CS64_MASK) {
- cpu_x86_update_cr3(env, ldq_p(mem_buf));
- return 8;
- }
- cpu_x86_update_cr3(env, ldl_p(mem_buf));
- return 4;
+ len = gdb_write_reg_cs64(env->hflags, mem_buf, &tmp);
+ cpu_x86_update_cr3(env, tmp);
+ return len;
case IDX_CTL_CR4_REG:
- if (env->hflags & HF_CS64_MASK) {
- cpu_x86_update_cr4(env, ldq_p(mem_buf));
- return 8;
- }
- cpu_x86_update_cr4(env, ldl_p(mem_buf));
- return 4;
+ len = gdb_write_reg_cs64(env->hflags, mem_buf, &tmp);
+ cpu_x86_update_cr4(env, tmp);
+ return len;
case IDX_CTL_CR8_REG:
- if (env->hflags & HF_CS64_MASK) {
-#ifdef CONFIG_SOFTMMU
- cpu_set_apic_tpr(cpu->apic_state, ldq_p(mem_buf));
+ len = gdb_write_reg_cs64(env->hflags, mem_buf, &tmp);
+#ifndef CONFIG_USER_ONLY
+ cpu_set_apic_tpr(cpu->apic_state, tmp);
#endif
- return 8;
- }
-#ifdef CONFIG_SOFTMMU
- cpu_set_apic_tpr(cpu->apic_state, ldl_p(mem_buf));
-#endif
- return 4;
+ return len;
case IDX_CTL_EFER_REG:
- if (env->hflags & HF_CS64_MASK) {
- cpu_load_efer(env, ldq_p(mem_buf));
- return 8;
- }
- cpu_load_efer(env, ldl_p(mem_buf));
- return 4;
-
+ len = gdb_write_reg_cs64(env->hflags, mem_buf, &tmp);
+ cpu_load_efer(env, tmp);
+ return len;
}
}
/* Unrecognised register. */
--
2.26.2