[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH 11/23] i386: Add CHECK_NO_VEX
From: Paolo Bonzini
Subject: [PATCH 11/23] i386: Add CHECK_NO_VEX
Date: Sat, 27 Aug 2022 01:11:52 +0200
From: Paul Brook <paul@nowt.org>
Reject invalid VEX encodings on MMX instructions.
Signed-off-by: Paul Brook <paul@nowt.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20220424220204.2493824-7-paul@nowt.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
target/i386/tcg/translate.c | 26 ++++++++++++++++++++++++++
1 file changed, 26 insertions(+)
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
index f7e8cab52d..f155cbb667 100644
--- a/target/i386/tcg/translate.c
+++ b/target/i386/tcg/translate.c
@@ -3186,6 +3186,12 @@ static const struct SSEOpHelper_table7
sse_op_table7[256] = {
#undef BLENDV_OP
#undef SPECIAL_OP
+/* VEX prefix not allowed */
+#define CHECK_NO_VEX(s) do { \
+ if (s->prefix & PREFIX_VEX) \
+ goto illegal_op; \
+ } while (0)
+
static void gen_sse(CPUX86State *env, DisasContext *s, int b,
target_ulong pc_start)
{
@@ -3272,6 +3278,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s,
int b,
b |= (b1 << 8);
switch(b) {
case 0x0e7: /* movntq */
+ CHECK_NO_VEX(s);
if (mod == 3) {
goto illegal_op;
}
@@ -3307,6 +3314,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s,
int b,
}
break;
case 0x6e: /* movd mm, ea */
+ CHECK_NO_VEX(s);
#ifdef TARGET_X86_64
if (s->dflag == MO_64) {
gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
@@ -3338,6 +3346,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s,
int b,
}
break;
case 0x6f: /* movq mm, ea */
+ CHECK_NO_VEX(s);
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_ldq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
@@ -3473,6 +3482,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s,
int b,
break;
case 0x178:
case 0x378:
+ CHECK_NO_VEX(s);
{
int bit_index, field_length;
@@ -3492,6 +3502,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s,
int b,
}
break;
case 0x7e: /* movd ea, mm */
+ CHECK_NO_VEX(s);
#ifdef TARGET_X86_64
if (s->dflag == MO_64) {
tcg_gen_ld_i64(s->T0, cpu_env,
@@ -3532,6 +3543,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s,
int b,
gen_op_movq_env_0(s, offsetof(CPUX86State,
xmm_regs[reg].ZMM_Q(1)));
break;
case 0x7f: /* movq ea, mm */
+ CHECK_NO_VEX(s);
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
@@ -3614,6 +3626,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s,
int b,
offsetof(CPUX86State, xmm_t0.ZMM_L(1)));
op1_offset = offsetof(CPUX86State,xmm_t0);
} else {
+ CHECK_NO_VEX(s);
tcg_gen_movi_tl(s->T0, val);
tcg_gen_st32_tl(s->T0, cpu_env,
offsetof(CPUX86State, mmx_t0.MMX_L(0)));
@@ -3653,6 +3666,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s,
int b,
break;
case 0x02a: /* cvtpi2ps */
case 0x12a: /* cvtpi2pd */
+ CHECK_NO_VEX(s);
gen_helper_enter_mmx(cpu_env);
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
@@ -3698,6 +3712,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s,
int b,
case 0x12c: /* cvttpd2pi */
case 0x02d: /* cvtps2pi */
case 0x12d: /* cvtpd2pi */
+ CHECK_NO_VEX(s);
gen_helper_enter_mmx(cpu_env);
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
@@ -3771,6 +3786,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s,
int b,
tcg_gen_st16_tl(s->T0, cpu_env,
offsetof(CPUX86State,xmm_regs[reg].ZMM_W(val)));
} else {
+ CHECK_NO_VEX(s);
val &= 3;
tcg_gen_st16_tl(s->T0, cpu_env,
offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
@@ -3810,6 +3826,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s,
int b,
}
break;
case 0x2d6: /* movq2dq */
+ CHECK_NO_VEX(s);
gen_helper_enter_mmx(cpu_env);
rm = (modrm & 7);
gen_op_movq(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(0)),
@@ -3817,6 +3834,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s,
int b,
gen_op_movq_env_0(s, offsetof(CPUX86State,
xmm_regs[reg].ZMM_Q(1)));
break;
case 0x3d6: /* movdq2q */
+ CHECK_NO_VEX(s);
gen_helper_enter_mmx(cpu_env);
rm = (modrm & 7) | REX_B(s);
gen_op_movq(s, offsetof(CPUX86State, fpregs[reg & 7].mmx),
@@ -3831,6 +3849,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s,
int b,
tcg_gen_addi_ptr(s->ptr0, cpu_env, ZMM_OFFSET(rm));
gen_helper_pmovmskb_xmm(s->tmp2_i32, cpu_env, s->ptr0);
} else {
+ CHECK_NO_VEX(s);
rm = (modrm & 7);
tcg_gen_addi_ptr(s->ptr0, cpu_env,
offsetof(CPUX86State, fpregs[rm].mmx));
@@ -3901,6 +3920,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s,
int b,
}
op6->fn[b1].op1(cpu_env, s->ptr0, s->ptr1);
} else {
+ CHECK_NO_VEX(s);
if ((op6->flags & SSE_OPF_MMX) == 0) {
goto unknown_op;
}
@@ -3934,6 +3954,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s,
int b,
case 0x3f0: /* crc32 Gd,Eb */
case 0x3f1: /* crc32 Gd,Ey */
do_crc32:
+ CHECK_NO_VEX(s);
if (!(s->cpuid_ext_features & CPUID_EXT_SSE42)) {
goto illegal_op;
}
@@ -3956,6 +3977,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s,
int b,
case 0x1f0: /* crc32 or movbe */
case 0x1f1:
+ CHECK_NO_VEX(s);
/* For these insns, the f3 prefix is supposed to have priority
over the 66 prefix, but that's not what we implement above
setting b1. */
@@ -3965,6 +3987,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s,
int b,
/* FALLTHRU */
case 0x0f0: /* movbe Gy,My */
case 0x0f1: /* movbe My,Gy */
+ CHECK_NO_VEX(s);
if (!(s->cpuid_ext_features & CPUID_EXT_MOVBE)) {
goto illegal_op;
}
@@ -4131,6 +4154,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s,
int b,
case 0x1f6: /* adcx Gy, Ey */
case 0x2f6: /* adox Gy, Ey */
+ CHECK_NO_VEX(s);
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_ADX)) {
goto illegal_op;
} else {
@@ -4436,6 +4460,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s,
int b,
}
if (b1 == 0) {
+ CHECK_NO_VEX(s);
/* MMX */
if ((op7->flags & SSE_OPF_MMX) == 0) {
goto illegal_op;
@@ -4582,6 +4607,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s,
int b,
op2_offset = ZMM_OFFSET(rm);
}
} else {
+ CHECK_NO_VEX(s);
op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
--
2.37.1
- [PATCH 01/23] i386: do not use MOVL to move data between SSE registers, (continued)
- [PATCH 01/23] i386: do not use MOVL to move data between SSE registers, Paolo Bonzini, 2022/08/26
- [PATCH 04/23] i386: Rework sse_op_table1, Paolo Bonzini, 2022/08/26
- [PATCH 05/23] i386: Rework sse_op_table6/7, Paolo Bonzini, 2022/08/26
- [PATCH 06/23] i386: Move 3DNOW decoder, Paolo Bonzini, 2022/08/26
- [PATCH 07/23] i386: check SSE table flags instead of hardcoding opcodes, Paolo Bonzini, 2022/08/26
- [PATCH 08/23] i386: isolate MMX code more, Paolo Bonzini, 2022/08/26
- [PATCH 09/23] i386: Add size suffix to vector FP helpers, Paolo Bonzini, 2022/08/26
- [PATCH 11/23] i386: Add CHECK_NO_VEX,
Paolo Bonzini <=
- [PATCH 10/23] i386: do not cast gen_helper_* function pointers, Paolo Bonzini, 2022/08/26
- [PATCH 12/23] i386: Rewrite vector shift helper, Paolo Bonzini, 2022/08/26
- [PATCH 23/23] i386: AVX+AES helpers prep, Paolo Bonzini, 2022/08/26
- [PATCH 19/23] i386: Destructive FP helpers for AVX, Paolo Bonzini, 2022/08/26
- [PATCH 13/23] i386: Rewrite simple integer vector helpers, Paolo Bonzini, 2022/08/26
- [PATCH 14/23] i386: Misc integer AVX helper prep, Paolo Bonzini, 2022/08/26
- [PATCH 15/23] i386: Destructive vector helpers for AVX, Paolo Bonzini, 2022/08/26