[PATCH 5/5] target/arm: MTE tag check stubs
From: Rémi Denis-Courmont <address@hidden>
Subject: [PATCH 5/5] target/arm: MTE tag check stubs
Date: Fri, 13 Mar 2020 16:00:23 +0200
This adds calls to stub functions for memory tag checking. Much like
the SP alignment check stub, the stubs have no effect and are
optimized away at compilation time.
Note: SVE tag checks are not included.
Signed-off-by: Rémi Denis-Courmont <address@hidden>
---
target/arm/translate-a64.c | 88 +++++++++++++++++++++++++++++++++-----
1 file changed, 77 insertions(+), 11 deletions(-)
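As a standalone illustration of the "optimized away" claim above (not
part of the patch; the names below are made up rather than QEMU's): an
empty static function is inlined and its call sites dropped entirely by
gcc and clang at -O1 and above, so the tag check calls added here cost
nothing until the stubs gain a body.

    /* sketch.c - hypothetical example, not QEMU code */
    #include <stdint.h>
    #include <stdbool.h>

    /* Empty stub mirroring the shape of gen_check_tag(). */
    static void check_tag(uint64_t addr, int size, bool iswrite)
    {
        /* Intentionally empty: the optimizer deletes every call. */
        (void)addr; (void)size; (void)iswrite;
    }

    uint64_t load_word(const uint64_t *p)
    {
        /* At -O1 and above this call vanishes entirely. */
        check_tag((uint64_t)(uintptr_t)p, 3, false);
        return *p;
    }

Compiling with "gcc -O2 -S sketch.c" shows load_word() reduced to a
single load instruction, which is why the stubs can be wired into every
load/store path below without any performance cost.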
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index 9382f53794..769206106e 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -855,6 +855,17 @@ static void gen_deposit_tag(TCGv_i64 dest, TCGv_i64 source, TCGv_i64 tag)
tcg_temp_free_i64(tmp);
}
+static void gen_check_tag_memidx(DisasContext *s, TCGv_i64 tcg_addr, int size,
+ bool iswrite, int mem_idx)
+{
+}
+
+static void gen_check_tag(DisasContext *s, TCGv_i64 tcg_addr, int size,
+ bool iswrite)
+{
+ gen_check_tag_memidx(s, tcg_addr, size, iswrite, get_mem_index(s));
+}
+
/*
* Load/Store generators
*/
@@ -2404,13 +2415,16 @@ static void gen_compare_and_swap(DisasContext *s, int rs, int rt,
{
TCGv_i64 tcg_rs = cpu_reg(s, rs);
TCGv_i64 tcg_rt = cpu_reg(s, rt);
+ TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
int memidx = get_mem_index(s);
TCGv_i64 clean_addr;
if (rn == 31) {
gen_check_sp_alignment(s);
+ } else {
+ gen_check_tag(s, tcg_rn, size, true);
}
- clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
+ clean_addr = clean_data_tbi(s, tcg_rn);
tcg_gen_atomic_cmpxchg_i64(tcg_rs, clean_addr, tcg_rs, tcg_rt, memidx,
size | MO_ALIGN | s->be_data);
}
@@ -2422,13 +2436,16 @@ static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
TCGv_i64 s2 = cpu_reg(s, rs + 1);
TCGv_i64 t1 = cpu_reg(s, rt);
TCGv_i64 t2 = cpu_reg(s, rt + 1);
+ TCGv_i64 dirty_addr = cpu_reg_sp(s, rn);
TCGv_i64 clean_addr;
int memidx = get_mem_index(s);
if (rn == 31) {
gen_check_sp_alignment(s);
+ } else {
+ gen_check_tag(s, dirty_addr, size, true);
}
- clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
+ clean_addr = clean_data_tbi(s, dirty_addr);
if (size == 2) {
TCGv_i64 cmp = tcg_temp_new_i64();
@@ -2542,27 +2559,33 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
int is_lasr = extract32(insn, 15, 1);
int o2_L_o1_o0 = extract32(insn, 21, 3) * 2 | is_lasr;
int size = extract32(insn, 30, 2);
- TCGv_i64 clean_addr;
+ TCGv_i64 dirty_addr, clean_addr;
switch (o2_L_o1_o0) {
case 0x0: /* STXR */
case 0x1: /* STLXR */
+ dirty_addr = cpu_reg_sp(s, rn);
if (rn == 31) {
gen_check_sp_alignment(s);
+ } else {
+ gen_check_tag(s, dirty_addr, size, true);
}
if (is_lasr) {
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
}
- clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
+ clean_addr = clean_data_tbi(s, dirty_addr);
gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, false);
return;
case 0x4: /* LDXR */
case 0x5: /* LDAXR */
+ dirty_addr = cpu_reg_sp(s, rn);
if (rn == 31) {
gen_check_sp_alignment(s);
+ } else {
+ gen_check_tag(s, dirty_addr, size, false);
}
- clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
+ clean_addr = clean_data_tbi(s, dirty_addr);
s->is_ldex = true;
gen_load_exclusive(s, rt, rt2, clean_addr, size, false);
if (is_lasr) {
@@ -2578,11 +2601,14 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
/* fall through */
case 0x9: /* STLR */
/* Generate ISS for non-exclusive accesses including LASR. */
+ dirty_addr = cpu_reg_sp(s, rn);
if (rn == 31) {
gen_check_sp_alignment(s);
+ } else {
+ gen_check_tag(s, dirty_addr, size, true);
}
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
- clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
+ clean_addr = clean_data_tbi(s, dirty_addr);
do_gpr_st(s, cpu_reg(s, rt), clean_addr, size, true, rt,
disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
return;
@@ -2595,10 +2621,13 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
/* fall through */
case 0xd: /* LDAR */
/* Generate ISS for non-exclusive accesses including LASR. */
+ dirty_addr = cpu_reg_sp(s, rn);
if (rn == 31) {
gen_check_sp_alignment(s);
+ } else {
+ gen_check_tag(s, dirty_addr, size, false);
}
- clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
+ clean_addr = clean_data_tbi(s, dirty_addr);
do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, false, true, rt,
disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
@@ -2606,13 +2635,16 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
case 0x2: case 0x3: /* CASP / STXP */
if (size & 2) { /* STXP / STLXP */
+ dirty_addr = cpu_reg_sp(s, rn);
if (rn == 31) {
gen_check_sp_alignment(s);
+ } else {
+ gen_check_tag(s, dirty_addr, size, true);
}
if (is_lasr) {
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
}
- clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
+ clean_addr = clean_data_tbi(s, dirty_addr);
gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, true);
return;
}
@@ -2627,10 +2659,13 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
case 0x6: case 0x7: /* CASPA / LDXP */
if (size & 2) { /* LDXP / LDAXP */
+ dirty_addr = cpu_reg_sp(s, rn);
if (rn == 31) {
gen_check_sp_alignment(s);
+ } else {
+ gen_check_tag(s, dirty_addr, size, false);
}
- clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
+ clean_addr = clean_data_tbi(s, dirty_addr);
s->is_ldex = true;
gen_load_exclusive(s, rt, rt2, clean_addr, size, true);
if (is_lasr) {
@@ -2881,6 +2916,7 @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn)
bool is_signed = false;
bool postindex = false;
bool wback = false;
+ bool memtag = false;
TCGv_i64 clean_addr, dirty_addr;
@@ -2902,6 +2938,7 @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn)
return;
}
+ memtag = true;
size = 3; /* sic! */
}
}
@@ -2947,6 +2984,10 @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn)
if (!postindex) {
tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
}
+ if (!memtag && rn != 31) {
+ /* One check covers both registers of the pair, so the size is doubled */
+ gen_check_tag(s, dirty_addr, size + 1, !is_load);
+ }
clean_addr = clean_data_tbi(s, dirty_addr);
if (is_vector) {
@@ -3088,6 +3129,10 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
clean_addr = clean_data_tbi(s, dirty_addr);
if (is_vector) {
+ if (rn != 31) {
+ gen_check_tag(s, dirty_addr, size, is_store);
+ }
+
if (is_store) {
do_fp_st(s, rt, clean_addr, size);
} else {
@@ -3098,6 +3143,10 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
int memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
+ if (rn != 31) {
+ gen_check_tag_memidx(s, dirty_addr, size, is_store, memidx);
+ }
+
if (is_store) {
do_gpr_st_memidx(s, tcg_rt, clean_addr, size, memidx,
iss_valid, rt, iss_sf, false);
@@ -3192,6 +3241,9 @@ static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
ext_and_shift_reg(tcg_rm, tcg_rm, opt, shift ? size : 0);
tcg_gen_add_i64(dirty_addr, dirty_addr, tcg_rm);
+ if (rn != 31) {
+ gen_check_tag(s, dirty_addr, size, is_store);
+ }
clean_addr = clean_data_tbi(s, dirty_addr);
if (is_vector) {
@@ -3277,6 +3329,9 @@ static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
dirty_addr = read_cpu_reg_sp(s, rn, 1);
offset = imm12 << size;
tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
+ if (rn != 31) {
+ gen_check_tag(s, dirty_addr, size, is_store);
+ }
clean_addr = clean_data_tbi(s, dirty_addr);
if (is_vector) {
@@ -3320,7 +3375,7 @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
int o3_opc = extract32(insn, 12, 4);
bool r = extract32(insn, 22, 1);
bool a = extract32(insn, 23, 1);
- TCGv_i64 tcg_rs, clean_addr;
+ TCGv_i64 tcg_rs, dirty_addr, clean_addr;
AtomicThreeOpFn *fn;
if (is_vector || !dc_isar_feature(aa64_atomics, s)) {
@@ -3367,10 +3422,14 @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
return;
}
+ dirty_addr = cpu_reg_sp(s, rn);
+
if (rn == 31) {
gen_check_sp_alignment(s);
+ } else {
+ gen_check_tag(s, dirty_addr, size, true);
}
- clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
+ clean_addr = clean_data_tbi(s, dirty_addr);
if (o3_opc == 014) {
/*
@@ -3446,6 +3505,7 @@ static void disas_ldst_pac(DisasContext *s, uint32_t insn,
offset = sextract32(offset << size, 0, 10 + size);
tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
+ gen_check_tag(s, dirty_addr, size, false);
/* Note that "clean" and "dirty" here refer to TBI not PAC. */
clean_addr = clean_data_tbi(s, dirty_addr);
@@ -3689,6 +3749,9 @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
elements = (is_q ? 16 : 8) / ebytes;
tcg_rn = cpu_reg_sp(s, rn);
+ if (is_postidx || rn != 31) {
+ gen_check_tag(s, tcg_rn, size, is_store);
+ }
clean_addr = clean_data_tbi(s, tcg_rn);
tcg_ebytes = tcg_const_i64(ebytes);
@@ -3832,6 +3895,9 @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
}
tcg_rn = cpu_reg_sp(s, rn);
+ if (is_postidx || rn != 31) {
+ gen_check_tag(s, tcg_rn, size, !is_load);
+ }
clean_addr = clean_data_tbi(s, tcg_rn);
tcg_ebytes = tcg_const_i64(ebytes);
--
2.25.1