[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PULL 37/43] target/arm: Enforce alignment for aa64 load-acq/store-rel
From: Peter Maydell
Subject: [PULL 37/43] target/arm: Enforce alignment for aa64 load-acq/store-rel
Date: Fri, 30 Apr 2021 11:34:31 +0100
From: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-28-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/translate-a64.c | 23 ++++++++++++++---------
1 file changed, 14 insertions(+), 9 deletions(-)
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index b90d6880e78..ac60dcf7602 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -2699,7 +2699,8 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
true, rn != 31, size);
- do_gpr_st(s, cpu_reg(s, rt), clean_addr, size, true, rt,
+ /* TODO: ARMv8.4-LSE SCTLR.nAA */
+ do_gpr_st(s, cpu_reg(s, rt), clean_addr, size | MO_ALIGN, true, rt,
disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
return;
@@ -2716,8 +2717,9 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
}
clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
false, rn != 31, size);
- do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, true, rt,
- disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
+ /* TODO: ARMv8.4-LSE SCTLR.nAA */
+ do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size | MO_ALIGN, false, true,
+ rt, disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
return;
@@ -3505,15 +3507,18 @@ static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn)
int size = extract32(insn, 30, 2);
TCGv_i64 clean_addr, dirty_addr;
bool is_store = false;
- bool is_signed = false;
bool extend = false;
bool iss_sf;
+ MemOp mop;
if (!dc_isar_feature(aa64_rcpc_8_4, s)) {
unallocated_encoding(s);
return;
}
+ /* TODO: ARMv8.4-LSE SCTLR.nAA */
+ mop = size | MO_ALIGN;
+
switch (opc) {
case 0: /* STLURB */
is_store = true;
@@ -3525,21 +3530,21 @@ static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn)
unallocated_encoding(s);
return;
}
- is_signed = true;
+ mop |= MO_SIGN;
break;
case 3: /* LDAPURS* 32-bit variant */
if (size > 1) {
unallocated_encoding(s);
return;
}
- is_signed = true;
+ mop |= MO_SIGN;
extend = true; /* zero-extend 32->64 after signed load */
break;
default:
g_assert_not_reached();
}
- iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
+ iss_sf = disas_ldst_compute_iss_sf(size, (mop & MO_SIGN) != 0, opc);
if (rn == 31) {
gen_check_sp_alignment(s);
@@ -3552,13 +3557,13 @@ static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn)
if (is_store) {
/* Store-Release semantics */
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
- do_gpr_st(s, cpu_reg(s, rt), clean_addr, size, true, rt, iss_sf, true);
+ do_gpr_st(s, cpu_reg(s, rt), clean_addr, mop, true, rt, iss_sf, true);
} else {
/*
* Load-AcquirePC semantics; we implement as the slightly more
* restrictive Load-Acquire.
*/
- do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size + is_signed * MO_SIGN,
+ do_gpr_ld(s, cpu_reg(s, rt), clean_addr, mop,
extend, true, rt, iss_sf, true);
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
}
--
2.20.1
- [PULL 39/43] target/arm: Enforce alignment for aa64 vector LDn/STn (multiple), (continued)
- [PULL 39/43] target/arm: Enforce alignment for aa64 vector LDn/STn (multiple), Peter Maydell, 2021/04/30
- [PULL 36/43] target/arm: Use finalize_memop for aa64 fpr load/store, Peter Maydell, 2021/04/30
- [PULL 35/43] target/arm: Use finalize_memop for aa64 gpr load/store, Peter Maydell, 2021/04/30
- [PULL 40/43] target/arm: Enforce alignment for aa64 vector LDn/STn (single), Peter Maydell, 2021/04/30
- [PULL 43/43] hw/pci-host/gpex: Don't fault for unmapped parts of MMIO and PIO windows, Peter Maydell, 2021/04/30
- [PULL 41/43] target/arm: Enforce alignment for sve LD1R, Peter Maydell, 2021/04/30
- [PULL 30/43] target/arm: Enforce alignment for VLDM/VSTM, Peter Maydell, 2021/04/30
- [PULL 23/43] target/arm: Fix SCTLR_B test for TCGv_i64 load/store, Peter Maydell, 2021/04/30
- [PULL 33/43] target/arm: Enforce alignment for VLDn/VSTn (multiple), Peter Maydell, 2021/04/30
- [PULL 32/43] target/arm: Enforce alignment for VLDn (all lanes), Peter Maydell, 2021/04/30
- [PULL 37/43] target/arm: Enforce alignment for aa64 load-acq/store-rel, Peter Maydell <=
- [PULL 38/43] target/arm: Use MemOp for size + endian in aa64 vector ld/st, Peter Maydell, 2021/04/30
- [PULL 27/43] target/arm: Enforce alignment for LDM/STM, Peter Maydell, 2021/04/30
- [PULL 42/43] hw: add compat machines for 6.1, Peter Maydell, 2021/04/30
- Re: [PULL 00/43] target-arm queue, no-reply, 2021/04/30
- Re: [PULL 00/43] target-arm queue, Peter Maydell, 2021/04/30