[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH 11/13] accel/tcg: Add TLB_CHECK_ALIGNED
From: Richard Henderson
Subject: [PATCH 11/13] accel/tcg: Add TLB_CHECK_ALIGNED
Date: Thu, 23 Feb 2023 10:43:40 -1000
This creates a per-page method for checking alignment.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/cpu-all.h | 4 +++-
accel/tcg/cputlb.c | 25 ++++++++++++++++++++++++-
2 files changed, 27 insertions(+), 2 deletions(-)
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index f3b2f4229c..5bb04782ba 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -399,8 +399,10 @@ CPUArchState *cpu_copy(CPUArchState *env);
#define TLB_BSWAP (1 << 0)
/* Set if TLB entry contains a watchpoint. */
#define TLB_WATCHPOINT (1 << 1)
+/* Set if TLB entry requires aligned accesses. */
+#define TLB_CHECK_ALIGNED (1 << 2)
-#define TLB_SLOW_FLAGS_MASK (TLB_BSWAP | TLB_WATCHPOINT)
+#define TLB_SLOW_FLAGS_MASK (TLB_BSWAP | TLB_WATCHPOINT | TLB_CHECK_ALIGNED)
/* The two sets of flags must not overlap. */
QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & TLB_SLOW_FLAGS_MASK);
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index a90688ac30..c692e71766 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1546,7 +1546,7 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
flags |= full->slow_flags[access_type];
/* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */
- if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) {
+ if (flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY | TLB_CHECK_ALIGNED)) {
*phost = NULL;
return TLB_MMIO;
}
@@ -1885,6 +1885,29 @@ static bool mmu_lookup(CPUArchState *env, target_ulong addr, MemOpIdx oi,
tcg_debug_assert((flags & TLB_BSWAP) == 0);
}
+ /*
+ * This alignment check differs from the one above, in that this is
+ * based on the atomicity of the operation. The intended use case is
+ * the ARM memory type field of each PTE, where access to pages with
+ * Device memory type requires alignment.
+ */
+ if (unlikely(flags & TLB_CHECK_ALIGNED)) {
+ MemOp atmax = l->memop & MO_ATMAX_MASK;
+ MemOp atom = l->memop & MO_ATOM_MASK;
+ MemOp size = l->memop & MO_SIZE;
+
+ if (size != MO_8 && atom != MO_ATOM_NONE) {
+ if (atmax == MO_ATMAX_SIZE) {
+ a_bits = size;
+ } else {
+ a_bits = atmax >> MO_ATMAX_SHIFT;
+ }
+ if (addr & ((1 << a_bits) - 1)) {
+ cpu_unaligned_access(env_cpu(env), addr, type, l->mmu_idx, ra);
+ }
+ }
+ }
+
return crosspage;
}
--
2.34.1
- [PATCH 04/13] accel/tcg: Honor TLB_DISCARD_WRITE in atomic_mmu_lookup, (continued)
- [PATCH 04/13] accel/tcg: Honor TLB_DISCARD_WRITE in atomic_mmu_lookup, Richard Henderson, 2023/02/23
- [PATCH 05/13] softmmu/physmem: Check watchpoints for read+write at once, Richard Henderson, 2023/02/23
- [PATCH 06/13] accel/tcg: Trigger watchpoints from atomic_mmu_lookup, Richard Henderson, 2023/02/23
- [PATCH 07/13] accel/tcg: Move TLB_WATCHPOINT to TLB_SLOW_FLAGS_MASK, Richard Henderson, 2023/02/23
- [PATCH 08/13] target/arm: Support 32-byte alignment in pow2_align, Richard Henderson, 2023/02/23
- [PATCH 09/13] exec/memattrs: Remove target_tlb_bit*, Richard Henderson, 2023/02/23
- [PATCH 10/13] accel/tcg: Add tlb_fill_flags to CPUTLBEntryFull, Richard Henderson, 2023/02/23
- [PATCH 11/13] accel/tcg: Add TLB_CHECK_ALIGNED,
Richard Henderson <=
- [PATCH 12/13] target/arm: Do memory type alignment check when translation disabled, Richard Henderson, 2023/02/23
- [PATCH 13/13] target/arm: Do memory type alignment check when translation enabled, Richard Henderson, 2023/02/23