From: Alvise Rigo
Subject: [Qemu-devel] [RFC v8 02/14] softmmu: Simplify helper_*_st_name, wrap unaligned code
Date: Tue, 19 Apr 2016 15:39:19 +0200
In an attempt to simplify the helper_*_st_name functions, wrap the
do_unaligned_access code into a shared inline function. As this also
removes the goto statement, the inline code is expanded twice in each
helper.
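As an aside (illustrative, not part of this patch): the duplication is
cheap because the endian flag is a compile-time constant at every call
site, so the compiler folds away the untaken branch in each inlined
copy. A minimal standalone sketch of the pattern, with made-up names:

    #include <stdint.h>

    /* A boolean that is constant at the call site lets the compiler
     * discard the dead branch in every inlined expansion. */
    static inline uint8_t extract_byte(int little_endian, uint32_t val, int i)
    {
        if (little_endian) {
            return val >> (i * 8);       /* little-endian: byte i */
        }
        return val >> ((3 - i) * 8);     /* big-endian: reversed order */
    }

    uint8_t le_byte(uint32_t v, int i) { return extract_byte(1, v, i); }
    uint8_t be_byte(uint32_t v, int i) { return extract_byte(0, v, i); }

With optimisation enabled each wrapper should compile down to a single
shift, which is why the expanded inline code costs little.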
From Message-id address@hidden:
There is a minor wrinkle that we need to use a unique name for each
inline fragment as the template is included multiple times. For this the
smmu_helper macro does the appropriate glue magic.
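To sketch the glue magic (simplified, outside the patch): cputlb.c
includes softmmu_template.h once per access size with MMUSUFFIX defined
as _mmu, and the template sets SUFFIX per DATA_SIZE, e.g. l for 4-byte
accesses, so the macro yields a distinct symbol per instantiation:

    /* QEMU's glue() pastes tokens after macro expansion: */
    #define xglue(x, y) x ## y
    #define glue(x, y) xglue(x, y)

    #define SUFFIX    l      /* softmmu_template.h, DATA_SIZE == 4 */
    #define MMUSUFFIX _mmu   /* as defined by cputlb.c */

    #define smmu_helper(name) glue(glue(glue(smmu_helper_, SUFFIX), \
                                        MMUSUFFIX), _##name)

    /* smmu_helper(do_unl_store) expands to the unique identifier
     * smmu_helper_l_mmu_do_unl_store, so each inclusion of the
     * template defines its own copy of the inline helper. */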
I've tested the result with no change to functionality. Comparing the
objdump of cputlb.o shows minimal changes in probe_write and
everything else is identical.
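For reference, a comparison along those lines can be reproduced with
something like the following (paths are illustrative):

  objdump -d before/cputlb.o > before.dis
  objdump -d after/cputlb.o  > after.dis
  diff -u before.dis after.dis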
Suggested-by: Jani Kokkonen <address@hidden>
Suggested-by: Claudio Fontana <address@hidden>
CC: Alvise Rigo <address@hidden>
Signed-off-by: Alex Bennée <address@hidden>
[Alex Bennée: define smmu_helper and unified logic between be/le]
Signed-off-by: Alvise Rigo <address@hidden>
---
softmmu_template.h | 82 ++++++++++++++++++++++++++++++------------------------
1 file changed, 46 insertions(+), 36 deletions(-)
diff --git a/softmmu_template.h b/softmmu_template.h
index 208f808..3eb54f8 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -370,6 +370,46 @@ static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                  iotlbentry->attrs);
 }
 
+/* Inline helper functions for SoftMMU
+ *
+ * These functions help reduce code duplication in the various main
+ * helper functions. Constant arguments (like endian state) will allow
+ * the compiler to skip code which is never called in a given inline.
+ */
+#define smmu_helper(name) glue(glue(glue(smmu_helper_, SUFFIX), \
+                                    MMUSUFFIX), _##name)
+static inline void smmu_helper(do_unl_store)(CPUArchState *env,
+                                             bool little_endian,
+                                             DATA_TYPE val,
+                                             target_ulong addr,
+                                             TCGMemOpIdx oi,
+                                             unsigned mmu_idx,
+                                             uintptr_t retaddr)
+{
+    int i;
+
+    if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+                             mmu_idx, retaddr);
+    }
+    /* Note: relies on the fact that tlb_fill() does not remove the
+     * previous page from the TLB cache. */
+    for (i = DATA_SIZE - 1; i >= 0; i--) {
+        uint8_t val8;
+        if (little_endian) {
+            /* Little-endian extract. */
+            val8 = val >> (i * 8);
+        } else {
+            /* Big-endian extract. */
+            val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
+        }
+        /* Note the adjustment at the beginning of the function.
+           Undo that for the recursion. */
+        glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
+                                        oi, retaddr + GETPC_ADJ);
+    }
+}
+
 void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                        TCGMemOpIdx oi, uintptr_t retaddr)
 {
@@ -399,7 +439,8 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
         CPUIOTLBEntry *iotlbentry;
         if ((addr & (DATA_SIZE - 1)) != 0) {
-            goto do_unaligned_access;
+            smmu_helper(do_unl_store)(env, true, val, addr, oi, mmu_idx, retaddr);
+            return;
         }
         iotlbentry = &env->iotlb[mmu_idx][index];
@@ -414,23 +455,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     if (DATA_SIZE > 1
         && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                     >= TARGET_PAGE_SIZE)) {
-        int i;
-    do_unaligned_access:
-        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
-            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-                                 mmu_idx, retaddr);
-        }
-        /* XXX: not efficient, but simple */
-        /* Note: relies on the fact that tlb_fill() does not remove the
-         * previous page from the TLB cache. */
-        for (i = DATA_SIZE - 1; i >= 0; i--) {
-            /* Little-endian extract. */
-            uint8_t val8 = val >> (i * 8);
-            /* Note the adjustment at the beginning of the function.
-               Undo that for the recursion. */
-            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
-                                            oi, retaddr + GETPC_ADJ);
-        }
+        smmu_helper(do_unl_store)(env, true, val, addr, oi, mmu_idx, retaddr);
         return;
     }
@@ -479,7 +504,8 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
         CPUIOTLBEntry *iotlbentry;
         if ((addr & (DATA_SIZE - 1)) != 0) {
-            goto do_unaligned_access;
+            smmu_helper(do_unl_store)(env, false, val, addr, oi, mmu_idx, retaddr);
+            return;
         }
         iotlbentry = &env->iotlb[mmu_idx][index];
@@ -494,23 +520,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     if (DATA_SIZE > 1
         && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                     >= TARGET_PAGE_SIZE)) {
-        int i;
-    do_unaligned_access:
-        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
-            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-                                 mmu_idx, retaddr);
-        }
-        /* XXX: not efficient, but simple */
-        /* Note: relies on the fact that tlb_fill() does not remove the
-         * previous page from the TLB cache. */
-        for (i = DATA_SIZE - 1; i >= 0; i--) {
-            /* Big-endian extract. */
-            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
-            /* Note the adjustment at the beginning of the function.
-               Undo that for the recursion. */
-            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
-                                            oi, retaddr + GETPC_ADJ);
-        }
+        smmu_helper(do_unl_store)(env, false, val, addr, oi, mmu_idx, retaddr);
         return;
     }
--
2.8.0