From: tony.nguyen
Subject: [Qemu-devel] [PATCH v2 16/20] memory: Single byte swap along the I/O path
Date: Mon, 22 Jul 2019 15:51:20 +0000

Now that MemOp has been pushed down into the memory API, we can
collapse the two byte swaps, adjust_endianness and handle_bswap,
into the former.
Collapsing byte swaps along the I/O path enables additional endian
inversion logic, e.g. the SPARC64 Invert Endian TTE bit, with
redundant byte swaps cancelling out.
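As an illustration of the cancellation (a minimal standalone sketch,
not QEMU code; the enum below mirrors MemOp's encoding, in which
MO_BSWAP is a single flag bit):

    /* Because MO_BSWAP is one bit, each layer that wants a byte swap
     * toggles it with XOR; an even number of swap requests composes
     * to the identity, so no swap is performed at dispatch time. */
    #include <assert.h>
    #include <stdbool.h>

    enum { MO_8 = 0, MO_16 = 1, MO_32 = 2, MO_64 = 3, MO_BSWAP = 8 };

    static int swap_layer(int op, bool wants_swap)
    {
        return wants_swap ? op ^ MO_BSWAP : op;
    }

    int main(void)
    {
        int op = MO_32;
        op = swap_layer(op, true);  /* guest/host endian mismatch */
        op = swap_layer(op, true);  /* e.g. Invert Endian TTE bit set */
        assert(!(op & MO_BSWAP));   /* the two swaps cancel out */
        return 0;
    }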
Signed-off-by: Tony Nguyen <address@hidden>
---
accel/tcg/cputlb.c | 58 +++++++++++++++++++++++++-----------------------------
memory.c           | 30 ++++++++++++++++------------
2 files changed, 44 insertions(+), 44 deletions(-)
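For reference, a sketch of how need_bswap() resolves at compile time.
The NEED_*_BSWAP definitions below are an assumption, modeled on
cputlb.c deriving them from the build host's endianness:

    /* Assumed definitions: on a little-endian host a big-endian
     * access needs swapping and vice versa, so one branch of
     * need_bswap() constant-folds away per build. */
    #include <stdbool.h>

    #ifdef HOST_WORDS_BIGENDIAN
    # define NEED_BE_BSWAP 0
    # define NEED_LE_BSWAP 1
    #else
    # define NEED_BE_BSWAP 1
    # define NEED_LE_BSWAP 0
    #endif

    static inline bool need_bswap(bool big_endian)
    {
        return (big_endian && NEED_BE_BSWAP) || (!big_endian && NEED_LE_BSWAP);
    }

    int main(void)
    {
        /* On a little-endian host: true for big-endian accesses. */
        return need_bswap(true) ? 0 : 1;
    }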
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 97d7a64..6f5262c 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -881,7 +881,7 @@ static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
int mmu_idx, target_ulong addr, uintptr_t retaddr,
- MMUAccessType access_type, int size)
+ MMUAccessType access_type, MemOp op)
{
CPUState *cpu = env_cpu(env);
hwaddr mr_offset;
@@ -906,14 +906,13 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
qemu_mutex_lock_iothread();
locked = true;
}
- r = memory_region_dispatch_read(mr, mr_offset, &val, SIZE_MEMOP(size),
- iotlbentry->attrs);
+ r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs);
if (r != MEMTX_OK) {
hwaddr physaddr = mr_offset +
section->offset_within_address_space -
section->offset_within_region;
- cpu_transaction_failed(cpu, physaddr, addr, size, access_type,
+ cpu_transaction_failed(cpu, physaddr, addr, MEMOP_SIZE(op), access_type,
mmu_idx, iotlbentry->attrs, r, retaddr);
}
if (locked) {
@@ -925,7 +924,7 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
int mmu_idx, uint64_t val, target_ulong addr,
- uintptr_t retaddr, int size)
+ uintptr_t retaddr, MemOp op)
{
CPUState *cpu = env_cpu(env);
hwaddr mr_offset;
@@ -947,15 +946,15 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
qemu_mutex_lock_iothread();
locked = true;
}
- r = memory_region_dispatch_write(mr, mr_offset, val, SIZE_MEMOP(size),
- iotlbentry->attrs);
+ r = memory_region_dispatch_write(mr, mr_offset, val, op, iotlbentry->attrs);
if (r != MEMTX_OK) {
hwaddr physaddr = mr_offset +
section->offset_within_address_space -
section->offset_within_region;
- cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_STORE,
- mmu_idx, iotlbentry->attrs, r, retaddr);
+ cpu_transaction_failed(cpu, physaddr, addr, MEMOP_SIZE(op),
+ MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r,
+ retaddr);
}
if (locked) {
qemu_mutex_unlock_iothread();
@@ -1210,26 +1209,13 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
#endif
/*
- * Byte Swap Helper
+ * Byte Swap Checker
*
- * This should all dead code away depending on the build host and
- * access type.
+ * Dead code should all go away depending on the build host and access type.
*/
-
-static inline uint64_t handle_bswap(uint64_t val, int size, bool big_endian)
+static inline bool need_bswap(bool big_endian)
{
- if ((big_endian && NEED_BE_BSWAP) || (!big_endian && NEED_LE_BSWAP)) {
- switch (size) {
- case 1: return val;
- case 2: return bswap16(val);
- case 4: return bswap32(val);
- case 8: return bswap64(val);
- default:
- g_assert_not_reached();
- }
- } else {
- return val;
- }
+ return (big_endian && NEED_BE_BSWAP) || (!big_endian && NEED_LE_BSWAP);
}
/*
@@ -1260,6 +1246,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
unsigned a_bits = get_alignment_bits(get_tcgmemop(oi));
void *haddr;
uint64_t res;
+ MemOp op;
/* Handle CPU specific unaligned behaviour */
if (addr & ((1 << a_bits) - 1)) {
@@ -1305,9 +1292,13 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
}
}
- res = io_readx(env, &env_tlb(env)->d[mmu_idx].iotlb[index],
- mmu_idx, addr, retaddr, access_type, size);
- return handle_bswap(res, size, big_endian);
+ op = SIZE_MEMOP(size);
+ if (need_bswap(big_endian)) {
+ op ^= MO_BSWAP;
+ }
+
+ return io_readx(env, &env_tlb(env)->d[mmu_idx].iotlb[index],
+ mmu_idx, addr, retaddr, access_type, op);
}
/* Handle slow unaligned access (it spans two pages or IO). */
@@ -1508,6 +1499,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
unsigned a_bits = get_alignment_bits(get_tcgmemop(oi));
void *haddr;
+ MemOp op;
/* Handle CPU specific unaligned behaviour */
if (addr & ((1 << a_bits) - 1)) {
@@ -1553,9 +1545,13 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
}
}
+ op = SIZE_MEMOP(size);
+ if (need_bswap(big_endian)) {
+ op ^= MO_BSWAP;
+ }
+
io_writex(env, &env_tlb(env)->d[mmu_idx].iotlb[index], mmu_idx,
- handle_bswap(val, size, big_endian),
- addr, retaddr, size);
+ val, addr, retaddr, op);
return;
}
diff --git a/memory.c b/memory.c
index 73cb345..0aaa0a7 100644
--- a/memory.c
+++ b/memory.c
@@ -350,7 +350,7 @@ static bool memory_region_big_endian(MemoryRegion *mr)
#endif
}
-static bool memory_region_wrong_endianness(MemoryRegion *mr)
+static bool memory_region_endianness_inverted(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
@@ -359,23 +359,27 @@ static bool memory_region_wrong_endianness(MemoryRegion *mr)
#endif
}
-static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
+static void adjust_endianness(MemoryRegion *mr, uint64_t *data, MemOp op)
{
- if (memory_region_wrong_endianness(mr)) {
- switch (size) {
- case 1:
+ if (memory_region_endianness_inverted(mr)) {
+ op ^= MO_BSWAP;
+ }
+
+ if (op & MO_BSWAP) {
+ switch (op & MO_SIZE) {
+ case MO_8:
break;
- case 2:
+ case MO_16:
*data = bswap16(*data);
break;
- case 4:
+ case MO_32:
*data = bswap32(*data);
break;
- case 8:
+ case MO_64:
*data = bswap64(*data);
break;
default:
- abort();
+ g_assert_not_reached();
}
}
}
@@ -1449,7 +1453,7 @@ MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
}
r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
- adjust_endianness(mr, pval, size);
+ adjust_endianness(mr, pval, op);
return r;
}
@@ -1492,7 +1496,7 @@ MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
return MEMTX_DECODE_ERROR;
}
- adjust_endianness(mr, &data, size);
+ adjust_endianness(mr, &data, op);
if ((!kvm_eventfds_enabled()) &&
memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
@@ -2338,7 +2342,7 @@ void memory_region_add_eventfd(MemoryRegion *mr,
}
if (size) {
- adjust_endianness(mr, &mrfd.data, size);
+ adjust_endianness(mr, &mrfd.data, SIZE_MEMOP(size));
}
memory_region_transaction_begin();
for (i = 0; i < mr->ioeventfd_nb; ++i) {
@@ -2373,7 +2377,7 @@ void memory_region_del_eventfd(MemoryRegion *mr,
unsigned i;
if (size) {
- adjust_endianness(mr, &mrfd.data, size);
+ adjust_endianness(mr, &mrfd.data, SIZE_MEMOP(size));
}
memory_region_transaction_begin();
for (i = 0; i < mr->ioeventfd_nb; ++i) {
--
1.8.3.1
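
A worked model of the reworked adjust_endianness() in the memory.c
hunk above (a sketch under stated assumptions: the same MemOp
encoding as earlier, 'inverted' standing in for
memory_region_endianness_inverted(mr), and GCC/Clang
__builtin_bswap* in place of QEMU's bswap helpers):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    enum { MO_8 = 0, MO_16 = 1, MO_32 = 2, MO_64 = 3,
           MO_SIZE = 3, MO_BSWAP = 8 };

    static void adjust_endianness_model(uint64_t *data, int op, bool inverted)
    {
        if (inverted) {
            op ^= MO_BSWAP;  /* device/target endianness mismatch */
        }
        if (op & MO_BSWAP) {
            switch (op & MO_SIZE) {
            case MO_8:
                break;
            case MO_16:
                *data = __builtin_bswap16(*data);
                break;
            case MO_32:
                *data = __builtin_bswap32(*data);
                break;
            case MO_64:
                *data = __builtin_bswap64(*data);
                break;
            }
        }
    }

    int main(void)
    {
        uint64_t v = 0x1234;

        /* Inverted region, caller did not ask for a swap: one swap. */
        adjust_endianness_model(&v, MO_16, true);
        assert(v == 0x3412);

        /* Inverted region, caller also set MO_BSWAP: the two swap
         * requests cancel and the value passes through untouched. */
        v = 0x1234;
        adjust_endianness_model(&v, MO_16 | MO_BSWAP, true);
        assert(v == 0x1234);
        return 0;
    }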