[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [PATCH] tcg: fix segfault when MO_UNALN is set
From: |
Yongbok Kim |
Subject: |
[Qemu-devel] [PATCH] tcg: fix segfault when MO_UNALN is set |
Date: |
Tue, 26 May 2015 13:46:35 +0100 |
MO_UNALN caused segfaults when it was set, because the access reached out of
the bounds of the load/store function pointer arrays in
tcg_out_qemu_{ld,st}_slow_path() or their equivalents.
This patch introduces a new inline function, get_memalign(), for extracting
the alignment information from the combined op/idx parameter. get_memop() now
returns the memop without the alignment information, which avoids the
segfaults.
Signed-off-by: Yongbok Kim <address@hidden>
---
softmmu_template.h | 24 ++++++++++++------------
tcg/tcg.h | 14 +++++++++++++-
2 files changed, 25 insertions(+), 13 deletions(-)
diff --git a/softmmu_template.h b/softmmu_template.h
index 4778473..1a1de4a 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -184,7 +184,7 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong
addr,
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
if ((addr & (DATA_SIZE - 1)) != 0
- && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+ && get_memalign(oi) == MO_ALIGN) {
cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
mmu_idx, retaddr);
}
@@ -218,7 +218,7 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong
addr,
DATA_TYPE res1, res2;
unsigned shift;
do_unaligned_access:
- if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+ if (get_memalign(oi) == MO_ALIGN) {
cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
mmu_idx, retaddr);
}
@@ -237,7 +237,7 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong
addr,
/* Handle aligned access or unaligned access in the same page. */
if ((addr & (DATA_SIZE - 1)) != 0
- && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+ && get_memalign(oi) == MO_ALIGN) {
cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
mmu_idx, retaddr);
}
@@ -271,7 +271,7 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong
addr,
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
if ((addr & (DATA_SIZE - 1)) != 0
- && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+ && get_memalign(oi) == MO_ALIGN) {
cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
mmu_idx, retaddr);
}
@@ -305,7 +305,7 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong
addr,
DATA_TYPE res1, res2;
unsigned shift;
do_unaligned_access:
- if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+ if (get_memalign(oi) == MO_ALIGN) {
cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
mmu_idx, retaddr);
}
@@ -324,7 +324,7 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong
addr,
/* Handle aligned access or unaligned access in the same page. */
if ((addr & (DATA_SIZE - 1)) != 0
- && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+ && get_memalign(oi) == MO_ALIGN) {
cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
mmu_idx, retaddr);
}
@@ -399,7 +399,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong
addr, DATA_TYPE val,
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
if ((addr & (DATA_SIZE - 1)) != 0
- && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+ && get_memalign(oi) == MO_ALIGN) {
cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
mmu_idx, retaddr);
}
@@ -430,7 +430,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong
addr, DATA_TYPE val,
>= TARGET_PAGE_SIZE)) {
int i;
do_unaligned_access:
- if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+ if (get_memalign(oi) == MO_ALIGN) {
cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
mmu_idx, retaddr);
}
@@ -450,7 +450,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong
addr, DATA_TYPE val,
/* Handle aligned access or unaligned access in the same page. */
if ((addr & (DATA_SIZE - 1)) != 0
- && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+ && get_memalign(oi) == MO_ALIGN) {
cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
mmu_idx, retaddr);
}
@@ -479,7 +479,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong
addr, DATA_TYPE val,
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
if ((addr & (DATA_SIZE - 1)) != 0
- && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+ && get_memalign(oi) == MO_ALIGN) {
cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
mmu_idx, retaddr);
}
@@ -510,7 +510,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong
addr, DATA_TYPE val,
>= TARGET_PAGE_SIZE)) {
int i;
do_unaligned_access:
- if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+ if (get_memalign(oi) == MO_ALIGN) {
cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
mmu_idx, retaddr);
}
@@ -530,7 +530,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong
addr, DATA_TYPE val,
/* Handle aligned access or unaligned access in the same page. */
if ((addr & (DATA_SIZE - 1)) != 0
- && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+ && get_memalign(oi) == MO_ALIGN) {
cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
mmu_idx, retaddr);
}
diff --git a/tcg/tcg.h b/tcg/tcg.h
index 8098f82..849c51a 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -863,7 +863,19 @@ static inline TCGMemOpIdx make_memop_idx(TCGMemOp op,
unsigned idx)
*/
static inline TCGMemOp get_memop(TCGMemOpIdx oi)
{
- return oi >> 4;
+ return (oi >> 4) & ~MO_AMASK;
+}
+
+/**
+ * get_memalign
+ * @oi: combined op/idx parameter
+ *
+ * Extract the memory alignment information from the combined value.
+ */
+
+static inline TCGMemOp get_memalign(TCGMemOpIdx oi)
+{
+ return (oi >> 4) & MO_AMASK;
}
/**
--
1.7.5.4
- [Qemu-devel] [PATCH] tcg: fix segfault when MO_UNALN is set,
Yongbok Kim <=