[Qemu-devel] [PATCH 10/48] Lots of ARM TCG changes


From: Riku Voipio
Subject: [Qemu-devel] [PATCH 10/48] Lots of ARM TCG changes
Date: Fri, 26 Mar 2010 16:06:31 +0000

From: Juha Riihimäki <address@hidden>

This patch is a flattened set of dozens of patches. The individual
patches will be resubmitted as a separate patch series once their
commit messages have been improved.

---
 target-arm/helpers.h     |   11 +
 target-arm/machine.c     |    6 +
 target-arm/neon_helper.c |  269 ++++++++
 target-arm/op_helper.c   |  124 ++++
 target-arm/translate.c   | 1619 ++++++++++++++++++++++++----------------------
 5 files changed, 1263 insertions(+), 766 deletions(-)
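
Note (not part of the patch): for readers skimming the diff, below is a minimal
standalone sketch of the 32-bit saturating add semantics implemented by the new
neon_qadd_u32/neon_qadd_s32 helpers added in neon_helper.c. The CPUState Q-flag
update (SET_QC()) is modelled here by a plain int flag, and main() is only a
hypothetical usage example; it is illustrative, not the helper code itself.

    #include <stdint.h>
    #include <stdio.h>

    #define SIGNBIT 0x80000000u

    /* Unsigned saturating add: on wrap-around, saturate to UINT32_MAX. */
    static uint32_t qadd_u32(uint32_t a, uint32_t b, int *qc)
    {
        uint32_t res = a + b;
        if (res < a) {      /* unsigned overflow: result wrapped below 'a' */
            *qc = 1;        /* stands in for SET_QC() */
            res = ~0u;
        }
        return res;
    }

    /* Signed saturating add: overflow iff operands share a sign and the
     * result's sign differs; saturate to INT32_MAX or INT32_MIN. */
    static uint32_t qadd_s32(uint32_t a, uint32_t b, int *qc)
    {
        uint32_t res = a + b;
        if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
            *qc = 1;
            res = ~(((int32_t)a >> 31) ^ SIGNBIT);
        }
        return res;
    }

    int main(void)
    {
        int qc = 0;
        printf("%u\n", qadd_u32(0xfffffff0u, 0x20u, &qc));       /* 4294967295, qc set */
        printf("%d\n", (int32_t)qadd_s32(0x7fffffffu, 1u, &qc)); /* 2147483647, qc set */
        return 0;
    }
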

diff --git a/target-arm/helpers.h b/target-arm/helpers.h
index 0d1bc47..a5fd2e8 100644
--- a/target-arm/helpers.h
+++ b/target-arm/helpers.h
@@ -160,10 +160,14 @@ DEF_HELPER_3(neon_qadd_u8, i32, env, i32, i32)
 DEF_HELPER_3(neon_qadd_s8, i32, env, i32, i32)
 DEF_HELPER_3(neon_qadd_u16, i32, env, i32, i32)
 DEF_HELPER_3(neon_qadd_s16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qadd_u32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qadd_s32, i32, env, i32, i32)
 DEF_HELPER_3(neon_qsub_u8, i32, env, i32, i32)
 DEF_HELPER_3(neon_qsub_s8, i32, env, i32, i32)
 DEF_HELPER_3(neon_qsub_u16, i32, env, i32, i32)
 DEF_HELPER_3(neon_qsub_s16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qsub_u32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qsub_s32, i32, env, i32, i32)
 
 DEF_HELPER_2(neon_hadd_s8, i32, i32, i32)
 DEF_HELPER_2(neon_hadd_u8, i32, i32, i32)
@@ -291,10 +295,13 @@ DEF_HELPER_3(neon_qrdmulh_s32, i32, env, i32, i32)
 
 DEF_HELPER_1(neon_narrow_u8, i32, i64)
 DEF_HELPER_1(neon_narrow_u16, i32, i64)
+DEF_HELPER_2(neon_unarrow_sat8, i32, env, i64)
 DEF_HELPER_2(neon_narrow_sat_u8, i32, env, i64)
 DEF_HELPER_2(neon_narrow_sat_s8, i32, env, i64)
+DEF_HELPER_2(neon_unarrow_sat16, i32, env, i64)
 DEF_HELPER_2(neon_narrow_sat_u16, i32, env, i64)
 DEF_HELPER_2(neon_narrow_sat_s16, i32, env, i64)
+DEF_HELPER_2(neon_unarrow_sat32, i32, env, i64)
 DEF_HELPER_2(neon_narrow_sat_u32, i32, env, i64)
 DEF_HELPER_2(neon_narrow_sat_s32, i32, env, i64)
 DEF_HELPER_1(neon_narrow_high_u8, i32, i64)
@@ -447,4 +454,8 @@ DEF_HELPER_3(iwmmxt_muladdswl, i64, i64, i32, i32)
 
 DEF_HELPER_2(set_teecr, void, env, i32)
 
+DEF_HELPER_2(neon_unzip, void, env, i32)
+DEF_HELPER_2(neon_zip, void, env, i32)
+DEF_HELPER_2(neon_vldst_all, void, env, i32)
+
 #include "def-helper.h"
diff --git a/target-arm/machine.c b/target-arm/machine.c
index 3925d3a..8595549 100644
--- a/target-arm/machine.c
+++ b/target-arm/machine.c
@@ -26,6 +26,9 @@ void cpu_save(QEMUFile *f, void *opaque)
     qemu_put_be32(f, env->cp15.c1_sys);
     qemu_put_be32(f, env->cp15.c1_coproc);
     qemu_put_be32(f, env->cp15.c1_xscaleauxcr);
+    qemu_put_be32(f, env->cp15.c1_secfg);
+    qemu_put_be32(f, env->cp15.c1_sedbg);
+    qemu_put_be32(f, env->cp15.c1_nseac);
     qemu_put_be32(f, env->cp15.c2_base0);
     qemu_put_be32(f, env->cp15.c2_base1);
     qemu_put_be32(f, env->cp15.c2_control);
@@ -133,6 +136,9 @@ int cpu_load(QEMUFile *f, void *opaque, int version_id)
     env->cp15.c1_sys = qemu_get_be32(f);
     env->cp15.c1_coproc = qemu_get_be32(f);
     env->cp15.c1_xscaleauxcr = qemu_get_be32(f);
+    env->cp15.c1_secfg = qemu_get_be32(f);
+    env->cp15.c1_sedbg = qemu_get_be32(f);
+    env->cp15.c1_nseac = qemu_get_be32(f);
     env->cp15.c2_base0 = qemu_get_be32(f);
     env->cp15.c2_base1 = qemu_get_be32(f);
     env->cp15.c2_control = qemu_get_be32(f);
diff --git a/target-arm/neon_helper.c b/target-arm/neon_helper.c
index 5e6452b..924bf40 100644
--- a/target-arm/neon_helper.c
+++ b/target-arm/neon_helper.c
@@ -198,6 +198,16 @@ NEON_VOP_ENV(qadd_u16, neon_u16, 2)
 #undef NEON_FN
 #undef NEON_USAT
 
+uint32_t HELPER(neon_qadd_u32)(CPUState *env, uint32_t a, uint32_t b)
+{
+    uint32_t res = a + b;
+    if (res < a) {
+        SET_QC();
+        res = ~0;
+    }
+    return res;
+}
+
 #define NEON_SSAT(dest, src1, src2, type) do { \
     int32_t tmp = (uint32_t)src1 + (uint32_t)src2; \
     if (tmp != (type)tmp) { \
@@ -218,6 +228,16 @@ NEON_VOP_ENV(qadd_s16, neon_s16, 2)
 #undef NEON_FN
 #undef NEON_SSAT
 
+uint32_t HELPER(neon_qadd_s32)(CPUState *env, uint32_t a, uint32_t b)
+{
+    uint32_t res = a + b;
+    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
+        SET_QC();
+        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
+    }
+    return res;
+}
+
 #define NEON_USAT(dest, src1, src2, type) do { \
     uint32_t tmp = (uint32_t)src1 - (uint32_t)src2; \
     if (tmp != (type)tmp) { \
@@ -234,6 +254,16 @@ NEON_VOP_ENV(qsub_u16, neon_u16, 2)
 #undef NEON_FN
 #undef NEON_USAT
 
+uint32_t HELPER(neon_qsub_u32)(CPUState *env, uint32_t a, uint32_t b)
+{
+    uint32_t res = a - b;
+    if (res > a) {
+        SET_QC();
+        res = 0;
+    }
+    return res;
+}
+
 #define NEON_SSAT(dest, src1, src2, type) do { \
     int32_t tmp = (uint32_t)src1 - (uint32_t)src2; \
     if (tmp != (type)tmp) { \
@@ -254,6 +284,16 @@ NEON_VOP_ENV(qsub_s16, neon_s16, 2)
 #undef NEON_FN
 #undef NEON_SSAT
 
+uint32_t HELPER(neon_qsub_s32)(CPUState *env, uint32_t a, uint32_t b)
+{
+    uint32_t res = a - b;
+    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
+        SET_QC();
+        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
+    }
+    return res;
+}
+
 #define NEON_FN(dest, src1, src2) dest = (src1 + src2) >> 1
 NEON_VOP(hadd_s8, neon_s8, 4)
 NEON_VOP(hadd_u8, neon_u8, 4)
@@ -909,6 +949,33 @@ uint32_t HELPER(neon_narrow_round_high_u16)(uint64_t x)
     return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000);
 }
 
+uint32_t HELPER(neon_unarrow_sat8)(CPUState *env, uint64_t x)
+{
+    uint16_t s;
+    uint8_t d;
+    uint32_t res = 0;
+#define SAT8(n) \
+    s = x >> n; \
+    if (s & 0x8000) { \
+        SET_QC(); \
+    } else { \
+        if (s > 0xff) { \
+            d = 0xff; \
+            SET_QC(); \
+        } else  { \
+            d = s; \
+        } \
+        res |= (uint32_t)d << (n / 2); \
+    }
+    
+    SAT8(0);
+    SAT8(16);
+    SAT8(32);
+    SAT8(48);
+#undef SAT8
+    return res;
+}
+
 uint32_t HELPER(neon_narrow_sat_u8)(CPUState *env, uint64_t x)
 {
     uint16_t s;
@@ -955,6 +1022,29 @@ uint32_t HELPER(neon_narrow_sat_s8)(CPUState *env, uint64_t x)
     return res;
 }
 
+uint32_t HELPER(neon_unarrow_sat16)(CPUState *env, uint64_t x)
+{
+    uint32_t high;
+    uint32_t low;
+    low = x;
+    if (low & 0x80000000) {
+        low = 0;
+        SET_QC();
+    } else if (low > 0xffff) {
+        low = 0xffff;
+        SET_QC();
+    }
+    high = x >> 32;
+    if (high & 0x80000000) {
+        high = 0;
+        SET_QC();
+    } else if (high > 0xffff) {
+        high = 0xffff;
+        SET_QC();
+    }
+    return low | (high << 16);
+}
+
 uint32_t HELPER(neon_narrow_sat_u16)(CPUState *env, uint64_t x)
 {
     uint32_t high;
@@ -989,6 +1079,19 @@ uint32_t HELPER(neon_narrow_sat_s16)(CPUState *env, uint64_t x)
     return (uint16_t)low | (high << 16);
 }
 
+uint32_t HELPER(neon_unarrow_sat32)(CPUState *env, uint64_t x)
+{
+    if (x & 0x8000000000000000ull) {
+        SET_QC();
+        return 0;
+    }
+    if (x > 0xffffffffu) {
+        SET_QC();
+        return 0xffffffffu;
+    }
+    return x;
+}
+
 uint32_t HELPER(neon_narrow_sat_u32)(CPUState *env, uint64_t x)
 {
     if (x > 0xffffffffu) {
@@ -1457,3 +1560,169 @@ uint32_t HELPER(neon_acgt_f32)(uint32_t a, uint32_t b)
     float32 f1 = float32_abs(vfp_itos(b));
     return (float32_compare_quiet(f0, f1, NFS) > 0) ? ~0 : 0;
 }
+
+#define ELEM(V, N, SIZE) (uint64_t)(((uint64_t)(V) >> ((N) * (SIZE))) & ((1ull << (SIZE)) - 1))
+
+void HELPER(neon_unzip)(CPUState *env, uint32_t insn)
+{
+    int rd = ((insn >> 18) & 0x10) | ((insn >> 12) & 0x0f);
+    int rm = ((insn >> 1) & 0x10) | (insn & 0x0f);
+    int size = (insn >> 18) & 3;
+    if (insn & 0x40) { /* Q */
+        uint64_t zm0 = float64_val(env->vfp.regs[rm]);
+        uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
+        uint64_t zd0 = float64_val(env->vfp.regs[rd]);
+        uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
+        uint64_t m0 = 0, m1 = 0, d0 = 0, d1 = 0;
+        switch (size) {
+            case 0:
+                d0 = ELEM(zd0, 0, 8) | (ELEM(zd0, 2, 8) << 8)
+                     | (ELEM(zd0, 4, 8) << 16) | (ELEM(zd0, 6, 8) << 24)
+                     | (ELEM(zd1, 0, 8) << 32) | (ELEM(zd1, 2, 8) << 40)
+                     | (ELEM(zd1, 4, 8) << 48) | (ELEM(zd1, 6, 8) << 56);
+                d1 = ELEM(zm0, 0, 8) | (ELEM(zm0, 2, 8) << 8)
+                     | (ELEM(zm0, 4, 8) << 16) | (ELEM(zm0, 6, 8) << 24)
+                     | (ELEM(zm1, 0, 8) << 32) | (ELEM(zm1, 2, 8) << 40)
+                     | (ELEM(zm1, 4, 8) << 48) | (ELEM(zm1, 6, 8) << 56);
+                m0 = ELEM(zd0, 1, 8) | (ELEM(zd0, 3, 8) << 8)
+                     | (ELEM(zd0, 5, 8) << 16) | (ELEM(zd0, 7, 8) << 24)
+                     | (ELEM(zd1, 1, 8) << 32) | (ELEM(zd1, 3, 8) << 40)
+                     | (ELEM(zd1, 5, 8) << 48) | (ELEM(zd1, 7, 8) << 56);
+                m1 = ELEM(zm0, 1, 8) | (ELEM(zm0, 3, 8) << 8)
+                     | (ELEM(zm0, 5, 8) << 16) | (ELEM(zm0, 7, 8) << 24)
+                     | (ELEM(zm1, 1, 8) << 32) | (ELEM(zm1, 3, 8) << 40)
+                     | (ELEM(zm1, 5, 8) << 48) | (ELEM(zm1, 7, 8) << 56);
+                break;
+            case 1:
+                d0 = ELEM(zd0, 0, 16) | (ELEM(zd0, 2, 16) << 16)
+                     | (ELEM(zd1, 0, 16) << 32) | (ELEM(zd1, 2, 16) << 48);
+                d1 = ELEM(zm0, 0, 16) | (ELEM(zm0, 2, 16) << 16)
+                     | (ELEM(zm1, 0, 16) << 32) | (ELEM(zm1, 2, 16) << 48);
+                m0 = ELEM(zd0, 1, 16) | (ELEM(zd0, 3, 16) << 16)
+                     | (ELEM(zd1, 1, 16) << 32) | (ELEM(zd1, 3, 16) << 48);
+                m1 = ELEM(zm0, 1, 16) | (ELEM(zm0, 3, 16) << 16)
+                     | (ELEM(zm1, 1, 16) << 32) | (ELEM(zm1, 3, 16) << 48);
+                break;
+            case 2:
+                d0 = ELEM(zd0, 0, 32) | (ELEM(zd1, 0, 32) << 32);
+                d1 = ELEM(zm0, 0, 32) | (ELEM(zm1, 0, 32) << 32);
+                m0 = ELEM(zd0, 1, 32) | (ELEM(zd1, 1, 32) << 32);
+                m1 = ELEM(zm0, 1, 32) | (ELEM(zm1, 1, 32) << 32);
+                break;
+            default:
+                break;
+        }
+        env->vfp.regs[rm] = make_float64(m0);
+        env->vfp.regs[rm + 1] = make_float64(m1);
+        env->vfp.regs[rd] = make_float64(d0);
+        env->vfp.regs[rd + 1] = make_float64(d1);
+    } else {
+        uint64_t zm = float64_val(env->vfp.regs[rm]);
+        uint64_t zd = float64_val(env->vfp.regs[rd]);
+        uint64_t m = 0, d = 0;
+        switch (size) {
+            case 0:
+                d = ELEM(zd, 0, 8) | (ELEM(zd, 2, 8) << 8)
+                    | (ELEM(zd, 4, 8) << 16) | (ELEM(zd, 6, 8) << 24)
+                    | (ELEM(zm, 0, 8) << 32) | (ELEM(zm, 2, 8) << 40)
+                    | (ELEM(zm, 4, 8) << 48) | (ELEM(zm, 6, 8) << 56);
+                m = ELEM(zd, 1, 8) | (ELEM(zd, 3, 8) << 8)
+                    | (ELEM(zd, 5, 8) << 16) | (ELEM(zd, 7, 8) << 24)
+                    | (ELEM(zm, 1, 8) << 32) | (ELEM(zm, 3, 8) << 40)
+                    | (ELEM(zm, 5, 8) << 48) | (ELEM(zm, 7, 8) << 56);
+                break;
+            case 1:
+                d = ELEM(zd, 0, 16) | (ELEM(zd, 2, 16) << 16)
+                    | (ELEM(zm, 0, 16) << 32) | (ELEM(zm, 2, 16) << 48);
+                m = ELEM(zd, 1, 16) | (ELEM(zd, 3, 16) << 16)
+                    | (ELEM(zm, 1, 16) << 32) | (ELEM(zm, 3, 16) << 48);
+                break;
+            default:
+                /* size == 2 is a no-op for doubleword vectors */
+                break;
+        }
+        env->vfp.regs[rm] = make_float64(m);
+        env->vfp.regs[rd] = make_float64(d);
+    }
+}
+
+void HELPER(neon_zip)(CPUState *env, uint32_t insn)
+{
+    int rd = ((insn >> 18) & 0x10) | ((insn >> 12) & 0x0f);
+    int rm = ((insn >> 1) & 0x10) | (insn & 0x0f);
+    int size = (insn >> 18) & 3;
+    if (insn & 0x40) { /* Q */
+        uint64_t zm0 = float64_val(env->vfp.regs[rm]);
+        uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
+        uint64_t zd0 = float64_val(env->vfp.regs[rd]);
+        uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
+        uint64_t m0 = 0, m1 = 0, d0 = 0, d1 = 0;
+        switch (size) {
+            case 0:
+                d0 = ELEM(zd0, 0, 8) | (ELEM(zm0, 0, 8) << 8)
+                     | (ELEM(zd0, 1, 8) << 16) | (ELEM(zm0, 1, 8) << 24)
+                     | (ELEM(zd0, 2, 8) << 32) | (ELEM(zm0, 2, 8) << 40)
+                     | (ELEM(zd0, 3, 8) << 48) | (ELEM(zm0, 3, 8) << 56);
+                d1 = ELEM(zd0, 4, 8) | (ELEM(zm0, 4, 8) << 8)
+                     | (ELEM(zd0, 5, 8) << 16) | (ELEM(zm0, 5, 8) << 24)
+                     | (ELEM(zd0, 6, 8) << 32) | (ELEM(zm0, 6, 8) << 40)
+                     | (ELEM(zd0, 7, 8) << 48) | (ELEM(zm0, 7, 8) << 56);
+                m0 = ELEM(zd1, 0, 8) | (ELEM(zm1, 0, 8) << 8)
+                     | (ELEM(zd1, 1, 8) << 16) | (ELEM(zm1, 1, 8) << 24)
+                     | (ELEM(zd1, 2, 8) << 32) | (ELEM(zm1, 2, 8) << 40)
+                     | (ELEM(zd1, 3, 8) << 48) | (ELEM(zm1, 3, 8) << 56);
+                m1 = ELEM(zd1, 4, 8) | (ELEM(zm1, 4, 8) << 8)
+                     | (ELEM(zd1, 5, 8) << 16) | (ELEM(zm1, 5, 8) << 24)
+                     | (ELEM(zd1, 6, 8) << 32) | (ELEM(zm1, 6, 8) << 40)
+                     | (ELEM(zd1, 7, 8) << 48) | (ELEM(zm1, 7, 8) << 56);
+                break;
+            case 1:
+                d0 = ELEM(zd0, 0, 16) | (ELEM(zm0, 0, 16) << 16)
+                     | (ELEM(zd0, 1, 16) << 32) | (ELEM(zm0, 1, 16) << 48);
+                d1 = ELEM(zd0, 2, 16) | (ELEM(zm0, 2, 16) << 16)
+                     | (ELEM(zd0, 3, 16) << 32) | (ELEM(zm0, 3, 16) << 48);
+                m0 = ELEM(zd1, 0, 16) | (ELEM(zm1, 0, 16) << 16)
+                     | (ELEM(zd1, 1, 16) << 32) | (ELEM(zm1, 1, 16) << 48);
+                m1 = ELEM(zd1, 2, 16) | (ELEM(zm1, 2, 16) << 16)
+                     | (ELEM(zd1, 3, 16) << 32) | (ELEM(zm1, 3, 16) << 48);
+                break;
+            case 2:
+                d0 = ELEM(zd0, 0, 32) | (ELEM(zm0, 0, 32) << 32);
+                d1 = ELEM(zd0, 1, 32) | (ELEM(zm0, 1, 32) << 32);
+                m0 = ELEM(zd1, 0, 32) | (ELEM(zm1, 0, 32) << 32);
+                m1 = ELEM(zd1, 1, 32) | (ELEM(zm1, 1, 32) << 32);
+                break;
+        }
+        env->vfp.regs[rm] = make_float64(m0);
+        env->vfp.regs[rm + 1] = make_float64(m1);
+        env->vfp.regs[rd] = make_float64(d0);
+        env->vfp.regs[rd + 1] = make_float64(d1);
+    } else {
+        uint64_t zm = float64_val(env->vfp.regs[rm]);
+        uint64_t zd = float64_val(env->vfp.regs[rd]);
+        uint64_t m = 0, d = 0;
+        switch (size) {
+            case 0:
+                d = ELEM(zd, 0, 8) | (ELEM(zm, 0, 8) << 8)
+                    | (ELEM(zd, 1, 8) << 16) | (ELEM(zm, 1, 8) << 24)
+                    | (ELEM(zd, 2, 8) << 32) | (ELEM(zm, 2, 8) << 40)
+                    | (ELEM(zd, 3, 8) << 48) | (ELEM(zm, 3, 8) << 56);
+                m = ELEM(zd, 4, 8) | (ELEM(zm, 4, 8) << 8)
+                    | (ELEM(zd, 5, 8) << 16) | (ELEM(zm, 5, 8) << 24)
+                    | (ELEM(zd, 6, 8) << 32) | (ELEM(zm, 6, 8) << 40)
+                    | (ELEM(zd, 7, 8) << 48) | (ELEM(zm, 7, 8) << 56);
+                break;
+            case 1:
+                d = ELEM(zd, 0, 16) | (ELEM(zm, 0, 16) << 16)
+                    | (ELEM(zd, 1, 16) << 32) | (ELEM(zm, 1, 16) << 48);
+                m = ELEM(zd, 2, 16) | (ELEM(zm, 2, 16) << 16)
+                    | (ELEM(zd, 3, 16) << 32) | (ELEM(zm, 3, 16) << 48);
+                break;
+            default:
+                /* size == 2 is a no-op for doubleword vectors */
+                break;
+        }
+        env->vfp.regs[rm] = make_float64(m);
+        env->vfp.regs[rd] = make_float64(d);
+    }
+}
diff --git a/target-arm/op_helper.c b/target-arm/op_helper.c
index 9b1a014..749c442 100644
--- a/target-arm/op_helper.c
+++ b/target-arm/op_helper.c
@@ -487,3 +487,127 @@ uint64_t HELPER(neon_sub_saturate_u64)(uint64_t src1, uint64_t src2)
     }
     return res;
 }
+
+void HELPER(neon_vldst_all)(CPUState *env, uint32_t insn)
+{
+#if defined(CONFIG_USER_ONLY)
+#define LDB(addr) ldub(addr)
+#define LDW(addr) lduw(addr)
+#define LDL(addr) ldl(addr)
+#define LDQ(addr) ldq(addr)
+#define STB(addr, val) stb(addr, val)
+#define STW(addr, val) stw(addr, val)
+#define STL(addr, val) stl(addr, val)
+#define STQ(addr, val) stq(addr, val)
+#else
+    int user = cpu_mmu_index(env);
+#define LDB(addr) slow_ldb_mmu(addr, user, GETPC())
+#define LDW(addr) slow_ldw_mmu(addr, user, GETPC())
+#define LDL(addr) slow_ldl_mmu(addr, user, GETPC())
+#define LDQ(addr) slow_ldq_mmu(addr, user, GETPC())
+#define STB(addr, val) slow_stb_mmu(addr, val, user, GETPC())
+#define STW(addr, val) slow_stw_mmu(addr, val, user, GETPC())
+#define STL(addr, val) slow_stl_mmu(addr, val, user, GETPC())
+#define STQ(addr, val) slow_stq_mmu(addr, val, user, GETPC())
+#endif
+    static const struct {
+        int nregs;
+        int interleave;
+        int spacing;
+    } neon_ls_element_type[11] = {
+        {4, 4, 1},
+        {4, 4, 2},
+        {4, 1, 1},
+        {4, 2, 1},
+        {3, 3, 1},
+        {3, 3, 2},
+        {3, 1, 1},
+        {1, 1, 1},
+        {2, 2, 1},
+        {2, 2, 2},
+        {2, 1, 1}
+    };
+    
+    const int op = (insn >> 8) & 0xf;
+    const int size = (insn >> 6) & 3;
+    int rd = ((insn >> 12) & 0x0f) | ((insn >> 18) & 0x10);
+    const int rn = (insn >> 16) & 0xf;
+    const int load = (insn & (1 << 21)) != 0;
+    const int nregs = neon_ls_element_type[op].nregs;
+    const int interleave = neon_ls_element_type[op].interleave;
+    const int spacing = neon_ls_element_type[op].spacing;
+    uint32_t addr = env->regs[rn];
+    const int stride = (1 << size) * interleave;
+    int i, reg;
+    uint64_t tmp64;
+    
+    for (reg = 0; reg < nregs; reg++) {
+        if (interleave > 2 || (interleave == 2 && nregs == 2)) {
+            addr = env->regs[rn] + (1 << size) * reg;
+        } else if (interleave == 2 && nregs == 4 && reg == 2) {
+            addr = env->regs[rn] + (1 << size);
+        }
+        switch (size) {
+            case 3:
+                if (load) {
+                    env->vfp.regs[rd] = make_float64(LDQ(addr));
+                } else {
+                    STQ(addr, float64_val(env->vfp.regs[rd]));
+                }
+                addr += stride;
+                break;
+            case 2:
+                if (load) {
+                    tmp64 = (uint32_t)LDL(addr);
+                    addr += stride;
+                    tmp64 |= (uint64_t)LDL(addr) << 32;
+                    addr += stride;
+                    env->vfp.regs[rd] = make_float64(tmp64);
+                } else {
+                    tmp64 = float64_val(env->vfp.regs[rd]);
+                    STL(addr, tmp64);
+                    addr += stride;
+                    STL(addr, tmp64 >> 32);
+                    addr += stride;
+                }
+                break;
+            case 1:
+                if (load) {
+                    tmp64 = 0ull;
+                    for (i = 0; i < 4; i++, addr += stride) {
+                        tmp64 |= (uint64_t)LDW(addr) << (i * 16);
+                    }
+                    env->vfp.regs[rd] = make_float64(tmp64);
+                } else {
+                    tmp64 = float64_val(env->vfp.regs[rd]);
+                    for (i = 0; i < 4; i++, addr += stride, tmp64 >>= 16) {
+                        STW(addr, tmp64);
+                    }
+                }
+                break;
+            case 0:
+                if (load) {
+                    tmp64 = 0ull;
+                    for (i = 0; i < 8; i++, addr += stride) {
+                        tmp64 |= (uint64_t)LDB(addr) << (i * 8);
+                    }
+                    env->vfp.regs[rd] = make_float64(tmp64);
+                } else {
+                    tmp64 = float64_val(env->vfp.regs[rd]);
+                    for (i = 0; i < 8; i++, addr += stride, tmp64 >>= 8) {
+                        STB(addr, tmp64);
+                    }
+                }
+                break;
+        }
+        rd += spacing;
+    }
+#undef LDB
+#undef LDW
+#undef LDL
+#undef LDQ
+#undef STB
+#undef STW
+#undef STL
+#undef STQ
+}
diff --git a/target-arm/translate.c b/target-arm/translate.c
index 3b84c1d..b214bff 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -34,6 +34,8 @@
 #define GEN_HELPER 1
 #include "helpers.h"
 
+//#define RESOURCE_LEAK_DEBUG
+
 #define ENABLE_ARCH_5J    0
 #define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
 #define ENABLE_ARCH_6K   arm_feature(env, ARM_FEATURE_V6K)
@@ -123,22 +125,97 @@ void arm_translate_init(void)
 #include "helpers.h"
 }
 
+#ifdef RESOURCE_LEAK_DEBUG
 static int num_temps;
 
 /* Allocate a temporary variable.  */
-static TCGv_i32 new_tmp(void)
+static inline TCGv_i32 new_tmp(void)
 {
     num_temps++;
     return tcg_temp_new_i32();
 }
 
+static inline TCGv_i32 new_tmplocal32(void)
+{
+    num_temps++;
+    return tcg_temp_local_new_i32();
+}
+
+static inline TCGv new_tmplocal(void)
+{
+    num_temps++;
+    return tcg_temp_local_new();
+}
+
+static inline TCGv_i64 new_tmp64(void)
+{
+    num_temps++;
+    return tcg_temp_new_i64();
+}
+
+static inline TCGv_ptr new_tmpptr(void)
+{
+    num_temps++;
+    return tcg_temp_new_ptr();
+}
+
+static inline TCGv_i32 new_const(uint32_t value)
+{
+    num_temps++;
+    return tcg_const_i32(value);
+}
+
+static inline TCGv_i64 new_const64(uint64_t value)
+{
+    num_temps++;
+    return tcg_const_i64(value);
+}
+
 /* Release a temporary variable.  */
-static void dead_tmp(TCGv tmp)
+static inline void dead_tmp(TCGv tmp)
+{
+    tcg_temp_free_i32(tmp);
+    num_temps--;
+}
+
+static inline void dead_tmp64(TCGv_i64 tmp)
+{
+    tcg_temp_free_i64(tmp);
+    num_temps--;
+}
+
+static inline void dead_tmp_(TCGv tmp)
 {
     tcg_temp_free(tmp);
     num_temps--;
 }
 
+static inline void dead_tmpptr(TCGv_ptr tmp)
+{
+    tcg_temp_free_ptr(tmp);
+    num_temps--;
+}
+
+#undef tcg_temp_local_new
+#undef tcg_temp_new_ptr
+#undef tcg_temp_free
+#undef tcg_temp_free_ptr
+#define tcg_temp_new_i32() new_tmp()
+#define tcg_temp_new_i64() new_tmp64()
+#define tcg_temp_local_new() new_tmplocal()
+#define tcg_temp_local_new_i32() new_tmplocal32()
+#define tcg_temp_new_ptr() new_tmpptr()
+#define tcg_const_i32(x) new_const(x)
+#define tcg_const_i64(x) new_const64(x)
+#define tcg_temp_free(x) dead_tmp_(x)
+#define tcg_temp_free_i32(x) dead_tmp(x)
+#define tcg_temp_free_i64(x) dead_tmp64(x)
+#define tcg_temp_free_ptr(x) dead_tmpptr(x)
+#else // RESOURCE_LEAK_DEBUG
+#define new_tmp() tcg_temp_new_i32()
+#define dead_tmp(x) tcg_temp_free_i32(x)
+#endif // RESOURCE_LEAK_DEBUG
+
 static inline TCGv load_cpu_offset(int offset)
 {
     TCGv tmp = new_tmp();
@@ -1691,8 +1768,8 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
             TCGV_UNUSED(tmp3);
         }
         gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
-        tcg_temp_free(tmp3);
-        tcg_temp_free(tmp2);
+        tcg_temp_free_i32(tmp3);
+        tcg_temp_free_i32(tmp2);
         dead_tmp(tmp);
         gen_op_iwmmxt_movq_wRn_M0(wrd);
         gen_op_iwmmxt_set_mup();
@@ -2151,7 +2228,7 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
         tmp = tcg_const_i32((insn >> 20) & 3);
         iwmmxt_load_reg(cpu_V1, rd1);
         gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
-        tcg_temp_free(tmp);
+        tcg_temp_free_i32(tmp);
         gen_op_iwmmxt_movq_wRn_M0(wrd);
         gen_op_iwmmxt_set_mup();
         break;
@@ -2207,7 +2284,7 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
         gen_op_iwmmxt_movq_M0_wRn(rd0);
         tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
         gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
-        tcg_temp_free(tmp);
+        tcg_temp_free_i32(tmp);
         gen_op_iwmmxt_movq_wRn_M0(wrd);
         gen_op_iwmmxt_set_mup();
         gen_op_iwmmxt_set_cup();
@@ -2420,7 +2497,7 @@ static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
         tmp = new_tmp();
         tmp2 = tcg_const_i32(insn);
         gen_helper_get_cp(tmp, cpu_env, tmp2);
-        tcg_temp_free(tmp2);
+        tcg_temp_free_i32(tmp2);
         store_reg(s, rd, tmp);
     } else {
         if (!env->cp[cp].cp_write)
@@ -2429,7 +2506,7 @@ static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
         tmp = load_reg(s, rd);
         tmp2 = tcg_const_i32(insn);
         gen_helper_set_cp(cpu_env, tmp2, tmp);
-        tcg_temp_free(tmp2);
+        tcg_temp_free_i32(tmp2);
         dead_tmp(tmp);
     }
     return 0;
@@ -3311,6 +3388,7 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
                     offset = 8;
                 else
                     offset = 4;
+                tmp = tcg_const_i32(offset);
                 for (i = 0; i < n; i++) {
                     if (insn & ARM_CP_RW_BIT) {
                         /* load */
@@ -3321,8 +3399,9 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
                         gen_mov_F0_vreg(dp, rd + i);
                         gen_vfp_st(s, dp, addr);
                     }
-                    tcg_gen_addi_i32(addr, addr, offset);
+                    tcg_gen_add_i32(addr, addr, tmp);
                 }
+                tcg_temp_free_i32(tmp);
                 if (insn & (1 << 21)) {
                     /* writeback */
                     if (insn & (1 << 24))
@@ -3524,12 +3603,6 @@ static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
 #define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
 #define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32
 
-/* FIXME: This is wrong.  They set the wrong overflow bit.  */
-#define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
-#define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
-#define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
-#define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)
-
 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
     switch ((size << 1) | u) { \
     case 0: \
@@ -3593,128 +3666,18 @@ static inline TCGv neon_get_scalar(int size, int reg)
 {
     TCGv tmp;
     if (size == 1) {
-        tmp = neon_load_reg(reg >> 1, reg & 1);
-    } else {
-        tmp = neon_load_reg(reg >> 2, (reg >> 1) & 1);
-        if (reg & 1) {
-            gen_neon_dup_low16(tmp);
-        } else {
+        tmp = neon_load_reg(reg & 7, reg >> 4);
+        if (reg & 8) {
             gen_neon_dup_high16(tmp);
+        } else {
+            gen_neon_dup_low16(tmp);
         }
+    } else {
+        tmp = neon_load_reg(reg & 15, reg >> 4);
     }
     return tmp;
 }
 
-static void gen_neon_unzip_u8(TCGv t0, TCGv t1)
-{
-    TCGv rd, rm, tmp;
-
-    rd = new_tmp();
-    rm = new_tmp();
-    tmp = new_tmp();
-
-    tcg_gen_andi_i32(rd, t0, 0xff);
-    tcg_gen_shri_i32(tmp, t0, 8);
-    tcg_gen_andi_i32(tmp, tmp, 0xff00);
-    tcg_gen_or_i32(rd, rd, tmp);
-    tcg_gen_shli_i32(tmp, t1, 16);
-    tcg_gen_andi_i32(tmp, tmp, 0xff0000);
-    tcg_gen_or_i32(rd, rd, tmp);
-    tcg_gen_shli_i32(tmp, t1, 8);
-    tcg_gen_andi_i32(tmp, tmp, 0xff000000);
-    tcg_gen_or_i32(rd, rd, tmp);
-
-    tcg_gen_shri_i32(rm, t0, 8);
-    tcg_gen_andi_i32(rm, rm, 0xff);
-    tcg_gen_shri_i32(tmp, t0, 16);
-    tcg_gen_andi_i32(tmp, tmp, 0xff00);
-    tcg_gen_or_i32(rm, rm, tmp);
-    tcg_gen_shli_i32(tmp, t1, 8);
-    tcg_gen_andi_i32(tmp, tmp, 0xff0000);
-    tcg_gen_or_i32(rm, rm, tmp);
-    tcg_gen_andi_i32(tmp, t1, 0xff000000);
-    tcg_gen_or_i32(t1, rm, tmp);
-    tcg_gen_mov_i32(t0, rd);
-
-    dead_tmp(tmp);
-    dead_tmp(rm);
-    dead_tmp(rd);
-}
-
-static void gen_neon_zip_u8(TCGv t0, TCGv t1)
-{
-    TCGv rd, rm, tmp;
-
-    rd = new_tmp();
-    rm = new_tmp();
-    tmp = new_tmp();
-
-    tcg_gen_andi_i32(rd, t0, 0xff);
-    tcg_gen_shli_i32(tmp, t1, 8);
-    tcg_gen_andi_i32(tmp, tmp, 0xff00);
-    tcg_gen_or_i32(rd, rd, tmp);
-    tcg_gen_shli_i32(tmp, t0, 16);
-    tcg_gen_andi_i32(tmp, tmp, 0xff0000);
-    tcg_gen_or_i32(rd, rd, tmp);
-    tcg_gen_shli_i32(tmp, t1, 24);
-    tcg_gen_andi_i32(tmp, tmp, 0xff000000);
-    tcg_gen_or_i32(rd, rd, tmp);
-
-    tcg_gen_andi_i32(rm, t1, 0xff000000);
-    tcg_gen_shri_i32(tmp, t0, 8);
-    tcg_gen_andi_i32(tmp, tmp, 0xff0000);
-    tcg_gen_or_i32(rm, rm, tmp);
-    tcg_gen_shri_i32(tmp, t1, 8);
-    tcg_gen_andi_i32(tmp, tmp, 0xff00);
-    tcg_gen_or_i32(rm, rm, tmp);
-    tcg_gen_shri_i32(tmp, t0, 16);
-    tcg_gen_andi_i32(tmp, tmp, 0xff);
-    tcg_gen_or_i32(t1, rm, tmp);
-    tcg_gen_mov_i32(t0, rd);
-
-    dead_tmp(tmp);
-    dead_tmp(rm);
-    dead_tmp(rd);
-}
-
-static void gen_neon_zip_u16(TCGv t0, TCGv t1)
-{
-    TCGv tmp, tmp2;
-
-    tmp = new_tmp();
-    tmp2 = new_tmp();
-
-    tcg_gen_andi_i32(tmp, t0, 0xffff);
-    tcg_gen_shli_i32(tmp2, t1, 16);
-    tcg_gen_or_i32(tmp, tmp, tmp2);
-    tcg_gen_andi_i32(t1, t1, 0xffff0000);
-    tcg_gen_shri_i32(tmp2, t0, 16);
-    tcg_gen_or_i32(t1, t1, tmp2);
-    tcg_gen_mov_i32(t0, tmp);
-
-    dead_tmp(tmp2);
-    dead_tmp(tmp);
-}
-
-static void gen_neon_unzip(int reg, int q, int tmp, int size)
-{
-    int n;
-    TCGv t0, t1;
-
-    for (n = 0; n < q + 1; n += 2) {
-        t0 = neon_load_reg(reg, n);
-        t1 = neon_load_reg(reg, n + 1);
-        switch (size) {
-        case 0: gen_neon_unzip_u8(t0, t1); break;
-        case 1: gen_neon_zip_u16(t0, t1); break; /* zip and unzip are the same.  */
-        case 2: /* no-op */; break;
-        default: abort();
-        }
-        neon_store_scratch(tmp + n, t0);
-        neon_store_scratch(tmp + n + 1, t1);
-    }
-}
-
 static void gen_neon_trn_u8(TCGv t0, TCGv t1)
 {
     TCGv rd, tmp;
@@ -3756,7 +3719,6 @@ static void gen_neon_trn_u16(TCGv t0, TCGv t1)
     dead_tmp(rd);
 }
 
-
 static struct {
     int nregs;
     int interleave;
@@ -3790,11 +3752,9 @@ static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
     int pass;
     int load;
     int shift;
-    int n;
     TCGv addr;
     TCGv tmp;
     TCGv tmp2;
-    TCGv_i64 tmp64;
 
     if (!vfp_enabled(env))
       return 1;
@@ -3807,103 +3767,28 @@ static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
         /* Load store all elements.  */
         op = (insn >> 8) & 0xf;
         size = (insn >> 6) & 3;
-        if (op > 10)
+        if (op > 10) {
+            dead_tmp(addr);
             return 1;
+        }
         nregs = neon_ls_element_type[op].nregs;
         interleave = neon_ls_element_type[op].interleave;
         spacing = neon_ls_element_type[op].spacing;
-        if (size == 3 && (interleave | spacing) != 1)
+        if (size == 3 && (interleave | spacing) != 1) {
+            dead_tmp(addr);
             return 1;
-        load_reg_var(s, addr, rn);
-        stride = (1 << size) * interleave;
-        for (reg = 0; reg < nregs; reg++) {
-            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
-                load_reg_var(s, addr, rn);
-                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
-            } else if (interleave == 2 && nregs == 4 && reg == 2) {
-                load_reg_var(s, addr, rn);
-                tcg_gen_addi_i32(addr, addr, 1 << size);
-            }
-            if (size == 3) {
-                if (load) {
-                    tmp64 = gen_ld64(addr, IS_USER(s));
-                    neon_store_reg64(tmp64, rd);
-                    tcg_temp_free_i64(tmp64);
-                } else {
-                    tmp64 = tcg_temp_new_i64();
-                    neon_load_reg64(tmp64, rd);
-                    gen_st64(tmp64, addr, IS_USER(s));
-                }
-                tcg_gen_addi_i32(addr, addr, stride);
-            } else {
-                for (pass = 0; pass < 2; pass++) {
-                    if (size == 2) {
-                        if (load) {
-                            tmp = gen_ld32(addr, IS_USER(s));
-                            neon_store_reg(rd, pass, tmp);
-                        } else {
-                            tmp = neon_load_reg(rd, pass);
-                            gen_st32(tmp, addr, IS_USER(s));
-                        }
-                        tcg_gen_addi_i32(addr, addr, stride);
-                    } else if (size == 1) {
-                        if (load) {
-                            tmp = gen_ld16u(addr, IS_USER(s));
-                            tcg_gen_addi_i32(addr, addr, stride);
-                            tmp2 = gen_ld16u(addr, IS_USER(s));
-                            tcg_gen_addi_i32(addr, addr, stride);
-                            gen_bfi(tmp, tmp, tmp2, 16, 0xffff);
-                            dead_tmp(tmp2);
-                            neon_store_reg(rd, pass, tmp);
-                        } else {
-                            tmp = neon_load_reg(rd, pass);
-                            tmp2 = new_tmp();
-                            tcg_gen_shri_i32(tmp2, tmp, 16);
-                            gen_st16(tmp, addr, IS_USER(s));
-                            tcg_gen_addi_i32(addr, addr, stride);
-                            gen_st16(tmp2, addr, IS_USER(s));
-                            tcg_gen_addi_i32(addr, addr, stride);
-                        }
-                    } else /* size == 0 */ {
-                        if (load) {
-                            TCGV_UNUSED(tmp2);
-                            for (n = 0; n < 4; n++) {
-                                tmp = gen_ld8u(addr, IS_USER(s));
-                                tcg_gen_addi_i32(addr, addr, stride);
-                                if (n == 0) {
-                                    tmp2 = tmp;
-                                } else {
-                                    gen_bfi(tmp2, tmp2, tmp, n * 8, 0xff);
-                                    dead_tmp(tmp);
-                                }
-                            }
-                            neon_store_reg(rd, pass, tmp2);
-                        } else {
-                            tmp2 = neon_load_reg(rd, pass);
-                            for (n = 0; n < 4; n++) {
-                                tmp = new_tmp();
-                                if (n == 0) {
-                                    tcg_gen_mov_i32(tmp, tmp2);
-                                } else {
-                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
-                                }
-                                gen_st8(tmp, addr, IS_USER(s));
-                                tcg_gen_addi_i32(addr, addr, stride);
-                            }
-                            dead_tmp(tmp2);
-                        }
-                    }
-                }
-            }
-            rd += spacing;
         }
+        tcg_gen_movi_i32(addr, insn);
+        gen_helper_neon_vldst_all(cpu_env, addr);
         stride = nregs * 8;
     } else {
         size = (insn >> 10) & 3;
         if (size == 3) {
             /* Load single element to all lanes.  */
-            if (!load)
+            if (!load) {
+                dead_tmp(addr);
                 return 1;
+            }
             size = (insn >> 6) & 3;
             nregs = ((insn >> 8) & 3) + 1;
             stride = (insn & (1 << 5)) ? 2 : 1;
@@ -3922,6 +3807,7 @@ static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
                     tmp = gen_ld32(addr, IS_USER(s));
                     break;
                 case 3:
+                    dead_tmp(addr);
                     return 1;
                 default: /* Avoid compiler warnings.  */
                     abort();
@@ -4054,6 +3940,16 @@ static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
     }
 }
 
+static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
+{
+    switch(size) {
+    case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
+    case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
+    case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
+    default: abort();
+    }
+}
+
 static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
                                          int q, int u)
 {
@@ -4074,8 +3970,8 @@ static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
     } else {
         if (u) {
             switch (size) {
-            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
-            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
+            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
+            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
             default: abort();
             }
         } else {
@@ -4189,8 +4085,9 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
     TCGv tmp, tmp2, tmp3, tmp4, tmp5;
     TCGv_i64 tmp64;
 
-    if (!vfp_enabled(env))
-      return 1;
+    if (!vfp_enabled(env)) {
+        return 1;
+    }
     q = (insn & (1 << 6)) != 0;
     u = (insn >> 24) & 1;
     VFP_DREG_D(rd, insn);
@@ -4200,73 +4097,82 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
     if ((insn & (1 << 23)) == 0) {
         /* Three register same length.  */
         op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
-        if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
-                          || op == 10 || op  == 11 || op == 16)) {
-            /* 64-bit element instructions.  */
-            for (pass = 0; pass < (q ? 2 : 1); pass++) {
-                neon_load_reg64(cpu_V0, rn + pass);
-                neon_load_reg64(cpu_V1, rm + pass);
-                switch (op) {
-                case 1: /* VQADD */
-                    if (u) {
-                        gen_helper_neon_add_saturate_u64(CPU_V001);
-                    } else {
-                        gen_helper_neon_add_saturate_s64(CPU_V001);
-                    }
-                    break;
-                case 5: /* VQSUB */
-                    if (u) {
-                        gen_helper_neon_sub_saturate_u64(CPU_V001);
-                    } else {
-                        gen_helper_neon_sub_saturate_s64(CPU_V001);
-                    }
-                    break;
-                case 8: /* VSHL */
-                    if (u) {
-                        gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
-                    } else {
-                        gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
-                    }
-                    break;
-                case 9: /* VQSHL */
-                    if (u) {
-                        gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
-                                                 cpu_V0, cpu_V0);
-                    } else {
-                        gen_helper_neon_qshl_s64(cpu_V1, cpu_env,
-                                                 cpu_V1, cpu_V0);
-                    }
-                    break;
-                case 10: /* VRSHL */
-                    if (u) {
-                        gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
-                    } else {
-                        gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
-                    }
-                    break;
-                case 11: /* VQRSHL */
-                    if (u) {
-                        gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
-                                                  cpu_V1, cpu_V0);
-                    } else {
-                        gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
-                                                  cpu_V1, cpu_V0);
-                    }
-                    break;
-                case 16:
-                    if (u) {
-                        tcg_gen_sub_i64(CPU_V001);
-                    } else {
-                        tcg_gen_add_i64(CPU_V001);
+        if (op == 24 || op == 25 || (q && ((rd | rn | rm) & 1))) {
+            return 1;
+        }
+        if (size == 3) {
+            if (op == 1 || op == 5 || op == 8 || op == 9 || op == 10
+                || op == 11 || op == 16) {
+                /* 64-bit element instructions.  */
+                for (pass = 0; pass < (q ? 2 : 1); pass++) {
+                    neon_load_reg64(cpu_V0, rn + pass);
+                    neon_load_reg64(cpu_V1, rm + pass);
+                    switch (op) {
+                    case 1: /* VQADD */
+                        if (u) {
+                            gen_helper_neon_add_saturate_u64(CPU_V001);
+                        } else {
+                            gen_helper_neon_add_saturate_s64(CPU_V001);
+                        }
+                        break;
+                    case 5: /* VQSUB */
+                        if (u) {
+                            gen_helper_neon_sub_saturate_u64(CPU_V001);
+                        } else {
+                            gen_helper_neon_sub_saturate_s64(CPU_V001);
+                        }
+                        break;
+                    case 8: /* VSHL */
+                        if (u) {
+                            gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
+                        } else {
+                            gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
+                        }
+                        break;
+                    case 9: /* VQSHL */
+                        if (u) {
+                            gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
+                                                     cpu_V0, cpu_V0);
+                        } else {
+                            gen_helper_neon_qshl_s64(cpu_V1, cpu_env,
+                                                     cpu_V1, cpu_V0);
+                        }
+                        break;
+                    case 10: /* VRSHL */
+                        if (u) {
+                            gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
+                        } else {
+                            gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
+                        }
+                        break;
+                    case 11: /* VQRSHL */
+                        if (u) {
+                            gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
+                                                      cpu_V1, cpu_V0);
+                        } else {
+                            gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
+                                                      cpu_V1, cpu_V0);
+                        }
+                        break;
+                    case 16: /* VADD, VSUB */
+                        if (u) {
+                            tcg_gen_sub_i64(CPU_V001);
+                        } else {
+                            tcg_gen_add_i64(CPU_V001);
+                        }
+                        break;
+                    default:
+                        abort();
                     }
-                    break;
-                default:
-                    abort();
+                    neon_store_reg64(cpu_V0, rd + pass);
                 }
-                neon_store_reg64(cpu_V0, rd + pass);
+                return 0;
+            }
+            if (op != 3) {
+                return 1;
             }
-            return 0;
         }
+        pairwise = 0;
         switch (op) {
         case 8: /* VSHL */
         case 9: /* VQSHL */
@@ -4278,290 +4184,333 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                 rtmp = rn;
                 rn = rm;
                 rm = rtmp;
-                pairwise = 0;
             }
             break;
+        case 19: /* VMUL */
+            if (u && size) {
+                return 1;
+            }
+            break;
+        case 23: /* VPADD */
+            if (u) {
+                return 1;
+            }
+            /* fall through */
         case 20: /* VPMAX */
         case 21: /* VPMIN */
-        case 23: /* VPADD */
             pairwise = 1;
             break;
-        case 26: /* VPADD (float) */
+        case 22: /* VQDMULH/VQRDMULH */
+            if (!size) {
+                return 1;
+            }
+            break;
+        case 26: /* VADD/VSUB/VPADD/VABD (float) */
             pairwise = (u && size < 2);
+            /* fall through */
+        case 27: /* VMLA/VMLS/VMUL (float) */
+            if (size & 1) {
+                return 1;
+            }
+            break;
+        case 28: /* VCEQ/VCGE/VCGT (float) */
+            if ((!u && size) || (size & 1)) {
+                return 1;
+            }
+            break;
+        case 29: /* VACGE/VACGT (float) */
+            if (!u || (size & 1)) {
+                return 1;
+            }
             break;
         case 30: /* VPMIN/VPMAX (float) */
             pairwise = u;
+            if (size & 1) {
+                return 1;
+            }
+            break;
+        case 31: /* VRECPS/VRSQRTS */
+            if (u || (size & 1)) {
+                return 1;
+            }
             break;
         default:
-            pairwise = 0;
             break;
         }
+        if (pairwise && q) {
+            return 1;
+        }
 
         for (pass = 0; pass < (q ? 4 : 2); pass++) {
-
-        if (pairwise) {
-            /* Pairwise.  */
-            if (q)
-                n = (pass & 1) * 2;
-            else
-                n = 0;
-            if (pass < q + 1) {
-                tmp = neon_load_reg(rn, n);
-                tmp2 = neon_load_reg(rn, n + 1);
+            if (pairwise) {
+                /* Pairwise.  */
+                if (!pass) {
+                    tmp = neon_load_reg(rn, 0);
+                    tmp2 = neon_load_reg(rn, 1);
+                } else {
+                    tmp = neon_load_reg(rm, 0);
+                    tmp2 = neon_load_reg(rm, 1);
+                }
             } else {
-                tmp = neon_load_reg(rm, n);
-                tmp2 = neon_load_reg(rm, n + 1);
+                /* Elementwise.  */
+                tmp = neon_load_reg(rn, pass);
+                tmp2 = neon_load_reg(rm, pass);
             }
-        } else {
-            /* Elementwise.  */
-            tmp = neon_load_reg(rn, pass);
-            tmp2 = neon_load_reg(rm, pass);
-        }
-        switch (op) {
-        case 0: /* VHADD */
-            GEN_NEON_INTEGER_OP(hadd);
-            break;
-        case 1: /* VQADD */
-            GEN_NEON_INTEGER_OP_ENV(qadd);
-            break;
-        case 2: /* VRHADD */
-            GEN_NEON_INTEGER_OP(rhadd);
-            break;
-        case 3: /* Logic ops.  */
-            switch ((u << 2) | size) {
-            case 0: /* VAND */
-                tcg_gen_and_i32(tmp, tmp, tmp2);
+            switch (op) {
+            case 0: /* VHADD */
+                GEN_NEON_INTEGER_OP(hadd);
                 break;
-            case 1: /* BIC */
-                tcg_gen_andc_i32(tmp, tmp, tmp2);
+            case 1: /* VQADD */
+                GEN_NEON_INTEGER_OP_ENV(qadd);
                 break;
-            case 2: /* VORR */
-                tcg_gen_or_i32(tmp, tmp, tmp2);
+            case 2: /* VRHADD */
+                GEN_NEON_INTEGER_OP(rhadd);
                 break;
-            case 3: /* VORN */
-                tcg_gen_orc_i32(tmp, tmp, tmp2);
+            case 3: /* Logic ops.  */
+                switch ((u << 2) | size) {
+                case 0: /* VAND */
+                    tcg_gen_and_i32(tmp, tmp, tmp2);
+                    break;
+                case 1: /* VBIC */
+                    tcg_gen_andc_i32(tmp, tmp, tmp2);
+                    break;
+                case 2: /* VORR, VMOV */
+                    tcg_gen_or_i32(tmp, tmp, tmp2);
+                    break;
+                case 3: /* VORN */
+                    tcg_gen_orc_i32(tmp, tmp, tmp2);
+                    break;
+                case 4: /* VEOR */
+                    tcg_gen_xor_i32(tmp, tmp, tmp2);
+                    break;
+                case 5: /* VBSL */
+                    tmp3 = neon_load_reg(rd, pass);
+                    gen_neon_bsl(tmp, tmp, tmp2, tmp3);
+                    dead_tmp(tmp3);
+                    break;
+                case 6: /* VBIT */
+                    tmp3 = neon_load_reg(rd, pass);
+                    gen_neon_bsl(tmp, tmp, tmp3, tmp2);
+                    dead_tmp(tmp3);
+                    break;
+                case 7: /* VBIF */
+                    tmp3 = neon_load_reg(rd, pass);
+                    gen_neon_bsl(tmp, tmp3, tmp, tmp2);
+                    dead_tmp(tmp3);
+                    break;
+                }
                 break;
-            case 4: /* VEOR */
-                tcg_gen_xor_i32(tmp, tmp, tmp2);
+            case 4: /* VHSUB */
+                GEN_NEON_INTEGER_OP(hsub);
                 break;
-            case 5: /* VBSL */
-                tmp3 = neon_load_reg(rd, pass);
-                gen_neon_bsl(tmp, tmp, tmp2, tmp3);
-                dead_tmp(tmp3);
+            case 5: /* VQSUB */
+                GEN_NEON_INTEGER_OP_ENV(qsub);
                 break;
-            case 6: /* VBIT */
-                tmp3 = neon_load_reg(rd, pass);
-                gen_neon_bsl(tmp, tmp, tmp3, tmp2);
-                dead_tmp(tmp3);
+            case 6: /* VCGT */
+                GEN_NEON_INTEGER_OP(cgt);
                 break;
-            case 7: /* VBIF */
-                tmp3 = neon_load_reg(rd, pass);
-                gen_neon_bsl(tmp, tmp3, tmp, tmp2);
-                dead_tmp(tmp3);
+            case 7: /* VCGE */
+                GEN_NEON_INTEGER_OP(cge);
                 break;
-            }
-            break;
-        case 4: /* VHSUB */
-            GEN_NEON_INTEGER_OP(hsub);
-            break;
-        case 5: /* VQSUB */
-            GEN_NEON_INTEGER_OP_ENV(qsub);
-            break;
-        case 6: /* VCGT */
-            GEN_NEON_INTEGER_OP(cgt);
-            break;
-        case 7: /* VCGE */
-            GEN_NEON_INTEGER_OP(cge);
-            break;
-        case 8: /* VSHL */
-            GEN_NEON_INTEGER_OP(shl);
-            break;
-        case 9: /* VQSHL */
-            GEN_NEON_INTEGER_OP_ENV(qshl);
-            break;
-        case 10: /* VRSHL */
-            GEN_NEON_INTEGER_OP(rshl);
-            break;
-        case 11: /* VQRSHL */
-            GEN_NEON_INTEGER_OP_ENV(qrshl);
-            break;
-        case 12: /* VMAX */
-            GEN_NEON_INTEGER_OP(max);
-            break;
-        case 13: /* VMIN */
-            GEN_NEON_INTEGER_OP(min);
-            break;
-        case 14: /* VABD */
-            GEN_NEON_INTEGER_OP(abd);
-            break;
-        case 15: /* VABA */
-            GEN_NEON_INTEGER_OP(abd);
-            dead_tmp(tmp2);
-            tmp2 = neon_load_reg(rd, pass);
-            gen_neon_add(size, tmp, tmp2);
-            break;
-        case 16:
-            if (!u) { /* VADD */
-                if (gen_neon_add(size, tmp, tmp2))
-                    return 1;
-            } else { /* VSUB */
-                switch (size) {
-                case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
-                case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
-                case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
-                default: return 1;
-                }
-            }
-            break;
-        case 17:
-            if (!u) { /* VTST */
-                switch (size) {
-                case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
-                case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
-                case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
-                default: return 1;
+            case 8: /* VSHL */
+                GEN_NEON_INTEGER_OP(shl);
+                break;
+            case 9: /* VQSHL */
+                GEN_NEON_INTEGER_OP_ENV(qshl);
+                break;
+            case 10: /* VRSHL */
+                GEN_NEON_INTEGER_OP(rshl);
+                break;
+            case 11: /* VQRSHL */
+                GEN_NEON_INTEGER_OP_ENV(qrshl);
+                break;
+            case 12: /* VMAX */
+                GEN_NEON_INTEGER_OP(max);
+                break;
+            case 13: /* VMIN */
+                GEN_NEON_INTEGER_OP(min);
+                break;
+            case 14: /* VABD */
+                GEN_NEON_INTEGER_OP(abd);
+                break;
+            case 15: /* VABA */
+                GEN_NEON_INTEGER_OP(abd);
+                dead_tmp(tmp2);
+                tmp2 = neon_load_reg(rd, pass);
+                gen_neon_add(size, tmp, tmp2);
+                break;
+            case 16:
+                if (!u) { /* VADD */
+                    if (gen_neon_add(size, tmp, tmp2)) {
+                        abort(); /* size == 3 is handled earlier */
+                    }
+                } else { /* VSUB */
+                    switch (size) {
+                    case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
+                    case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
+                    case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
+                    default: abort(); /* size == 3 is handled earlier */
+                    }
                 }
-            } else { /* VCEQ */
-                switch (size) {
-                case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
-                case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
-                case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
-                default: return 1;
+                break;
+            case 17:
+                if (!u) { /* VTST */
+                    switch (size) {
+                    case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
+                    case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
+                    case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
+                    default: abort(); /* size == 3 is handled earlier */
+                    }
+                } else { /* VCEQ */
+                    switch (size) {
+                    case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
+                    case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
+                    case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
+                    default: abort(); /* size == 3 is handled earlier */
+                    }
                 }
-            }
-            break;
-        case 18: /* Multiply.  */
-            switch (size) {
-            case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
-            case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
-            case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
-            default: return 1;
-            }
-            dead_tmp(tmp2);
-            tmp2 = neon_load_reg(rd, pass);
-            if (u) { /* VMLS */
-                gen_neon_rsb(size, tmp, tmp2);
-            } else { /* VMLA */
-                gen_neon_add(size, tmp, tmp2);
-            }
-            break;
-        case 19: /* VMUL */
-            if (u) { /* polynomial */
-                gen_helper_neon_mul_p8(tmp, tmp, tmp2);
-            } else { /* Integer */
+                break;
+            case 18: /* Multiply.  */
                 switch (size) {
                 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
                 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
                 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
-                default: return 1;
+                default: abort(); /* size == 3 is handled earlier */
                 }
-            }
-            break;
-        case 20: /* VPMAX */
-            GEN_NEON_INTEGER_OP(pmax);
-            break;
-        case 21: /* VPMIN */
-            GEN_NEON_INTEGER_OP(pmin);
-            break;
-        case 22: /* Hultiply high.  */
-            if (!u) { /* VQDMULH */
-                switch (size) {
-                case 1: gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
-                case 2: gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
-                default: return 1;
+                dead_tmp(tmp2);
+                tmp2 = neon_load_reg(rd, pass);
+                if (u) { /* VMLS */
+                    gen_neon_rsb(size, tmp, tmp2);
+                } else { /* VMLA */
+                    gen_neon_add(size, tmp, tmp2);
+                }
+                break;
+            case 19: /* VMUL */
+                if (u) { /* polynomial */
+                    gen_helper_neon_mul_p8(tmp, tmp, tmp2);
+                } else { /* Integer */
+                    switch (size) {
+                    case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
+                    case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
+                    case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
+                    default: abort(); /* size == 3 is handled earlier */
+                    }
+                }
+                break;
+            case 20: /* VPMAX */
+                GEN_NEON_INTEGER_OP(pmax);
+                break;
+            case 21: /* VPMIN */
+                GEN_NEON_INTEGER_OP(pmin);
+                break;
+            case 22: /* Multiply high.  */
+                if (!u) { /* VQDMULH */
+                    switch (size) {
+                    case 1:
+                        gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
+                        break;
+                    case 2:
+                        gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
+                        break;
+                    default:
+                        abort(); /* size == 0,3 is handled earlier */
+                    }
+                } else { /* VQRDMULH */
+                    switch (size) {
+                    case 1:
+                        gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
+                        break;
+                    case 2:
+                        gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
+                        break;
+                    default:
+                        abort(); /* size == 0,3 is handled earlier */
+                    }
                 }
-            } else { /* VQRDHMUL */
+                break;
+            case 23: /* VPADD */
                 switch (size) {
-                case 1: gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
-                case 2: gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
-                default: return 1;
+                case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
+                case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
+                case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
+                default: abort(); /* size == 3 is handled earlier */
+                }
+                break;
+            case 26: /* Floating point arithmetic.  */
+                switch ((u << 2) | size) {
+                case 0: /* VADD */
+                    gen_helper_neon_add_f32(tmp, tmp, tmp2);
+                    break;
+                case 2: /* VSUB */
+                    gen_helper_neon_sub_f32(tmp, tmp, tmp2);
+                    break;
+                case 4: /* VPADD */
+                    gen_helper_neon_add_f32(tmp, tmp, tmp2);
+                    break;
+                case 6: /* VABD */
+                    gen_helper_neon_abd_f32(tmp, tmp, tmp2);
+                    break;
+                default:
+                    abort(); /* other values are handled earlier */
+                }
+                break;
+            case 27: /* Float multiply.  */
+                gen_helper_neon_mul_f32(tmp, tmp, tmp2);
+                if (!u) {
+                    dead_tmp(tmp2);
+                    tmp2 = neon_load_reg(rd, pass);
+                    if (size == 0) {
+                        gen_helper_neon_add_f32(tmp, tmp, tmp2);
+                    } else {
+                        gen_helper_neon_sub_f32(tmp, tmp2, tmp);
+                    }
                 }
-            }
-            break;
-        case 23: /* VPADD */
-            if (u)
-                return 1;
-            switch (size) {
-            case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
-            case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
-            case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
-            default: return 1;
-            }
-            break;
-        case 26: /* Floating point arithnetic.  */
-            switch ((u << 2) | size) {
-            case 0: /* VADD */
-                gen_helper_neon_add_f32(tmp, tmp, tmp2);
                 break;
-            case 2: /* VSUB */
-                gen_helper_neon_sub_f32(tmp, tmp, tmp2);
+            case 28: /* Float compare.  */
+                if (!u) {
+                    gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
+                } else {
+                    if (size == 0) {
+                        gen_helper_neon_cge_f32(tmp, tmp, tmp2);
+                    } else {
+                        gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
+                    }
+                }
                 break;
-            case 4: /* VPADD */
-                gen_helper_neon_add_f32(tmp, tmp, tmp2);
+            case 29: /* Float compare absolute.  */
+                if (size == 0) {
+                    gen_helper_neon_acge_f32(tmp, tmp, tmp2);
+                } else {
+                    gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
+                }
                 break;
-            case 6: /* VABD */
-                gen_helper_neon_abd_f32(tmp, tmp, tmp2);
+            case 30: /* Float min/max.  */
+                if (size == 0) {
+                    gen_helper_neon_max_f32(tmp, tmp, tmp2);
+                } else {
+                    gen_helper_neon_min_f32(tmp, tmp, tmp2);
+                }
                 break;
-            default:
-                return 1;
-            }
-            break;
-        case 27: /* Float multiply.  */
-            gen_helper_neon_mul_f32(tmp, tmp, tmp2);
-            if (!u) {
-                dead_tmp(tmp2);
-                tmp2 = neon_load_reg(rd, pass);
+            case 31:
                 if (size == 0) {
-                    gen_helper_neon_add_f32(tmp, tmp, tmp2);
+                    gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
                 } else {
-                    gen_helper_neon_sub_f32(tmp, tmp2, tmp);
+                    gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
                 }
+                break;
+            default:
+                abort();
             }
-            break;
-        case 28: /* Float compare.  */
-            if (!u) {
-                gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
+            dead_tmp(tmp2);
+
+            /* Save the result.  For elementwise operations we can put it
+               straight into the destination register.  For pairwise operations
+               we have to be careful to avoid clobbering the source operands. */
+            if (pairwise && rd == rm) {
+                neon_store_scratch(pass, tmp);
             } else {
-                if (size == 0)
-                    gen_helper_neon_cge_f32(tmp, tmp, tmp2);
-                else
-                    gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
+                neon_store_reg(rd, pass, tmp);
             }
-            break;
-        case 29: /* Float compare absolute.  */
-            if (!u)
-                return 1;
-            if (size == 0)
-                gen_helper_neon_acge_f32(tmp, tmp, tmp2);
-            else
-                gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
-            break;
-        case 30: /* Float min/max.  */
-            if (size == 0)
-                gen_helper_neon_max_f32(tmp, tmp, tmp2);
-            else
-                gen_helper_neon_min_f32(tmp, tmp, tmp2);
-            break;
-        case 31:
-            if (size == 0)
-                gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
-            else
-                gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
-            break;
-        default:
-            abort();
-        }
-        dead_tmp(tmp2);
-
-        /* Save the result.  For elementwise operations we can put it
-           straight into the destination register.  For pairwise operations
-           we have to be careful to avoid clobbering the source operands.  */
-        if (pairwise && rd == rm) {
-            neon_store_scratch(pass, tmp);
-        } else {
-            neon_store_reg(rd, pass, tmp);
-        }
-
         } /* for pass */
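
For reference, the pairwise-store rule described in the comment above can be
modelled in plain C. This is only a toy sketch (not part of the patch; lane
values and names are made up) showing why a pairwise op with rd == rm has to
buffer its results instead of writing destination lanes as they are produced:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        int reg[4] = {1, 2, 30, 40};   /* rd and rm alias the same register */
        int scratch[4];
        int pass;

        for (pass = 0; pass < 2; pass++) {
            /* pairwise add: both result halves read the same aliased lanes */
            scratch[pass]     = reg[2 * pass] + reg[2 * pass + 1];
            scratch[pass + 2] = reg[2 * pass] + reg[2 * pass + 1];
        }
        /* Writing reg[0] and reg[2] directly in pass 0 would clobber reg[2]
           before pass 1 reads it; copying back afterwards avoids that.  */
        memcpy(reg, scratch, sizeof(reg));
        printf("%d %d %d %d\n", reg[0], reg[1], reg[2], reg[3]); /* 3 70 3 70 */
        return 0;
    }
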
         if (pairwise && rd == rm) {
             for (pass = 0; pass < (q ? 4 : 2); pass++) {
@@ -4575,23 +4524,32 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
             /* Two registers and shift.  */
             op = (insn >> 8) & 0xf;
             if (insn & (1 << 7)) {
+                if (op & 8) {
+                    return 1;
+                }
                 /* 64-bit shift.   */
                 size = 3;
             } else {
                 size = 2;
-                while ((insn & (1 << (size + 19))) == 0)
+                while ((insn & (1 << (size + 19))) == 0) {
                     size--;
+                }
             }
             shift = (insn >> 16) & ((1 << (3 + size)) - 1);
             /* To avoid excessive dumplication of ops we implement shift
                by immediate using the variable shift operations.  */
             if (op < 8) {
                 /* Shift by immediate:
-                   VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU.  */
+                   VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VSLI, VQSHL, VQSHLU */
                 /* Right shifts are encoded as N - shift, where N is the
                    element size in bits.  */
-                if (op <= 4)
+                if ((q && ((rd | rm) & 1))
+                    || (!u && (op == 4 || op == 6))) {
+                    return 1;
+                }
+                if (op <= 4) {
                     shift = shift - (1 << (size + 3));
+                }
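
As a worked example of the "N - shift" encoding mentioned above, this
standalone sketch (plain C, not part of the patch) decodes VSHR.S8 #3, whose
imm6 field is 16 - 3 = 13, and ends up with the negative count that the
variable-shift helpers interpret as a right shift:

    #include <stdio.h>

    int main(void)
    {
        int size = 0;                                /* 8-bit elements      */
        int imm6 = 13;                               /* encoded as 16 - 3   */
        int shift = imm6 & ((1 << (3 + size)) - 1);  /* 13 & 7 == 5         */

        shift = shift - (1 << (size + 3));           /* 5 - 8 == -3         */
        printf("effective shift = %d\n", shift);     /* right shift by 3    */
        return 0;
    }
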
                 if (size == 3) {
                     count = q + 1;
                 } else {
@@ -4622,34 +4580,42 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                         switch (op) {
                         case 0:  /* VSHR */
                         case 1:  /* VSRA */
-                            if (u)
-                                gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
-                            else
-                                gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
+                            if (u) {
+                                gen_helper_neon_shl_u64(cpu_V0, cpu_V0,
+                                                        cpu_V1);
+                            } else {
+                                gen_helper_neon_shl_s64(cpu_V0, cpu_V0,
+                                                        cpu_V1);
+                            }
                             break;
                         case 2: /* VRSHR */
                         case 3: /* VRSRA */
-                            if (u)
-                                gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
-                            else
-                                gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
+                            if (u) {
+                                gen_helper_neon_rshl_u64(cpu_V0, cpu_V0,
+                                                         cpu_V1);
+                            } else {
+                                gen_helper_neon_rshl_s64(cpu_V0, cpu_V0,
+                                                         cpu_V1);
+                            }
                             break;
                         case 4: /* VSRI */
-                            if (!u)
-                                return 1;
                             gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
                             break;
                         case 5: /* VSHL, VSLI */
                             gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
                             break;
-                        case 6: /* VQSHL */
-                            if (u)
-                                gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
-                            else
-                                gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
+                        case 6: /* VQSHLU */
+                            gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0,
+                                                     cpu_V1);
                             break;
-                        case 7: /* VQSHLU */
-                            gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
+                        case 7: /* VQSHL/VQSHLU */
+                            if (u) {
+                                gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
+                                                         cpu_V0, cpu_V1);
+                            } else {
+                                gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
+                                                         cpu_V0, cpu_V1);
+                            }
                             break;
                         }
                         if (op == 1 || op == 3) {
@@ -4658,14 +4624,22 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                             tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
                         } else if (op == 4 || (op == 5 && u)) {
                             /* Insert */
-                            cpu_abort(env, "VS[LR]I.64 not implemented");
+                            neon_load_reg64(cpu_V1, rd + pass);
+                            uint64_t mask;
+                            if (op == 4) {
+                                mask = 0xffffffffffffffffull >> -shift;
+                            } else {
+                                mask = 0xffffffffffffffffull << shift;
+                            }
+                            tcg_gen_andi_i64(cpu_V0, cpu_V0, mask);
+                            tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
+                            tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
                         }
                         neon_store_reg64(cpu_V0, rd + pass);
                     } else { /* size < 3 */
                         /* Operands in T0 and T1.  */
                         tmp = neon_load_reg(rm, pass);
-                        tmp2 = new_tmp();
-                        tcg_gen_movi_i32(tmp2, imm);
+                        tmp2 = tcg_const_i32(imm);
                         switch (op) {
                         case 0:  /* VSHR */
                         case 1:  /* VSRA */
@@ -4676,31 +4650,44 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                             GEN_NEON_INTEGER_OP(rshl);
                             break;
                         case 4: /* VSRI */
-                            if (!u)
-                                return 1;
-                            GEN_NEON_INTEGER_OP(shl);
-                            break;
                         case 5: /* VSHL, VSLI */
                             switch (size) {
-                            case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
-                            case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
-                            case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
-                            default: return 1;
+                            case 0:
+                                gen_helper_neon_shl_u8(tmp, tmp, tmp2);
+                                break;
+                            case 1:
+                                gen_helper_neon_shl_u16(tmp, tmp, tmp2);
+                                break;
+                            case 2:
+                                gen_helper_neon_shl_u32(tmp, tmp, tmp2);
+                                break;
+                            default:
+                                abort(); /* size == 3 is handled earlier */
                             }
                             break;
-                        case 6: /* VQSHL */
+                        case 6: /* VQSHLU */
                             GEN_NEON_INTEGER_OP_ENV(qshl);
                             break;
                         case 7: /* VQSHLU */
                             switch (size) {
-                            case 0: gen_helper_neon_qshl_u8(tmp, cpu_env, tmp, tmp2); break;
-                            case 1: gen_helper_neon_qshl_u16(tmp, cpu_env, tmp, tmp2); break;
-                            case 2: gen_helper_neon_qshl_u32(tmp, cpu_env, tmp, tmp2); break;
-                            default: return 1;
+                            case 0:
+                                gen_helper_neon_qshl_u8(tmp, cpu_env, tmp,
+                                                        tmp2);
+                                break;
+                            case 1:
+                                gen_helper_neon_qshl_u16(tmp, cpu_env, tmp,
+                                                         tmp2);
+                                break;
+                            case 2:
+                                gen_helper_neon_qshl_u32(tmp, cpu_env, tmp,
+                                                         tmp2);
+                                break;
+                            default:
+                                abort(); /* size == 3 is handled earlier */
                             }
                             break;
                         }
-                        dead_tmp(tmp2);
+                        tcg_temp_free_i32(tmp2);
 
                         if (op == 1 || op == 3) {
                             /* Accumulate.  */
@@ -4711,32 +4698,35 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                             /* Insert */
                             switch (size) {
                             case 0:
-                                if (op == 4)
+                                if (op == 4) {
                                     mask = 0xff >> -shift;
-                                else
+                                } else {
                                     mask = (uint8_t)(0xff << shift);
+                                }
                                 mask |= mask << 8;
                                 mask |= mask << 16;
                                 break;
                             case 1:
-                                if (op == 4)
+                                if (op == 4) {
                                     mask = 0xffff >> -shift;
-                                else
+                                } else {
                                     mask = (uint16_t)(0xffff << shift);
+                                }
                                 mask |= mask << 16;
                                 break;
                             case 2:
                                 if (shift < -31 || shift > 31) {
                                     mask = 0;
                                 } else {
-                                    if (op == 4)
+                                    if (op == 4) {
                                         mask = 0xffffffffu >> -shift;
-                                    else
+                                    } else {
                                         mask = 0xffffffffu << shift;
+                                    }
                                 }
                                 break;
                             default:
-                                abort();
+                                abort(); /* size == 3 is handled earlier */
                             }
                             tmp2 = neon_load_reg(rd, pass);
                             tcg_gen_andi_i32(tmp, tmp, mask);
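
The insert masks used for VSRI/VSLI (the 64-bit path earlier and the
lane-replicated 32-bit masks above) reduce to "take the shifted bits from the
source, keep the remaining bits from the destination". A minimal standalone
model, assuming 0 < n < 64 and written in plain C rather than TCG:

    #include <stdio.h>
    #include <stdint.h>

    /* VSRI: src shifted right by n is inserted; dst keeps its top n bits.  */
    static uint64_t vsri64(uint64_t dst, uint64_t src, int n)
    {
        uint64_t mask = ~(uint64_t)0 >> n;     /* bits written by the shift */
        return ((src >> n) & mask) | (dst & ~mask);
    }

    /* VSLI: src shifted left by n is inserted; dst keeps its low n bits.  */
    static uint64_t vsli64(uint64_t dst, uint64_t src, int n)
    {
        uint64_t mask = ~(uint64_t)0 << n;
        return ((src << n) & mask) | (dst & ~mask);
    }

    int main(void)
    {
        uint64_t dst = 0xffffffffffffffffull;
        uint64_t src = 0x0123456789abcdefull;

        printf("vsri: %016llx\n", (unsigned long long)vsri64(dst, src, 8));
        printf("vsli: %016llx\n", (unsigned long long)vsli64(dst, src, 8));
        return 0;
    }
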
@@ -4749,7 +4739,10 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                 } /* for pass */
             } else if (op < 10) {
                 /* Shift by immediate and narrow:
-                   VSHRN, VRSHRN, VQSHRN, VQRSHRN.  */
+                   VSHRN, VRSHRN, VQSHRN, VQSHRUN, VQRSHRN, VQRSHRUN */
+                if (rm & 1) {
+                    return 1;
+                }
                 shift = shift - (1 << (size + 3));
                 size++;
                 switch (size) {
@@ -4776,51 +4769,61 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                     if (size == 3) {
                         neon_load_reg64(cpu_V0, rm + pass);
                         if (q) {
-                          if (u)
-                            gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64);
-                          else
-                            gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64);
+                            if ((op == 8 && !u) || (op == 9 && u)) {
+                                gen_helper_neon_rshl_u64(cpu_V0, cpu_V0,
+                                                         tmp64);
+                            } else {
+                                gen_helper_neon_rshl_s64(cpu_V0, cpu_V0,
+                                                         tmp64);
+                            }
                         } else {
-                          if (u)
-                            gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64);
-                          else
-                            gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64);
+                            if ((op == 8 && !u) || (op == 9 && u)) {
+                                gen_helper_neon_shl_u64(cpu_V0, cpu_V0,
+                                                        tmp64);
+                            } else {
+                                gen_helper_neon_shl_s64(cpu_V0, cpu_V0,
+                                                        tmp64);
+                            }
                         }
                     } else {
                         tmp = neon_load_reg(rm + pass, 0);
-                        gen_neon_shift_narrow(size, tmp, tmp2, q, u);
+                        gen_neon_shift_narrow(size, tmp, tmp2, q,
+                                              (op == 8) ? !u : u);
                         tmp3 = neon_load_reg(rm + pass, 1);
-                        gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
+                        gen_neon_shift_narrow(size, tmp3, tmp2, q,
+                                              (op == 8) ? !u : u);
                         tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
                         dead_tmp(tmp);
                         dead_tmp(tmp3);
                     }
                     tmp = new_tmp();
-                    if (op == 8 && !u) {
-                        gen_neon_narrow(size - 1, tmp, cpu_V0);
-                    } else {
-                        if (op == 8)
-                            gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
-                        else
-                            gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
+                    if (op == 8) {
+                        if (u) { /* VQSHRUN / VQRSHRUN */
+                            gen_neon_unarrow_sats(size - 1, tmp, cpu_V0);
+                        } else { /* VSHRN / VRSHRN */
+                            gen_neon_narrow(size - 1, tmp, cpu_V0);
+                        }
+                    } else { /* VQSHRN / VQRSHRN */
+                        gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
                     }
                     neon_store_reg(rd, pass, tmp);
                 } /* for pass */
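
The narrowing flavours selected in the loop above differ only in how
out-of-range values are treated. A rough 32-to-16-bit model in plain C (these
are not the patch's helpers; the names are made up for illustration):

    #include <stdio.h>
    #include <stdint.h>

    static uint16_t narrow_trunc(int32_t x)      /* VSHRN/VRSHRN: truncate */
    {
        return (uint16_t)x;
    }

    static int16_t narrow_sat_signed(int32_t x)  /* VQSHRN, signed flavour */
    {
        if (x > INT16_MAX) return INT16_MAX;
        if (x < INT16_MIN) return INT16_MIN;
        return (int16_t)x;
    }

    static uint16_t unarrow_sat(int32_t x)   /* VQSHRUN: signed in, unsigned out */
    {
        if (x < 0) return 0;
        if (x > UINT16_MAX) return UINT16_MAX;
        return (uint16_t)x;
    }

    int main(void)
    {
        int32_t v = -5;
        printf("%u %d %u\n", narrow_trunc(v), narrow_sat_signed(v), unarrow_sat(v));
        return 0;
    }
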
                 if (size == 3) {
                     tcg_temp_free_i64(tmp64);
                 } else {
-                    dead_tmp(tmp2);
+                    tcg_temp_free_i32(tmp2);
                 }
             } else if (op == 10) {
                 /* VSHLL */
-                if (q || size == 3)
+                if (q) {
                     return 1;
+                }
                 tmp = neon_load_reg(rm, 0);
                 tmp2 = neon_load_reg(rm, 1);
                 for (pass = 0; pass < 2; pass++) {
-                    if (pass == 1)
+                    if (pass == 1) {
                         tmp = tmp2;
-
+                    }
                     gen_neon_widen(cpu_V0, tmp, size, u);
 
                     if (shift != 0) {
@@ -4841,22 +4844,29 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                     }
                     neon_store_reg64(cpu_V0, rd + pass);
                 }
-            } else if (op == 15 || op == 16) {
+            } else if (op == 14 || op == 15) {
                 /* VCVT fixed-point.  */
+                if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
+                    return 1;
+                }
                 for (pass = 0; pass < (q ? 4 : 2); pass++) {
-                    tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
+                    tcg_gen_ld_f32(cpu_F0s, cpu_env,
+                                   neon_reg_offset(rm, pass));
                     if (op & 1) {
-                        if (u)
+                        if (u) {
                             gen_vfp_ulto(0, shift);
-                        else
+                        } else {
                             gen_vfp_slto(0, shift);
+                        }
                     } else {
-                        if (u)
+                        if (u) {
                             gen_vfp_toul(0, shift);
-                        else
+                        } else {
                             gen_vfp_tosl(0, shift);
+                        }
                     }
-                    tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
+                    tcg_gen_st_f32(cpu_F0s, cpu_env,
+                                   neon_reg_offset(rd, pass));
                 }
             } else {
                 return 1;
@@ -4895,10 +4905,14 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                 break;
             case 14:
                 imm |= (imm << 8) | (imm << 16) | (imm << 24);
-                if (invert)
+                if (invert) {
                     imm = ~imm;
+                }
                 break;
             case 15:
+                if (invert) {
+                    return 1;
+                }
                 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
                       | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
                 break;
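
Case 15 expands the 8-bit "abcdefgh" immediate into the usual single-precision
pattern a : NOT(b) : bbbbb : cdefgh followed by 19 zero bits. A small
standalone check of that expansion (plain C, not part of the patch):

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t expand_fp_imm8(uint32_t imm)
    {
        return ((imm & 0x80) << 24)            /* a      -> bit 31          */
             | ((imm & 0x3f) << 19)            /* cdefgh -> bits 24..19     */
             | ((imm & 0x40) ? (0x1f << 25)    /* b == 1: bbbbb, NOT(b) = 0 */
                             : (1u << 30));    /* b == 0: NOT(b) = 1        */
    }

    int main(void)
    {
        /* 0x70 (abcdefgh == 0111 0000) expands to 0x3f800000, i.e. 1.0f. */
        printf("0x%08x\n", expand_fp_imm8(0x70));
        return 0;
    }
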
@@ -4937,36 +4951,22 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
     } else { /* (insn & 0x00800010 == 0x00800000) */
         if (size != 3) {
             op = (insn >> 8) & 0xf;
-            if ((insn & (1 << 6)) == 0) {
+            if (!q) {
                 /* Three registers of different lengths.  */
-                int src1_wide;
-                int src2_wide;
-                int prewiden;
-                /* prewiden, src1_wide, src2_wide */
-                static const int neon_3reg_wide[16][3] = {
-                    {1, 0, 0}, /* VADDL */
-                    {1, 1, 0}, /* VADDW */
-                    {1, 0, 0}, /* VSUBL */
-                    {1, 1, 0}, /* VSUBW */
-                    {0, 1, 1}, /* VADDHN */
-                    {0, 0, 0}, /* VABAL */
-                    {0, 1, 1}, /* VSUBHN */
-                    {0, 0, 0}, /* VABDL */
-                    {0, 0, 0}, /* VMLAL */
-                    {0, 0, 0}, /* VQDMLAL */
-                    {0, 0, 0}, /* VMLSL */
-                    {0, 0, 0}, /* VQDMLSL */
-                    {0, 0, 0}, /* Integer VMULL */
-                    {0, 0, 0}, /* VQDMULL */
-                    {0, 0, 0}  /* Polynomial VMULL */
-                };
-
-                prewiden = neon_3reg_wide[op][0];
-                src1_wide = neon_3reg_wide[op][1];
-                src2_wide = neon_3reg_wide[op][2];
-
-                if (size == 0 && (op == 9 || op == 11 || op == 13))
+
+                if (op == 15
+                    || (op < 4 && ((rd & 1) || ((op & 1) && (rn & 1))))
+                    || ((op == 4 || op == 6) && ((rn | rm) & 1))
+                    || ((op == 5 || op >= 7) && (rd & 1))
+                    || ((op == 9 || op == 11) && (u || size == 0))
+                    || (op == 13 && size == 0)
+                    || (op == 14 && (u || size))) {
                     return 1;
+                }
+
+                int src1_wide = (op == 1 || op == 3 || op == 4 || op == 6);
+                int src2_wide = (op == 4 || op == 6);
+                int prewiden = (op < 4);
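
The three expressions above are intended to reproduce the neon_3reg_wide table
that this hunk removes; a quick standalone cross-check (plain C, not part of
the patch, table copied from the old code) could be:

    #include <stdio.h>

    int main(void)
    {
        /* prewiden, src1_wide, src2_wide for ops 0..14 */
        static const int neon_3reg_wide[15][3] = {
            {1, 0, 0}, {1, 1, 0}, {1, 0, 0}, {1, 1, 0}, /* VADDL VADDW VSUBL VSUBW */
            {0, 1, 1}, {0, 0, 0}, {0, 1, 1}, {0, 0, 0}, /* VADDHN VABAL VSUBHN VABDL */
            {0, 0, 0}, {0, 0, 0}, {0, 0, 0}, {0, 0, 0}, /* VMLAL VQDMLAL VMLSL VQDMLSL */
            {0, 0, 0}, {0, 0, 0}, {0, 0, 0}             /* VMULL VQDMULL poly VMULL */
        };
        int op, mismatches = 0;

        for (op = 0; op < 15; op++) {
            int prewiden  = (op < 4);
            int src1_wide = (op == 1 || op == 3 || op == 4 || op == 6);
            int src2_wide = (op == 4 || op == 6);

            if (prewiden  != neon_3reg_wide[op][0] ||
                src1_wide != neon_3reg_wide[op][1] ||
                src2_wide != neon_3reg_wide[op][2]) {
                mismatches++;
            }
        }
        printf("mismatches: %d\n", mismatches);  /* expected: 0 */
        return 0;
    }
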
 
                 /* Avoid overlapping operands.  Wide source operands are
                    always aligned so will never overlap with wide
@@ -5033,7 +5033,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                         case 5:
                             gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
                             break;
-                        default: abort();
+                        default: abort(); /* size == 3 is handled earlier */
                         }
                         dead_tmp(tmp2);
                         dead_tmp(tmp);
@@ -5048,7 +5048,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                         cpu_abort(env, "Polynomial VMULL not implemented");
 
                     default: /* 15 is RESERVED.  */
-                        return 1;
+                        abort(); /* op == 15 is handled earlier */
                     }
                     if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
                         /* Accumulate.  */
@@ -5091,7 +5091,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                                 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                                 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
                                 break;
-                            default: abort();
+                            default: abort(); /* size == 3 is handled earlier */
                             }
                         } else {
                             switch (size) {
@@ -5106,7 +5106,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                                 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                                 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
                                 break;
-                            default: abort();
+                            default: abort(); /* size == 3 is handled earlier */
                             }
                         }
                         if (pass == 0) {
@@ -5129,8 +5129,15 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                 case 5: /* Floating point VMLS scalar */
                 case 8: /* Integer VMUL scalar */
                 case 9: /* Floating point VMUL scalar */
+                    if (size <= (op & 1)) {
+                        return 1;
+                    }
+                    /* fall through */
                 case 12: /* VQDMULH scalar */
                 case 13: /* VQRDMULH scalar */
+                    if (u && ((rd | rn) & 1)) {
+                        return 1;
+                    }
                     tmp = neon_get_scalar(size, rm);
                     neon_store_scratch(0, tmp);
                     for (pass = 0; pass < (u ? 4 : 2); pass++) {
@@ -5138,24 +5145,35 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                         tmp2 = neon_load_reg(rn, pass);
                         if (op == 12) {
                             if (size == 1) {
-                                gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
-                            } else {
-                                gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
+                                gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp,
+                                                           tmp2);
+                            } else { /* TODO: what happens when size == 0? */
+                                gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp,
+                                                           tmp2);
                             }
                         } else if (op == 13) {
                             if (size == 1) {
-                                gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
-                            } else {
-                                gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
+                                gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp,
+                                                            tmp2);
+                            } else { /* TODO: what happens when size == 0? */
+                                gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp,
+                                                            tmp2);
                             }
                         } else if (op & 1) {
                             gen_helper_neon_mul_f32(tmp, tmp, tmp2);
                         } else {
                             switch (size) {
-                            case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
-                            case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
-                            case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
-                            default: return 1;
+                            case 0:
+                                gen_helper_neon_mul_u8(tmp, tmp, tmp2);
+                                break;
+                            case 1:
+                                gen_helper_neon_mul_u16(tmp, tmp, tmp2);
+                                break;
+                            case 2:
+                                tcg_gen_mul_i32(tmp, tmp, tmp2);
+                                break;
+                            default:
+                                abort(); /* size == 3 is handled earlier */
                             }
                         }
                         dead_tmp(tmp2);
@@ -5176,21 +5194,26 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                                 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
                                 break;
                             default:
-                                abort();
+                                abort(); /* size == 3 is handled earlier */
                             }
                             dead_tmp(tmp2);
                         }
                         neon_store_reg(rd, pass, tmp);
                     }
                     break;
-                case 2: /* VMLAL sclar */
                 case 3: /* VQDMLAL scalar */
-                case 6: /* VMLSL scalar */
                 case 7: /* VQDMLSL scalar */
-                case 10: /* VMULL scalar */
                 case 11: /* VQDMULL scalar */
-                    if (size == 0 && (op == 3 || op == 7 || op == 11))
+                    if (u) {
+                        return 1;
+                    }
+                    /* fall through */
+                case 2: /* VMLAL scalar */
+                case 6: /* VMLSL scalar */
+                case 10: /* VMULL scalar */
+                    if (size == 0 || (rd & 1)) {
                         return 1;
+                    }
 
                     tmp2 = neon_get_scalar(size, rm);
                     tmp3 = neon_load_reg(rn, 1);
@@ -5242,8 +5265,10 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                 imm = (insn >> 8) & 0xf;
                 count = q + 1;
 
-                if (imm > 7 && !q)
+                if ((imm > 7 && !q)
+                    || (q && ((rd | rn | rm) & 1))) {
                     return 1;
+                }
 
                 if (imm == 0) {
                     neon_load_reg64(cpu_V0, rn);
@@ -5293,10 +5318,15 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                 /* Two register misc.  */
                 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
                 size = (insn >> 18) & 3;
+                if ((q && (op < 36 || op > 46) && ((rd | rm) & 1))
+                    || (op >= 56 && size != 2)) {
+                    return 1;
+                }
                 switch (op) {
                 case 0: /* VREV64 */
-                    if (size == 3)
+                    if (size == 3) {
                         return 1;
+                    }
                     for (pass = 0; pass < (q ? 2 : 1); pass++) {
                         tmp = neon_load_reg(rm, pass * 2);
                         tmp2 = neon_load_reg(rm, pass * 2 + 1);
@@ -5321,8 +5351,9 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                     break;
                 case 4: case 5: /* VPADDL */
                 case 12: case 13: /* VPADAL */
-                    if (size == 3)
+                    if (size == 3) {
                         return 1;
+                    }
                     for (pass = 0; pass < q + 1; pass++) {
                         tmp = neon_load_reg(rm, pass * 2);
                         gen_neon_widen(cpu_V0, tmp, size, op & 1);
@@ -5343,83 +5374,57 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                     }
                     break;
                 case 33: /* VTRN */
-                    if (size == 2) {
+                    switch (size) {
+                    case 0: case 1:
+                        goto elementwise;
+                    case 2:
                         for (n = 0; n < (q ? 4 : 2); n += 2) {
                             tmp = neon_load_reg(rm, n);
                             tmp2 = neon_load_reg(rd, n + 1);
                             neon_store_reg(rm, n, tmp2);
                             neon_store_reg(rd, n + 1, tmp);
                         }
-                    } else {
-                        goto elementwise;
+                        break;
+                    default:
+                        return 1;
                     }
                     break;
                 case 34: /* VUZP */
-                    /* Reg  Before       After
-                       Rd   A3 A2 A1 A0  B2 B0 A2 A0
-                       Rm   B3 B2 B1 B0  B3 B1 A3 A1
-                     */
-                    if (size == 3)
+                    if (size == 3 || (!q && size == 2)) {
                         return 1;
-                    gen_neon_unzip(rd, q, 0, size);
-                    gen_neon_unzip(rm, q, 4, size);
-                    if (q) {
-                        static int unzip_order_q[8] =
-                            {0, 2, 4, 6, 1, 3, 5, 7};
-                        for (n = 0; n < 8; n++) {
-                            int reg = (n < 4) ? rd : rm;
-                            tmp = neon_load_scratch(unzip_order_q[n]);
-                            neon_store_reg(reg, n % 4, tmp);
-                        }
-                    } else {
-                        static int unzip_order[4] =
-                            {0, 4, 1, 5};
-                        for (n = 0; n < 4; n++) {
-                            int reg = (n < 2) ? rd : rm;
-                            tmp = neon_load_scratch(unzip_order[n]);
-                            neon_store_reg(reg, n % 2, tmp);
-                        }
                     }
+                    tmp = tcg_const_i32(insn);
+                    gen_helper_neon_unzip(cpu_env, tmp);
+                    tcg_temp_free_i32(tmp);
                     break;
                 case 35: /* VZIP */
-                    /* Reg  Before       After
-                       Rd   A3 A2 A1 A0  B1 A1 B0 A0
-                       Rm   B3 B2 B1 B0  B3 A3 B2 A2
-                     */
-                    if (size == 3)
+                    if (size == 3 || (!q && size == 2)) {
                         return 1;
-                    count = (q ? 4 : 2);
-                    for (n = 0; n < count; n++) {
-                        tmp = neon_load_reg(rd, n);
-                        tmp2 = neon_load_reg(rd, n);
-                        switch (size) {
-                        case 0: gen_neon_zip_u8(tmp, tmp2); break;
-                        case 1: gen_neon_zip_u16(tmp, tmp2); break;
-                        case 2: /* no-op */; break;
-                        default: abort();
-                        }
-                        neon_store_scratch(n * 2, tmp);
-                        neon_store_scratch(n * 2 + 1, tmp2);
-                    }
-                    for (n = 0; n < count * 2; n++) {
-                        int reg = (n < count) ? rd : rm;
-                        tmp = neon_load_scratch(n);
-                        neon_store_reg(reg, n % count, tmp);
                     }
+                    tmp = tcg_const_i32(insn);
+                    gen_helper_neon_zip(cpu_env, tmp);
+                    tcg_temp_free_i32(tmp);
                     break;
                 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
-                    if (size == 3)
+                    if (size == 3 || (rm & 1)) {
                         return 1;
+                    }
                     TCGV_UNUSED(tmp2);
                     for (pass = 0; pass < 2; pass++) {
                         neon_load_reg64(cpu_V0, rm + pass);
                         tmp = new_tmp();
-                        if (op == 36 && q == 0) {
-                            gen_neon_narrow(size, tmp, cpu_V0);
-                        } else if (q) {
-                            gen_neon_narrow_satu(size, tmp, cpu_V0);
-                        } else {
-                            gen_neon_narrow_sats(size, tmp, cpu_V0);
+                        if (op == 36) {
+                            if (q) { /* VQMOVUN */
+                                gen_neon_unarrow_sats(size, tmp, cpu_V0);
+                            } else { /* VMOVN */
+                                gen_neon_narrow(size, tmp, cpu_V0);
+                            }
+                        } else { /* VQMOVN */
+                            if (q) {
+                                gen_neon_narrow_satu(size, tmp, cpu_V0);
+                            } else {
+                                gen_neon_narrow_sats(size, tmp, cpu_V0);
+                            }
                         }
                         if (pass == 0) {
                             tmp2 = tmp;
@@ -5430,21 +5435,25 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                     }
                     break;
                 case 38: /* VSHLL */
-                    if (q || size == 3)
+                    if (q || size == 3 || (rd & 1)) {
                         return 1;
+                    }
                     tmp = neon_load_reg(rm, 0);
                     tmp2 = neon_load_reg(rm, 1);
                     for (pass = 0; pass < 2; pass++) {
-                        if (pass == 1)
+                        if (pass == 1) {
                             tmp = tmp2;
+                        }
                         gen_neon_widen(cpu_V0, tmp, size, 1);
                         tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
                         neon_store_reg64(cpu_V0, rd + pass);
                     }
                     break;
                 case 44: /* VCVT.F16.F32 */
-                    if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
-                      return 1;
+                    if (!arm_feature(env, ARM_FEATURE_VFP_FP16)
+                        || q || size != 1 || (rm & 1)) {
+                        return 1;
+                    }
                     tmp = new_tmp();
                     tmp2 = new_tmp();
                     tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
@@ -5465,8 +5474,10 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                     dead_tmp(tmp);
                     break;
                 case 46: /* VCVT.F32.F16 */
-                    if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
-                      return 1;
+                    if (!arm_feature(env, ARM_FEATURE_VFP_FP16)
+                        || q || size != 1 || (rd & 1)) {
+                        return 1;
+                    }
                     tmp3 = new_tmp();
                     tmp = neon_load_reg(rm, 0);
                     tmp2 = neon_load_reg(rm, 1);
@@ -5501,38 +5512,44 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                             switch (size) {
                             case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
                             case 1: gen_swap_half(tmp); break;
-                            default: return 1;
+                            default: dead_tmp(tmp); return 1;
                             }
                             break;
                         case 2: /* VREV16 */
-                            if (size != 0)
+                            if (size != 0) {
+                                dead_tmp(tmp);
                                 return 1;
+                            }
                             gen_rev16(tmp);
                             break;
-                        case 8: /* CLS */
+                        case 8: /* VCLS */
                             switch (size) {
                             case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
                             case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
                             case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
-                            default: return 1;
+                            default: dead_tmp(tmp); return 1;
                             }
                             break;
-                        case 9: /* CLZ */
+                        case 9: /* VCLZ */
                             switch (size) {
                             case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
                             case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
                             case 2: gen_helper_clz(tmp, tmp); break;
-                            default: return 1;
+                            default: dead_tmp(tmp); return 1;
                             }
                             break;
-                        case 10: /* CNT */
-                            if (size != 0)
+                        case 10: /* VCNT */
+                            if (size != 0) {
+                                dead_tmp(tmp);
                                 return 1;
+                            }
                             gen_helper_neon_cnt_u8(tmp, tmp);
                             break;
-                        case 11: /* VNOT */
-                            if (size != 0)
+                        case 11: /* VMVN */
+                            if (size != 0) {
+                                dead_tmp(tmp);
                                 return 1;
+                            }
                             tcg_gen_not_i32(tmp, tmp);
                             break;
                         case 14: /* VQABS */
@@ -5540,7 +5557,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                             case 0: gen_helper_neon_qabs_s8(tmp, cpu_env, tmp); break;
                             case 1: gen_helper_neon_qabs_s16(tmp, cpu_env, tmp); break;
                             case 2: gen_helper_neon_qabs_s32(tmp, cpu_env, tmp); break;
-                            default: return 1;
+                            default: dead_tmp(tmp); return 1;
                             }
                             break;
                         case 15: /* VQNEG */
@@ -5548,84 +5565,112 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                             case 0: gen_helper_neon_qneg_s8(tmp, cpu_env, tmp); break;
                             case 1: gen_helper_neon_qneg_s16(tmp, cpu_env, tmp); break;
                             case 2: gen_helper_neon_qneg_s32(tmp, cpu_env, tmp); break;
-                            default: return 1;
+                            default: dead_tmp(tmp); return 1;
                             }
                             break;
                         case 16: case 19: /* VCGT #0, VCLE #0 */
                             tmp2 = tcg_const_i32(0);
-                            switch(size) {
+                            switch (size) {
                             case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
                             case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
                             case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
-                            default: return 1;
+                            default: tcg_temp_free_i32(tmp2); dead_tmp(tmp); return 1;
                             }
-                            tcg_temp_free(tmp2);
-                            if (op == 19)
+                            tcg_temp_free_i32(tmp2);
+                            if (op == 19) {
                                 tcg_gen_not_i32(tmp, tmp);
+                            }
                             break;
                         case 17: case 20: /* VCGE #0, VCLT #0 */
                             tmp2 = tcg_const_i32(0);
-                            switch(size) {
+                            switch (size) {
                             case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
                             case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
                             case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
-                            default: return 1;
+                            default: tcg_temp_free_i32(tmp2); dead_tmp(tmp); return 1;
                             }
-                            tcg_temp_free(tmp2);
-                            if (op == 20)
+                            tcg_temp_free_i32(tmp2);
+                            if (op == 20) {
                                 tcg_gen_not_i32(tmp, tmp);
+                            }
                             break;
                         case 18: /* VCEQ #0 */
                             tmp2 = tcg_const_i32(0);
-                            switch(size) {
+                            switch (size) {
                             case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
                             case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
                             case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
-                            default: return 1;
+                            default: tcg_temp_free_i32(tmp2); dead_tmp(tmp); return 1;
                             }
-                            tcg_temp_free(tmp2);
+                            tcg_temp_free_i32(tmp2);
                             break;
                         case 22: /* VABS */
-                            switch(size) {
+                            switch (size) {
                             case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
                             case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
                             case 2: tcg_gen_abs_i32(tmp, tmp); break;
-                            default: return 1;
+                            default: dead_tmp(tmp); return 1;
                             }
                             break;
                         case 23: /* VNEG */
-                            if (size == 3)
+                            if (size == 3) {
+                                dead_tmp(tmp);
                                 return 1;
+                            }
                             tmp2 = tcg_const_i32(0);
                             gen_neon_rsb(size, tmp, tmp2);
-                            tcg_temp_free(tmp2);
+                            tcg_temp_free_i32(tmp2);
                             break;
                         case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
+                            if (size != 2) {
+                                dead_tmp(tmp);
+                                return 1;
+                            }
                             tmp2 = tcg_const_i32(0);
                             gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
-                            tcg_temp_free(tmp2);
-                            if (op == 27)
+                            tcg_temp_free_i32(tmp2);
+                            if (op == 27) {
                                 tcg_gen_not_i32(tmp, tmp);
+                            }
                             break;
                         case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
+                            if (size != 2) {
+                                dead_tmp(tmp);
+                                return 1;
+                            }
                             tmp2 = tcg_const_i32(0);
                             gen_helper_neon_cge_f32(tmp, tmp, tmp2);
-                            tcg_temp_free(tmp2);
-                            if (op == 28)
+                            tcg_temp_free_i32(tmp2);
+                            if (op == 28) {
                                 tcg_gen_not_i32(tmp, tmp);
+                            }
                             break;
                         case 26: /* Float VCEQ #0 */
+                            if (size != 2) {
+                                dead_tmp(tmp);
+                                return 1;
+                            }
                             tmp2 = tcg_const_i32(0);
                             gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
-                            tcg_temp_free(tmp2);
+                            tcg_temp_free_i32(tmp2);
                             break;
                         case 30: /* Float VABS */
+                            if (size != 2) {
+                                return 1;
+                            }
                             gen_vfp_abs(0);
                             break;
                         case 31: /* Float VNEG */
+                            if (size != 2) {
+                                return 1;
+                            }
                             gen_vfp_neg(0);
                             break;
                         case 32: /* VSWP */
+                            if (size != 0) {
+                                dead_tmp(tmp);
+                                return 1;
+                            }
                             tmp2 = neon_load_reg(rd, pass);
                             neon_store_reg(rm, pass, tmp2);
                             break;
@@ -5634,8 +5679,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                             switch (size) {
                             case 0: gen_neon_trn_u8(tmp, tmp2); break;
                             case 1: gen_neon_trn_u16(tmp, tmp2); break;
-                            case 2: abort();
-                            default: return 1;
+                            default: abort(); /* size == 2,3 is handled earlier */
                             }
                             neon_store_reg(rm, pass, tmp2);
                             break;
@@ -5652,19 +5696,20 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                             gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
                             break;
                         case 60: /* VCVT.F32.S32 */
-                            gen_vfp_tosiz(0);
+                            gen_vfp_sito(0);
                             break;
                         case 61: /* VCVT.F32.U32 */
-                            gen_vfp_touiz(0);
+                            gen_vfp_uito(0);
                             break;
                         case 62: /* VCVT.S32.F32 */
-                            gen_vfp_sito(0);
+                            gen_vfp_tosiz(0);
                             break;
                         case 63: /* VCVT.U32.F32 */
-                            gen_vfp_uito(0);
+                            gen_vfp_touiz(0);
                             break;
                         default:
-                            /* Reserved: 21, 29, 39-56 */
+                            /* Reserved: 3, 6, 7, 21, 29, 39-43, 45, 47-55 */
+                            dead_tmp(tmp);
                             return 1;
                         }
                         if (op == 30 || op == 31 || op >= 58) {
@@ -5679,7 +5724,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
             } else if ((insn & (1 << 10)) == 0) {
                 /* VTBL, VTBX.  */
                 n = ((insn >> 5) & 0x18) + 8;
-                if (insn & (1 << 6)) {
+                if (q) {
                     tmp = neon_load_reg(rd, 0);
                 } else {
                     tmp = new_tmp();
@@ -5690,7 +5735,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                 tmp5 = tcg_const_i32(n);
                 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
                 dead_tmp(tmp);
-                if (insn & (1 << 6)) {
+                if (q) {
                     tmp = neon_load_reg(rd, 1);
                 } else {
                     tmp = new_tmp();
@@ -5705,6 +5750,9 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                 dead_tmp(tmp);
             } else if ((insn & 0x380) == 0) {
                 /* VDUP */
+                if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
+                    return 1;
+                }
                 if (insn & (1 << 19)) {
                     tmp = neon_load_reg(rm, 1);
                 } else {
@@ -5713,10 +5761,11 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                 if (insn & (1 << 16)) {
                     gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
                 } else if (insn & (1 << 17)) {
-                    if ((insn >> 18) & 1)
+                    if ((insn >> 18) & 1) {
                         gen_neon_dup_high16(tmp);
-                    else
+                    } else {
                         gen_neon_dup_low16(tmp);
+                    }
                 }
                 for (pass = 0; pass < (q ? 4 : 2); pass++) {
                     tmp2 = new_tmp();
@@ -6343,11 +6392,22 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
             dead_tmp(tmp2);
             store_reg(s, rd, tmp);
             break;
-        case 7: /* bkpt */
-            gen_set_condexec(s);
-            gen_set_pc_im(s->pc - 4);
-            gen_exception(EXCP_BKPT);
-            s->is_jmp = DISAS_JUMP;
+        case 7:
+            if (op1 == 1) {
+                /* bkpt */
+                gen_set_condexec(s);
+                gen_set_pc_im(s->pc - 4);
+                gen_exception(EXCP_BKPT);
+                s->is_jmp = DISAS_JUMP;
+            } else if (op1 == 3) {
+                /* smi/smc */
+                if (!(env->cp15.c0_c2[4] & 0xf000) || IS_USER(s))
+                    goto illegal_op;
+                /* TODO: real implementation; execute as NOP for now */
+                /*fprintf(stderr, "smc [0x%08x] pc=0x%08x\n", insn, s->pc);*/
+            } else {
+                goto illegal_op;
+            }
             break;
         case 0x8: /* signed multiply */
         case 0xa:
@@ -6887,7 +6947,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
                         case 4: gen_uxtb16(tmp);  break;
                         case 6: gen_uxtb(tmp);    break;
                         case 7: gen_uxth(tmp);    break;
-                        default: goto illegal_op;
+                        default: dead_tmp(tmp); goto illegal_op;
                         }
                         if (rn != 15) {
                             tmp2 = load_reg(s, rn);
@@ -7097,7 +7157,8 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
                 }
                 rn = (insn >> 16) & 0xf;
                 addr = load_reg(s, rn);
-
+                tmp3 = tcg_const_i32(4);
+
                 /* compute total size */
                 loaded_base = 0;
                 TCGV_UNUSED(loaded_var);
@@ -7110,7 +7171,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
                 if (insn & (1 << 23)) {
                     if (insn & (1 << 24)) {
                         /* pre increment */
-                        tcg_gen_addi_i32(addr, addr, 4);
+                        tcg_gen_add_i32(addr, addr, tmp3);
                     } else {
                         /* post increment */
                     }
@@ -7163,7 +7224,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
                         j++;
                         /* no need to add after the last transfer */
                         if (j != n)
-                            tcg_gen_addi_i32(addr, addr, 4);
+                            tcg_gen_add_i32(addr, addr, tmp3);
                     }
                 }
                 if (insn & (1 << 21)) {
@@ -7173,7 +7234,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
                             /* pre increment */
                         } else {
                             /* post increment */
-                            tcg_gen_addi_i32(addr, addr, 4);
+                            tcg_gen_add_i32(addr, addr, tmp3);
                         }
                     } else {
                         if (insn & (1 << 24)) {
@@ -7189,6 +7250,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
                 } else {
                     dead_tmp(addr);
                 }
+                tcg_temp_free_i32(tmp3);
                 if (loaded_base) {
                     store_reg(s, rn, loaded_var);
                 }
@@ -7565,6 +7627,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
                     tcg_gen_addi_i32(addr, addr, -offset);
                 }
 
+                tmp2 = tcg_const_i32(4);
                 for (i = 0; i < 16; i++) {
                     if ((insn & (1 << i)) == 0)
                         continue;
@@ -7581,8 +7644,9 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
                         tmp = load_reg(s, i);
                         gen_st32(tmp, addr, IS_USER(s));
                     }
-                    tcg_gen_addi_i32(addr, addr, 4);
+                    tcg_gen_add_i32(addr, addr, tmp2);
                 }
+                tcg_temp_free_i32(tmp2);
                 if (insn & (1 << 21)) {
                     /* Base register writeback.  */
                     if (insn & (1 << 24)) {
@@ -7882,7 +7946,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
         /* Coprocessor.  */
         if (((insn >> 24) & 3) == 3) {
             /* Translate into the equivalent ARM encoding.  */
-            insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
+            insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
             if (disas_neon_data_insn(env, s, insn))
                 goto illegal_op;
         } else {
@@ -8796,6 +8860,7 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
             if ((insn & (1 << 11)) == 0) {
                 tcg_gen_addi_i32(addr, addr, -offset);
             }
+            tmp2 = tcg_const_i32(4);
             for (i = 0; i < 8; i++) {
                 if (insn & (1 << i)) {
                     if (insn & (1 << 11)) {
@@ -8808,7 +8873,7 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
                         gen_st32(tmp, addr, IS_USER(s));
                     }
                     /* advance to the next address.  */
-                    tcg_gen_addi_i32(addr, addr, 4);
+                    tcg_gen_add_i32(addr, addr, tmp2);
                 }
             }
             TCGV_UNUSED(tmp);
@@ -8823,8 +8888,9 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
                     tmp = load_reg(s, 14);
                     gen_st32(tmp, addr, IS_USER(s));
                 }
-                tcg_gen_addi_i32(addr, addr, 4);
+                tcg_gen_add_i32(addr, addr, tmp2);
             }
+            tcg_temp_free_i32(tmp2);
             if ((insn & (1 << 11)) == 0) {
                 tcg_gen_addi_i32(addr, addr, -offset);
             }
@@ -9017,9 +9083,12 @@ static inline void gen_intermediate_code_internal(CPUState *env,
     uint32_t next_page_start;
     int num_insns;
     int max_insns;
+#ifdef RESOURCE_LEAK_DEBUG
+    int force_asmdump = 0;
 
     /* generate intermediate code */
     num_temps = 0;
+#endif
 
     pc_start = tb->pc;
 
@@ -9049,6 +9118,9 @@ static inline void gen_intermediate_code_internal(CPUState *env,
     cpu_V1 = cpu_F1d;
     /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
     cpu_M0 = tcg_temp_new_i64();
+#ifdef RESOURCE_LEAK_DEBUG
+    num_temps = 0; /* exclude the global temporaries allocated above from the count */
+#endif
     next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
     lj = -1;
     num_insns = 0;
@@ -9115,6 +9187,10 @@ static inline void gen_intermediate_code_internal(CPUState *env,
         if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
             gen_io_start();
 
+        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
+            tcg_gen_debug_insn_start(dc->pc);
+        }
+
         if (env->thumb) {
             disas_thumb_insn(env, dc);
             if (dc->condexec_mask) {
@@ -9128,10 +9204,13 @@ static inline void gen_intermediate_code_internal(CPUState *env,
         } else {
             disas_arm_insn(env, dc);
         }
+#ifdef RESOURCE_LEAK_DEBUG
         if (num_temps) {
             fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
+            force_asmdump = 1;
             num_temps = 0;
         }
+#endif
 
         if (dc->condjmp && !dc->is_jmp) {
             gen_set_label(dc->condlabel);
@@ -9225,6 +9304,14 @@ done_generating:
     gen_icount_end(tb, num_insns);
     *gen_opc_ptr = INDEX_op_end;
 
+#ifdef RESOURCE_LEAK_DEBUG
+    if (force_asmdump) {
+        fprintf(stderr, "----------------\n");
+        fprintf(stderr, "IN: %s\n", lookup_symbol(pc_start));
+        target_disas(stderr, pc_start, dc->pc - pc_start, env->thumb);
+        fprintf(stderr, "\n");
+    }
+#endif
 #ifdef DEBUG_DISAS
     if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
         qemu_log("----------------\n");
-- 
1.6.5
