path: root/target-arm
author     David 'Digit' Turner <digit@google.com>  2009-09-14 14:32:27 -0700
committer  David 'Digit' Turner <digit@google.com>  2009-09-14 14:32:27 -0700
commit     5d8f37ad78fc66901af50c762029a501561f3b23 (patch)
tree       206790f8f21000850a98c4f9590a79e779106278 /target-arm
parent     cd059b15f2c7df69f4a087bd66900eb172e41d1c (diff)
Merge upstream QEMU 10.0.50 into the Android source tree.
This change integrates many changes from the upstream QEMU sources. Its main
purpose is to enable correct ARMv6 and ARMv7 support in the Android emulator.
Due to the nature of the upstream code base, this unfortunately also required
changes to many other parts of the source.

Note that, to ease future integrations, some source files and directories that
have heavy Android-specific customization have been renamed with an -android
suffix. The original files are still there for easier integration tracking, but
are *never* compiled. For example:

    net.c        ->  net-android.c
    qemu-char.c  ->  qemu-char-android.c
    slirp/       ->  slirp-android/
    etc...

Tested on linux-x86, darwin-x86 and windows host machines.
Diffstat (limited to 'target-arm')
-rw-r--r--  target-arm/cpu.h            |   42
-rw-r--r--  target-arm/exec.h           |   14
-rw-r--r--  target-arm/helper.c         |  189
-rw-r--r--  target-arm/helpers.h        |  929
-rw-r--r--  target-arm/iwmmxt_helper.c  |    2
-rw-r--r--  target-arm/machine.c        |   27
-rw-r--r--  target-arm/neon_helper.c    |    4
-rw-r--r--  target-arm/op_addsub.h      |    4
-rw-r--r--  target-arm/op_helper.c      |  120
-rw-r--r--  target-arm/translate.c      |  769
10 files changed, 1078 insertions, 1022 deletions
diff --git a/target-arm/cpu.h b/target-arm/cpu.h
index ff765f7..f98655f 100644
--- a/target-arm/cpu.h
+++ b/target-arm/cpu.h
@@ -15,7 +15,7 @@
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
*/
#ifndef CPU_ARM_H
#define CPU_ARM_H
@@ -24,6 +24,8 @@
#define ELF_MACHINE EM_ARM
+#define CPUState struct CPUARMState
+
#include "cpu-defs.h"
#include "softfloat.h"
@@ -100,6 +102,9 @@ typedef struct CPUARMState {
struct {
uint32_t c0_cpuid;
uint32_t c0_cachetype;
+ uint32_t c0_ccsid[16]; /* Cache size. */
+ uint32_t c0_clid; /* Cache level. */
+ uint32_t c0_cssel; /* Cache size selection. */
uint32_t c0_c1[8]; /* Feature registers. */
uint32_t c0_c2[8]; /* Instruction set registers. */
uint32_t c1_sys; /* System control register. */
@@ -107,7 +112,9 @@ typedef struct CPUARMState {
uint32_t c1_xscaleauxcr; /* XScale auxiliary control register. */
uint32_t c2_base0; /* MMU translation table base 0. */
uint32_t c2_base1; /* MMU translation table base 1. */
- uint32_t c2_mask; /* MMU translation table base mask. */
+ uint32_t c2_control; /* MMU translation table base control. */
+ uint32_t c2_mask; /* MMU translation table base selection mask. */
+ uint32_t c2_base_mask; /* MMU translation table base 0 mask. */
uint32_t c2_data; /* MPU data cachable bits. */
uint32_t c2_insn; /* MPU instruction cachable bits. */
uint32_t c3; /* MMU domain access control register
@@ -149,6 +156,10 @@ typedef struct CPUARMState {
void *opaque;
} cp[15];
+ /* Thumb-2 EE state. */
+ uint32_t teecr;
+ uint32_t teehbr;
+
/* Internal CPU feature flags. */
uint32_t features;
@@ -208,6 +219,8 @@ uint32_t do_arm_semihosting(CPUARMState *env);
is returned if the signal was handled by the virtual CPU. */
int cpu_arm_signal_handler(int host_signum, void *pinfo,
void *puc);
+int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address, int rw,
+ int mmu_idx, int is_softmuu);
void cpu_lock(void);
void cpu_unlock(void);
@@ -327,7 +340,8 @@ enum arm_features {
ARM_FEATURE_NEON,
ARM_FEATURE_DIV,
ARM_FEATURE_M, /* Microcontroller profile. */
- ARM_FEATURE_OMAPCP /* OMAP specific CP15 ops handling. */
+ ARM_FEATURE_OMAPCP, /* OMAP specific CP15 ops handling. */
+ ARM_FEATURE_THUMB2EE
};
static inline int arm_feature(CPUARMState *env, int feature)
@@ -386,7 +400,6 @@ void cpu_arm_set_cp_io(CPUARMState *env, int cpnum,
#define TARGET_PAGE_BITS 10
#endif
-#define CPUState CPUARMState
#define cpu_init cpu_arm_init
#define cpu_exec cpu_arm_exec
#define cpu_gen_code cpu_arm_gen_code
@@ -413,8 +426,25 @@ static inline void cpu_clone_regs(CPUState *env, target_ulong newsp)
}
#endif
-#define CPU_PC_FROM_TB(env, tb) env->regs[15] = tb->pc
-
#include "cpu-all.h"
+#include "exec-all.h"
+
+static inline void cpu_pc_from_tb(CPUState *env, TranslationBlock *tb)
+{
+ env->regs[15] = tb->pc;
+}
+
+static inline void cpu_get_tb_cpu_state(CPUState *env, target_ulong *pc,
+ target_ulong *cs_base, int *flags)
+{
+ *pc = env->regs[15];
+ *cs_base = 0;
+ *flags = env->thumb | (env->vfp.vec_len << 1)
+ | (env->vfp.vec_stride << 4) | (env->condexec_bits << 8);
+ if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
+ *flags |= (1 << 6);
+ if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
+ *flags |= (1 << 7);
+}
#endif
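
The new cpu_get_tb_cpu_state() packs everything that affects code generation
into the TB flags word: the Thumb bit at bit 0, the VFP vector length at bits
1-3, the vector stride from bit 4, a privileged-mode bit at bit 6, an FPEXC.EN
bit at bit 7, and the IT (condexec) bits from bit 8 upwards. As a sanity check
of that layout, a purely illustrative decoder (not part of the patch; the field
widths are assumed from the 3-bit LEN / 2-bit STRIDE fields of FPSCR) could
look like:

    #include <stdint.h>

    struct arm_tb_flags {
        unsigned thumb;        /* bit 0 */
        unsigned vec_len;      /* bits 1..3, FPSCR.LEN */
        unsigned vec_stride;   /* bits 4..5, FPSCR.STRIDE */
        unsigned privileged;   /* bit 6: CPU not in USR mode */
        unsigned vfp_enabled;  /* bit 7: FPEXC.EN set */
        unsigned condexec;     /* bits 8..15: Thumb-2 IT state */
    };

    static struct arm_tb_flags arm_decode_tb_flags(uint32_t flags)
    {
        struct arm_tb_flags f;
        f.thumb       = flags & 1;
        f.vec_len     = (flags >> 1) & 7;
        f.vec_stride  = (flags >> 4) & 3;
        f.privileged  = (flags >> 6) & 1;
        f.vfp_enabled = (flags >> 7) & 1;
        f.condexec    = (flags >> 8) & 0xff;
        return f;
    }
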
diff --git a/target-arm/exec.h b/target-arm/exec.h
index c543cf4..710a2f9 100644
--- a/target-arm/exec.h
+++ b/target-arm/exec.h
@@ -15,7 +15,7 @@
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
*/
#include "config.h"
#include "dyngen-exec.h"
@@ -37,8 +37,11 @@ static inline void regs_to_env(void)
{
}
-int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
- int mmu_idx, int is_softmmu);
+static inline int cpu_has_work(CPUState *env)
+{
+ return (env->interrupt_request &
+ (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB));
+}
static inline int cpu_halted(CPUState *env) {
if (!env->halted)
@@ -46,8 +49,7 @@ static inline int cpu_halted(CPUState *env) {
/* An interrupt wakes the CPU even if the I and F CPSR bits are
set. We use EXITTB to silently wake CPU without causing an
actual interrupt. */
- if (env->interrupt_request &
- (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB)) {
+ if (cpu_has_work(env)) {
env->halted = 0;
return 0;
}
@@ -58,6 +60,4 @@ static inline int cpu_halted(CPUState *env) {
#include "softmmu_exec.h"
#endif
-void cpu_loop_exit(void);
-
void raise_exception(int);
diff --git a/target-arm/helper.c b/target-arm/helper.c
index 85753e4..a225224 100644
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -91,12 +91,17 @@ static void cpu_reset_model_id(CPUARMState *env, uint32_t id)
set_feature(env, ARM_FEATURE_VFP);
set_feature(env, ARM_FEATURE_VFP3);
set_feature(env, ARM_FEATURE_NEON);
+ set_feature(env, ARM_FEATURE_THUMB2EE);
env->vfp.xregs[ARM_VFP_FPSID] = 0x410330c0;
env->vfp.xregs[ARM_VFP_MVFR0] = 0x11110222;
env->vfp.xregs[ARM_VFP_MVFR1] = 0x00011100;
memcpy(env->cp15.c0_c1, cortexa8_cp15_c0_c1, 8 * sizeof(uint32_t));
memcpy(env->cp15.c0_c2, cortexa8_cp15_c0_c2, 8 * sizeof(uint32_t));
- env->cp15.c0_cachetype = 0x1dd20d2;
+ env->cp15.c0_cachetype = 0x82048004;
+ env->cp15.c0_clid = (1 << 27) | (2 << 24) | 3;
+ env->cp15.c0_ccsid[0] = 0xe007e01a; /* 16k L1 dcache. */
+ env->cp15.c0_ccsid[1] = 0x2007e01a; /* 16k L1 icache. */
+ env->cp15.c0_ccsid[2] = 0xf0000000; /* No L2 icache. */
break;
case ARM_CPUID_CORTEXM3:
set_feature(env, ARM_FEATURE_V6);
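
The Cortex-A8 reset values above add the v7 cache identification registers. The
"16k L1 dcache" comment can be checked against the architectural CCSIDR layout
(line size in bits [2:0] as log2 of words per line minus 2, associativity minus
1 in bits [12:3], number of sets minus 1 in bits [27:13]); the helper below is
only an illustration of that arithmetic, not code from the patch:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t ccsidr_size_bytes(uint32_t ccsidr)
    {
        uint32_t line_bytes = 4u << ((ccsidr & 7) + 2);     /* words per line -> bytes */
        uint32_t assoc      = ((ccsidr >> 3) & 0x3ff) + 1;
        uint32_t sets       = ((ccsidr >> 13) & 0x7fff) + 1;
        return line_bytes * assoc * sets;
    }

    int main(void)
    {
        /* 0xe007e01a: 64-byte lines, 4-way, 64 sets -> 16384 bytes (16 KB) */
        printf("%u\n", ccsidr_size_bytes(0xe007e01au));
        return 0;
    }
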
@@ -113,6 +118,7 @@ static void cpu_reset_model_id(CPUARMState *env, uint32_t id)
set_feature(env, ARM_FEATURE_VFP);
set_feature(env, ARM_FEATURE_VFP3);
set_feature(env, ARM_FEATURE_NEON);
+ set_feature(env, ARM_FEATURE_THUMB2EE);
set_feature(env, ARM_FEATURE_DIV);
break;
case ARM_CPUID_TI915T:
@@ -156,6 +162,12 @@ static void cpu_reset_model_id(CPUARMState *env, uint32_t id)
void cpu_reset(CPUARMState *env)
{
uint32_t id;
+
+ if (qemu_loglevel_mask(CPU_LOG_RESET)) {
+ qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
+ log_cpu_state(env, 0);
+ }
+
id = env->cp15.c0_cpuid;
memset(env, 0, offsetof(CPUARMState, breakpoints));
if (id)
@@ -171,11 +183,64 @@ void cpu_reset(CPUARMState *env)
if (IS_M(env))
env->uncached_cpsr &= ~CPSR_I;
env->vfp.xregs[ARM_VFP_FPEXC] = 0;
+ env->cp15.c2_base_mask = 0xffffc000u;
#endif
env->regs[15] = 0;
tlb_flush(env, 1);
}
+static int vfp_gdb_get_reg(CPUState *env, uint8_t *buf, int reg)
+{
+ int nregs;
+
+ /* VFP data registers are always little-endian. */
+ nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
+ if (reg < nregs) {
+ stfq_le_p(buf, env->vfp.regs[reg]);
+ return 8;
+ }
+ if (arm_feature(env, ARM_FEATURE_NEON)) {
+ /* Aliases for Q regs. */
+ nregs += 16;
+ if (reg < nregs) {
+ stfq_le_p(buf, env->vfp.regs[(reg - 32) * 2]);
+ stfq_le_p(buf + 8, env->vfp.regs[(reg - 32) * 2 + 1]);
+ return 16;
+ }
+ }
+ switch (reg - nregs) {
+ case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
+ case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
+ case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
+ }
+ return 0;
+}
+
+static int vfp_gdb_set_reg(CPUState *env, uint8_t *buf, int reg)
+{
+ int nregs;
+
+ nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
+ if (reg < nregs) {
+ env->vfp.regs[reg] = ldfq_le_p(buf);
+ return 8;
+ }
+ if (arm_feature(env, ARM_FEATURE_NEON)) {
+ nregs += 16;
+ if (reg < nregs) {
+ env->vfp.regs[(reg - 32) * 2] = ldfq_le_p(buf);
+ env->vfp.regs[(reg - 32) * 2 + 1] = ldfq_le_p(buf + 8);
+ return 16;
+ }
+ }
+ switch (reg - nregs) {
+ case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
+ case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
+ case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf); return 4;
+ }
+ return 0;
+}
+
CPUARMState *cpu_arm_init(const char *cpu_model)
{
CPUARMState *env;
@@ -186,8 +251,6 @@ CPUARMState *cpu_arm_init(const char *cpu_model)
if (id == 0)
return NULL;
env = qemu_mallocz(sizeof(CPUARMState));
- if (!env)
- return NULL;
cpu_exec_init(env);
if (!inited) {
inited = 1;
@@ -197,6 +260,17 @@ CPUARMState *cpu_arm_init(const char *cpu_model)
env->cpu_model_str = cpu_model;
env->cp15.c0_cpuid = id;
cpu_reset(env);
+ if (arm_feature(env, ARM_FEATURE_NEON)) {
+ gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
+ 51, "arm-neon.xml", 0);
+ } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
+ gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
+ 35, "arm-vfp3.xml", 0);
+ } else if (arm_feature(env, ARM_FEATURE_VFP)) {
+ gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
+ 19, "arm-vfp.xml", 0);
+ }
+ qemu_init_vcpu(env);
return env;
}
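
The register counts passed to gdb_register_coprocessor() follow directly from
the layout of vfp_gdb_get_reg() above: 16 or 32 D registers, 16 extra
Q-register aliases when NEON is present, plus the FPSID/FPSCR/FPEXC control
registers. A hypothetical helper making that arithmetic explicit:

    /* Illustrative only: 51 for arm-neon.xml, 35 for arm-vfp3.xml, 19 for arm-vfp.xml. */
    static int vfp_gdb_num_regs(int has_vfp3, int has_neon)
    {
        int nregs = has_vfp3 ? 32 : 16;   /* D0..D15 or D0..D31 */
        if (has_neon)
            nregs += 16;                  /* Q0..Q15 aliases */
        return nregs + 3;                 /* FPSID, FPSCR, FPEXC */
    }
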
@@ -396,8 +470,6 @@ int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
static void allocate_mmon_state(CPUState *env)
{
env->mmon_entry = malloc(sizeof (mmon_state));
- if (!env->mmon_entry)
- abort();
memset (env->mmon_entry, 0, sizeof (mmon_state));
env->mmon_entry->cpu_env = env;
mmon_head = env->mmon_entry;
@@ -622,7 +694,7 @@ static void do_v7m_exception_exit(CPUARMState *env)
pointer. */
}
-void do_interrupt_v7m(CPUARMState *env)
+static void do_interrupt_v7m(CPUARMState *env)
{
uint32_t xpsr = xpsr_read(env);
uint32_t lr;
@@ -846,17 +918,34 @@ static inline int check_ap(CPUState *env, int ap, int domain, int access_type,
return PAGE_READ | PAGE_WRITE;
case 3:
return PAGE_READ | PAGE_WRITE;
- case 4: case 7: /* Reserved. */
+ case 4: /* Reserved. */
return 0;
case 5:
return is_user ? 0 : prot_ro;
case 6:
return prot_ro;
+ case 7:
+ if (!arm_feature (env, ARM_FEATURE_V7))
+ return 0;
+ return prot_ro;
default:
abort();
}
}
+static uint32_t get_level1_table_address(CPUState *env, uint32_t address)
+{
+ uint32_t table;
+
+ if (address & env->cp15.c2_mask)
+ table = env->cp15.c2_base1 & 0xffffc000;
+ else
+ table = env->cp15.c2_base0 & env->cp15.c2_base_mask;
+
+ table |= (address >> 18) & 0x3ffc;
+ return table;
+}
+
static int get_phys_addr_v5(CPUState *env, uint32_t address, int access_type,
int is_user, uint32_t *phys_ptr, int *prot)
{
@@ -870,11 +959,7 @@ static int get_phys_addr_v5(CPUState *env, uint32_t address, int access_type,
/* Pagetable walk. */
/* Lookup l1 descriptor. */
- if (address & env->cp15.c2_mask)
- table = env->cp15.c2_base1;
- else
- table = env->cp15.c2_base0;
- table = (table & 0xffffc000) | ((address >> 18) & 0x3ffc);
+ table = get_level1_table_address(env, address);
desc = ldl_phys(table);
type = (desc & 3);
domain = (env->cp15.c3 >> ((desc >> 4) & 0x1e)) & 3;
@@ -962,11 +1047,7 @@ static int get_phys_addr_v6(CPUState *env, uint32_t address, int access_type,
/* Pagetable walk. */
/* Lookup l1 descriptor. */
- if (address & env->cp15.c2_mask)
- table = env->cp15.c2_base1;
- else
- table = env->cp15.c2_base0;
- table = (table & 0xffffc000) | ((address >> 18) & 0x3ffc);
+ table = get_level1_table_address(env, address);
desc = ldl_phys(table);
type = (desc & 3);
if (type == 0) {
@@ -1026,6 +1107,12 @@ static int get_phys_addr_v6(CPUState *env, uint32_t address, int access_type,
if (xn && access_type == 2)
goto do_fault;
+ /* The simplified model uses AP[0] as an access control bit. */
+ if ((env->cp15.c1_sys & (1 << 29)) && (ap & 1) == 0) {
+ /* Access flag fault. */
+ code = (code == 15) ? 6 : 3;
+ goto do_fault;
+ }
*prot = check_ap(env, ap, domain, access_type, is_user);
if (!*prot) {
/* Access permission fault. */
@@ -1250,15 +1337,16 @@ void HELPER(set_cp15)(CPUState *env, uint32_t insn, uint32_t val)
crm = insn & 0xf;
switch ((insn >> 16) & 0xf) {
case 0:
- if (((insn >> 21) & 7) == 2) {
- /* ??? Select cache level. Ignore. */
- return;
- }
/* ID codes. */
if (arm_feature(env, ARM_FEATURE_XSCALE))
break;
if (arm_feature(env, ARM_FEATURE_OMAPCP))
break;
+ if (arm_feature(env, ARM_FEATURE_V7)
+ && op1 == 2 && crm == 0 && op2 == 0) {
+ env->cp15.c0_cssel = val & 0xf;
+ break;
+ }
goto bad_reg;
case 1: /* System configuration. */
if (arm_feature(env, ARM_FEATURE_OMAPCP))
@@ -1281,9 +1369,11 @@ void HELPER(set_cp15)(CPUState *env, uint32_t insn, uint32_t val)
case 2:
if (arm_feature(env, ARM_FEATURE_XSCALE))
goto bad_reg;
- env->cp15.c1_coproc = val;
- /* ??? Is this safe when called from within a TB? */
- tb_flush(env);
+ if (env->cp15.c1_coproc != val) {
+ env->cp15.c1_coproc = val;
+ /* ??? Is this safe when called from within a TB? */
+ tb_flush(env);
+ }
break;
default:
goto bad_reg;
@@ -1310,7 +1400,10 @@ void HELPER(set_cp15)(CPUState *env, uint32_t insn, uint32_t val)
env->cp15.c2_base1 = val;
break;
case 2:
+ val &= 7;
+ env->cp15.c2_control = val;
env->cp15.c2_mask = ~(((uint32_t)0xffffffffu) >> val);
+ env->cp15.c2_base_mask = ~((uint32_t)0x3fffu >> val);
break;
default:
goto bad_reg;
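
The TTBCR write above now keeps the raw register in c2_control and precomputes
two masks from TTBCR.N: c2_mask decides whether a virtual address is translated
via TTBR1 (any of the top N bits set), and c2_base_mask is the alignment mask
applied to TTBR0, whose required alignment shrinks as N grows because the TTBR0
table covers a smaller region. A standalone sketch reproducing the same
expressions (illustrative, not patch code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        for (uint32_t n = 0; n <= 7; n++) {
            uint32_t c2_mask      = ~(((uint32_t)0xffffffffu) >> n);
            uint32_t c2_base_mask = ~((uint32_t)0x3fffu >> n);
            printf("N=%u: use TTBR1 if (addr & 0x%08x), TTBR0 base mask 0x%08x\n",
                   n, c2_mask, c2_base_mask);
        }
        /* N=0: mask 0x00000000 (TTBR0 only),  base mask 0xffffc000 (16 KB aligned)
         * N=2: mask 0xc0000000,               base mask 0xfffff000 ( 4 KB aligned) */
        return 0;
    }
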
@@ -1572,9 +1665,22 @@ uint32_t HELPER(get_cp15)(CPUState *env, uint32_t insn)
goto bad_reg;
if (crm != 0)
goto bad_reg;
- if (arm_feature(env, ARM_FEATURE_XSCALE))
+ if (!arm_feature(env, ARM_FEATURE_V7))
+ return 0;
+
+ switch (op2) {
+ case 0:
+ return env->cp15.c0_ccsid[env->cp15.c0_cssel];
+ case 1:
+ return env->cp15.c0_clid;
+ case 7:
+ return 0;
+ }
+ goto bad_reg;
+ case 2:
+ if (op2 != 0 || crm != 0)
goto bad_reg;
- return 0;
+ return env->cp15.c0_cssel;
default:
goto bad_reg;
}
@@ -1598,7 +1704,7 @@ uint32_t HELPER(get_cp15)(CPUState *env, uint32_t insn)
case ARM_CPUID_ARM11MPCORE:
return 1;
case ARM_CPUID_CORTEXA8:
- return 0;
+ return 2;
default:
goto bad_reg;
}
@@ -1628,17 +1734,7 @@ uint32_t HELPER(get_cp15)(CPUState *env, uint32_t insn)
case 1:
return env->cp15.c2_base1;
case 2:
- {
- int n;
- uint32_t mask;
- n = 0;
- mask = env->cp15.c2_mask;
- while (mask) {
- n++;
- mask <<= 1;
- }
- return n;
- }
+ return env->cp15.c2_control;
default:
goto bad_reg;
}
@@ -2250,10 +2346,13 @@ void HELPER(vfp_set_fpscr)(CPUState *env, uint32_t val)
}
set_float_rounding_mode(i, &env->vfp.fp_status);
}
+ if (changed & (1 << 24))
+ set_flush_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
+ if (changed & (1 << 25))
+ set_default_nan_mode((val & (1 << 25)) != 0, &env->vfp.fp_status);
i = vfp_exceptbits_to_host((val >> 8) & 0x1f);
set_float_exception_flags(i, &env->vfp.fp_status);
- /* XXX: FZ and DN are not implemented. */
}
#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))
@@ -2458,7 +2557,7 @@ ftype VFP_HELPER(name##to, p)(ftype x, uint32_t shift, CPUState *env) \
ftype tmp; \
tmp = sign##int32_to_##ftype ((itype)vfp_##p##toi(x), \
&env->vfp.fp_status); \
- return ftype##_scalbn(tmp, shift, &env->vfp.fp_status); \
+ return ftype##_scalbn(tmp, -(int)shift, &env->vfp.fp_status); \
} \
ftype VFP_HELPER(to##name, p)(ftype x, uint32_t shift, CPUState *env) \
{ \
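
The one-line change above fixes fixed-point to floating-point conversion: the
source register holds an integer with 'shift' fractional bits, so after the
integer is converted to float it must be scaled by 2^-shift rather than
2^shift. A minimal sketch of the intended semantics, using ldexpf as a stand-in
for the softfloat scalbn call used in the patch:

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    static float fixed_to_float(int32_t fixed, uint32_t shift)
    {
        return ldexpf((float)fixed, -(int)shift);   /* fixed * 2^-shift */
    }

    int main(void)
    {
        /* 0x180 with 8 fractional bits is 384/256 = 1.5 */
        printf("%f\n", fixed_to_float(0x180, 8));
        return 0;
    }
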
@@ -2560,3 +2659,13 @@ void HELPER(traceBB64)(uint64_t bb_num, uint64_t tb)
#endif
#endif /* CONFIG_TRACE */
+
+void HELPER(set_teecr)(CPUState *env, uint32_t val)
+{
+ val &= 1;
+ if (env->teecr != val) {
+ env->teecr = val;
+ tb_flush(env);
+ }
+}
+
diff --git a/target-arm/helpers.h b/target-arm/helpers.h
index cef53be..1a6790e 100644
--- a/target-arm/helpers.h
+++ b/target-arm/helpers.h
@@ -1,476 +1,405 @@
-#define DEF_HELPER(name, ret, args) ret glue(helper_,name) args;
-
-#ifdef GEN_HELPER
-#define DEF_HELPER_0_0(name, ret, args) \
-DEF_HELPER(name, ret, args) \
-static inline void gen_helper_##name(void) \
-{ \
- tcg_gen_helper_0_0(helper_##name); \
-}
-#define DEF_HELPER_0_1(name, ret, args) \
-DEF_HELPER(name, ret, args) \
-static inline void gen_helper_##name(TCGv arg1) \
-{ \
- tcg_gen_helper_0_1(helper_##name, arg1); \
-}
-#define DEF_HELPER_0_2(name, ret, args) \
-DEF_HELPER(name, ret, args) \
-static inline void gen_helper_##name(TCGv arg1, TCGv arg2) \
-{ \
- tcg_gen_helper_0_2(helper_##name, arg1, arg2); \
-}
-#define DEF_HELPER_0_3(name, ret, args) \
-DEF_HELPER(name, ret, args) \
-static inline void gen_helper_##name( \
- TCGv arg1, TCGv arg2, TCGv arg3) \
-{ \
- tcg_gen_helper_0_3(helper_##name, arg1, arg2, arg3); \
-}
-#define DEF_HELPER_1_0(name, ret, args) \
-DEF_HELPER(name, ret, args) \
-static inline void gen_helper_##name(TCGv ret) \
-{ \
- tcg_gen_helper_1_0(helper_##name, ret); \
-}
-#define DEF_HELPER_1_1(name, ret, args) \
-DEF_HELPER(name, ret, args) \
-static inline void gen_helper_##name(TCGv ret, TCGv arg1) \
-{ \
- tcg_gen_helper_1_1(helper_##name, ret, arg1); \
-}
-#define DEF_HELPER_1_2(name, ret, args) \
-DEF_HELPER(name, ret, args) \
-static inline void gen_helper_##name(TCGv ret, TCGv arg1, TCGv arg2) \
-{ \
- tcg_gen_helper_1_2(helper_##name, ret, arg1, arg2); \
-}
-#define DEF_HELPER_1_3(name, ret, args) \
-DEF_HELPER(name, ret, args) \
-static inline void gen_helper_##name(TCGv ret, \
- TCGv arg1, TCGv arg2, TCGv arg3) \
-{ \
- tcg_gen_helper_1_3(helper_##name, ret, arg1, arg2, arg3); \
-}
-#define DEF_HELPER_1_4(name, ret, args) \
-DEF_HELPER(name, ret, args) \
-static inline void gen_helper_##name(TCGv ret, \
- TCGv arg1, TCGv arg2, TCGv arg3, TCGv arg4) \
-{ \
- tcg_gen_helper_1_4(helper_##name, ret, arg1, arg2, arg3, arg4); \
-}
-#else /* !GEN_HELPER */
-#define DEF_HELPER_0_0 DEF_HELPER
-#define DEF_HELPER_0_1 DEF_HELPER
-#define DEF_HELPER_0_2 DEF_HELPER
-#define DEF_HELPER_0_3 DEF_HELPER
-#define DEF_HELPER_1_0 DEF_HELPER
-#define DEF_HELPER_1_1 DEF_HELPER
-#define DEF_HELPER_1_2 DEF_HELPER
-#define DEF_HELPER_1_3 DEF_HELPER
-#define DEF_HELPER_1_4 DEF_HELPER
-#define HELPER(x) glue(helper_,x)
-#endif
-
-DEF_HELPER_1_1(clz, uint32_t, (uint32_t))
-DEF_HELPER_1_1(sxtb16, uint32_t, (uint32_t))
-DEF_HELPER_1_1(uxtb16, uint32_t, (uint32_t))
-
-DEF_HELPER_1_2(add_setq, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(add_saturate, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(sub_saturate, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(add_usaturate, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(sub_usaturate, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_1(double_saturate, uint32_t, (int32_t))
-DEF_HELPER_1_2(sdiv, int32_t, (int32_t, int32_t))
-DEF_HELPER_1_2(udiv, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_1(rbit, uint32_t, (uint32_t))
-DEF_HELPER_1_1(abs, uint32_t, (uint32_t))
+#include "def-helper.h"
+
+DEF_HELPER_1(clz, i32, i32)
+DEF_HELPER_1(sxtb16, i32, i32)
+DEF_HELPER_1(uxtb16, i32, i32)
+
+DEF_HELPER_2(add_setq, i32, i32, i32)
+DEF_HELPER_2(add_saturate, i32, i32, i32)
+DEF_HELPER_2(sub_saturate, i32, i32, i32)
+DEF_HELPER_2(add_usaturate, i32, i32, i32)
+DEF_HELPER_2(sub_usaturate, i32, i32, i32)
+DEF_HELPER_1(double_saturate, i32, s32)
+DEF_HELPER_2(sdiv, s32, s32, s32)
+DEF_HELPER_2(udiv, i32, i32, i32)
+DEF_HELPER_1(rbit, i32, i32)
+DEF_HELPER_1(abs, i32, i32)
#ifdef CONFIG_TRACE
-DEF_HELPER_0_1(traceTicks,void,(uint32_t))
-DEF_HELPER_0_0(traceInsn,void,(void))
-DEF_HELPER_0_3(traceBB32,void,(uint32_t,uint32_t,uint32_t))
-DEF_HELPER_0_2(traceBB64,void,(uint64_t,uint64_t))
+DEF_HELPER_1(traceTicks, void, i32)
+DEF_HELPER_0(traceInsn, void)
+#if HOST_LONG_BITS == 32
+DEF_HELPER_3(traceBB32, void, i32, i32, i32)
+#endif
+#if HOST_LONG_BITS == 64
+DEF_HELPER_2(traceBB64, void, i64, i64)
+#endif
#endif
#define PAS_OP(pfx) \
- DEF_HELPER_1_3(pfx ## add8, uint32_t, (uint32_t, uint32_t, uint32_t *)) \
- DEF_HELPER_1_3(pfx ## sub8, uint32_t, (uint32_t, uint32_t, uint32_t *)) \
- DEF_HELPER_1_3(pfx ## sub16, uint32_t, (uint32_t, uint32_t, uint32_t *)) \
- DEF_HELPER_1_3(pfx ## add16, uint32_t, (uint32_t, uint32_t, uint32_t *)) \
- DEF_HELPER_1_3(pfx ## addsubx, uint32_t, (uint32_t, uint32_t, uint32_t *)) \
- DEF_HELPER_1_3(pfx ## subaddx, uint32_t, (uint32_t, uint32_t, uint32_t *))
+ DEF_HELPER_3(pfx ## add8, i32, i32, i32, ptr) \
+ DEF_HELPER_3(pfx ## sub8, i32, i32, i32, ptr) \
+ DEF_HELPER_3(pfx ## sub16, i32, i32, i32, ptr) \
+ DEF_HELPER_3(pfx ## add16, i32, i32, i32, ptr) \
+ DEF_HELPER_3(pfx ## addsubx, i32, i32, i32, ptr) \
+ DEF_HELPER_3(pfx ## subaddx, i32, i32, i32, ptr)
PAS_OP(s)
PAS_OP(u)
#undef PAS_OP
#define PAS_OP(pfx) \
- DEF_HELPER_1_2(pfx ## add8, uint32_t, (uint32_t, uint32_t)) \
- DEF_HELPER_1_2(pfx ## sub8, uint32_t, (uint32_t, uint32_t)) \
- DEF_HELPER_1_2(pfx ## sub16, uint32_t, (uint32_t, uint32_t)) \
- DEF_HELPER_1_2(pfx ## add16, uint32_t, (uint32_t, uint32_t)) \
- DEF_HELPER_1_2(pfx ## addsubx, uint32_t, (uint32_t, uint32_t)) \
- DEF_HELPER_1_2(pfx ## subaddx, uint32_t, (uint32_t, uint32_t))
+ DEF_HELPER_2(pfx ## add8, i32, i32, i32) \
+ DEF_HELPER_2(pfx ## sub8, i32, i32, i32) \
+ DEF_HELPER_2(pfx ## sub16, i32, i32, i32) \
+ DEF_HELPER_2(pfx ## add16, i32, i32, i32) \
+ DEF_HELPER_2(pfx ## addsubx, i32, i32, i32) \
+ DEF_HELPER_2(pfx ## subaddx, i32, i32, i32)
PAS_OP(q)
PAS_OP(sh)
PAS_OP(uq)
PAS_OP(uh)
#undef PAS_OP
-DEF_HELPER_1_2(ssat, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(usat, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(ssat16, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(usat16, uint32_t, (uint32_t, uint32_t))
-
-DEF_HELPER_1_2(usad8, uint32_t, (uint32_t, uint32_t))
-
-DEF_HELPER_1_1(logicq_cc, uint32_t, (uint64_t))
-
-DEF_HELPER_1_3(sel_flags, uint32_t, (uint32_t, uint32_t, uint32_t))
-DEF_HELPER_0_1(exception, void, (uint32_t))
-DEF_HELPER_0_0(wfi, void, (void))
-
-DEF_HELPER_0_2(cpsr_write, void, (uint32_t, uint32_t))
-DEF_HELPER_1_0(cpsr_read, uint32_t, (void))
-
-DEF_HELPER_0_3(v7m_msr, void, (CPUState *, uint32_t, uint32_t))
-DEF_HELPER_1_2(v7m_mrs, uint32_t, (CPUState *, uint32_t))
-
-DEF_HELPER_0_3(set_cp15, void, (CPUState *, uint32_t, uint32_t))
-DEF_HELPER_1_2(get_cp15, uint32_t, (CPUState *, uint32_t))
-
-DEF_HELPER_0_3(set_cp, void, (CPUState *, uint32_t, uint32_t))
-DEF_HELPER_1_2(get_cp, uint32_t, (CPUState *, uint32_t))
-
-DEF_HELPER_1_2(get_r13_banked, uint32_t, (CPUState *, uint32_t))
-DEF_HELPER_0_3(set_r13_banked, void, (CPUState *, uint32_t, uint32_t))
-
-DEF_HELPER_0_2(mark_exclusive, void, (CPUState *, uint32_t))
-DEF_HELPER_1_2(test_exclusive, uint32_t, (CPUState *, uint32_t))
-DEF_HELPER_0_1(clrex, void, (CPUState *))
-
-DEF_HELPER_1_1(get_user_reg, uint32_t, (uint32_t))
-DEF_HELPER_0_2(set_user_reg, void, (uint32_t, uint32_t))
-
-DEF_HELPER_1_1(vfp_get_fpscr, uint32_t, (CPUState *))
-DEF_HELPER_0_2(vfp_set_fpscr, void, (CPUState *, uint32_t))
-
-DEF_HELPER_1_3(vfp_adds, float32, (float32, float32, CPUState *))
-DEF_HELPER_1_3(vfp_addd, float64, (float64, float64, CPUState *))
-DEF_HELPER_1_3(vfp_subs, float32, (float32, float32, CPUState *))
-DEF_HELPER_1_3(vfp_subd, float64, (float64, float64, CPUState *))
-DEF_HELPER_1_3(vfp_muls, float32, (float32, float32, CPUState *))
-DEF_HELPER_1_3(vfp_muld, float64, (float64, float64, CPUState *))
-DEF_HELPER_1_3(vfp_divs, float32, (float32, float32, CPUState *))
-DEF_HELPER_1_3(vfp_divd, float64, (float64, float64, CPUState *))
-DEF_HELPER_1_1(vfp_negs, float32, (float32))
-DEF_HELPER_1_1(vfp_negd, float64, (float64))
-DEF_HELPER_1_1(vfp_abss, float32, (float32))
-DEF_HELPER_1_1(vfp_absd, float64, (float64))
-DEF_HELPER_1_2(vfp_sqrts, float32, (float32, CPUState *))
-DEF_HELPER_1_2(vfp_sqrtd, float64, (float64, CPUState *))
-DEF_HELPER_0_3(vfp_cmps, void, (float32, float32, CPUState *))
-DEF_HELPER_0_3(vfp_cmpd, void, (float64, float64, CPUState *))
-DEF_HELPER_0_3(vfp_cmpes, void, (float32, float32, CPUState *))
-DEF_HELPER_0_3(vfp_cmped, void, (float64, float64, CPUState *))
-
-DEF_HELPER_1_2(vfp_fcvtds, float64, (float32, CPUState *))
-DEF_HELPER_1_2(vfp_fcvtsd, float32, (float64, CPUState *))
-
-DEF_HELPER_1_2(vfp_uitos, float32, (float32, CPUState *))
-DEF_HELPER_1_2(vfp_uitod, float64, (float32, CPUState *))
-DEF_HELPER_1_2(vfp_sitos, float32, (float32, CPUState *))
-DEF_HELPER_1_2(vfp_sitod, float64, (float32, CPUState *))
-
-DEF_HELPER_1_2(vfp_touis, float32, (float32, CPUState *))
-DEF_HELPER_1_2(vfp_touid, float32, (float64, CPUState *))
-DEF_HELPER_1_2(vfp_touizs, float32, (float32, CPUState *))
-DEF_HELPER_1_2(vfp_touizd, float32, (float64, CPUState *))
-DEF_HELPER_1_2(vfp_tosis, float32, (float32, CPUState *))
-DEF_HELPER_1_2(vfp_tosid, float32, (float64, CPUState *))
-DEF_HELPER_1_2(vfp_tosizs, float32, (float32, CPUState *))
-DEF_HELPER_1_2(vfp_tosizd, float32, (float64, CPUState *))
-
-DEF_HELPER_1_3(vfp_toshs, float32, (float32, uint32_t, CPUState *))
-DEF_HELPER_1_3(vfp_tosls, float32, (float32, uint32_t, CPUState *))
-DEF_HELPER_1_3(vfp_touhs, float32, (float32, uint32_t, CPUState *))
-DEF_HELPER_1_3(vfp_touls, float32, (float32, uint32_t, CPUState *))
-DEF_HELPER_1_3(vfp_toshd, float64, (float64, uint32_t, CPUState *))
-DEF_HELPER_1_3(vfp_tosld, float64, (float64, uint32_t, CPUState *))
-DEF_HELPER_1_3(vfp_touhd, float64, (float64, uint32_t, CPUState *))
-DEF_HELPER_1_3(vfp_tould, float64, (float64, uint32_t, CPUState *))
-DEF_HELPER_1_3(vfp_shtos, float32, (float32, uint32_t, CPUState *))
-DEF_HELPER_1_3(vfp_sltos, float32, (float32, uint32_t, CPUState *))
-DEF_HELPER_1_3(vfp_uhtos, float32, (float32, uint32_t, CPUState *))
-DEF_HELPER_1_3(vfp_ultos, float32, (float32, uint32_t, CPUState *))
-DEF_HELPER_1_3(vfp_shtod, float64, (float64, uint32_t, CPUState *))
-DEF_HELPER_1_3(vfp_sltod, float64, (float64, uint32_t, CPUState *))
-DEF_HELPER_1_3(vfp_uhtod, float64, (float64, uint32_t, CPUState *))
-DEF_HELPER_1_3(vfp_ultod, float64, (float64, uint32_t, CPUState *))
-
-DEF_HELPER_1_3(recps_f32, float32, (float32, float32, CPUState *))
-DEF_HELPER_1_3(rsqrts_f32, float32, (float32, float32, CPUState *))
-DEF_HELPER_1_2(recpe_f32, float32, (float32, CPUState *))
-DEF_HELPER_1_2(rsqrte_f32, float32, (float32, CPUState *))
-DEF_HELPER_1_2(recpe_u32, uint32_t, (uint32_t, CPUState *))
-DEF_HELPER_1_2(rsqrte_u32, uint32_t, (uint32_t, CPUState *))
-DEF_HELPER_1_4(neon_tbl, uint32_t, (uint32_t, uint32_t, uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_add_saturate_u64, uint64_t, (uint64_t, uint64_t))
-DEF_HELPER_1_2(neon_add_saturate_s64, uint64_t, (uint64_t, uint64_t))
-DEF_HELPER_1_2(neon_sub_saturate_u64, uint64_t, (uint64_t, uint64_t))
-DEF_HELPER_1_2(neon_sub_saturate_s64, uint64_t, (uint64_t, uint64_t))
-
-DEF_HELPER_1_2(add_cc, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(adc_cc, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(sub_cc, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(sbc_cc, uint32_t, (uint32_t, uint32_t))
-
-DEF_HELPER_1_2(shl, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(shr, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(sar, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(ror, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(shl_cc, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(shr_cc, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(sar_cc, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(ror_cc, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_2(ssat, i32, i32, i32)
+DEF_HELPER_2(usat, i32, i32, i32)
+DEF_HELPER_2(ssat16, i32, i32, i32)
+DEF_HELPER_2(usat16, i32, i32, i32)
+
+DEF_HELPER_2(usad8, i32, i32, i32)
+
+DEF_HELPER_1(logicq_cc, i32, i64)
+
+DEF_HELPER_3(sel_flags, i32, i32, i32, i32)
+DEF_HELPER_1(exception, void, i32)
+DEF_HELPER_0(wfi, void)
+
+DEF_HELPER_2(cpsr_write, void, i32, i32)
+DEF_HELPER_0(cpsr_read, i32)
+
+DEF_HELPER_3(v7m_msr, void, env, i32, i32)
+DEF_HELPER_2(v7m_mrs, i32, env, i32)
+
+DEF_HELPER_3(set_cp15, void, env, i32, i32)
+DEF_HELPER_2(get_cp15, i32, env, i32)
+
+DEF_HELPER_3(set_cp, void, env, i32, i32)
+DEF_HELPER_2(get_cp, i32, env, i32)
+
+DEF_HELPER_2(get_r13_banked, i32, env, i32)
+DEF_HELPER_3(set_r13_banked, void, env, i32, i32)
+
+DEF_HELPER_2(mark_exclusive, void, env, i32)
+DEF_HELPER_2(test_exclusive, i32, env, i32)
+DEF_HELPER_1(clrex, void, env)
+
+DEF_HELPER_1(get_user_reg, i32, i32)
+DEF_HELPER_2(set_user_reg, void, i32, i32)
+
+DEF_HELPER_1(vfp_get_fpscr, i32, env)
+DEF_HELPER_2(vfp_set_fpscr, void, env, i32)
+
+DEF_HELPER_3(vfp_adds, f32, f32, f32, env)
+DEF_HELPER_3(vfp_addd, f64, f64, f64, env)
+DEF_HELPER_3(vfp_subs, f32, f32, f32, env)
+DEF_HELPER_3(vfp_subd, f64, f64, f64, env)
+DEF_HELPER_3(vfp_muls, f32, f32, f32, env)
+DEF_HELPER_3(vfp_muld, f64, f64, f64, env)
+DEF_HELPER_3(vfp_divs, f32, f32, f32, env)
+DEF_HELPER_3(vfp_divd, f64, f64, f64, env)
+DEF_HELPER_1(vfp_negs, f32, f32)
+DEF_HELPER_1(vfp_negd, f64, f64)
+DEF_HELPER_1(vfp_abss, f32, f32)
+DEF_HELPER_1(vfp_absd, f64, f64)
+DEF_HELPER_2(vfp_sqrts, f32, f32, env)
+DEF_HELPER_2(vfp_sqrtd, f64, f64, env)
+DEF_HELPER_3(vfp_cmps, void, f32, f32, env)
+DEF_HELPER_3(vfp_cmpd, void, f64, f64, env)
+DEF_HELPER_3(vfp_cmpes, void, f32, f32, env)
+DEF_HELPER_3(vfp_cmped, void, f64, f64, env)
+
+DEF_HELPER_2(vfp_fcvtds, f64, f32, env)
+DEF_HELPER_2(vfp_fcvtsd, f32, f64, env)
+
+DEF_HELPER_2(vfp_uitos, f32, f32, env)
+DEF_HELPER_2(vfp_uitod, f64, f32, env)
+DEF_HELPER_2(vfp_sitos, f32, f32, env)
+DEF_HELPER_2(vfp_sitod, f64, f32, env)
+
+DEF_HELPER_2(vfp_touis, f32, f32, env)
+DEF_HELPER_2(vfp_touid, f32, f64, env)
+DEF_HELPER_2(vfp_touizs, f32, f32, env)
+DEF_HELPER_2(vfp_touizd, f32, f64, env)
+DEF_HELPER_2(vfp_tosis, f32, f32, env)
+DEF_HELPER_2(vfp_tosid, f32, f64, env)
+DEF_HELPER_2(vfp_tosizs, f32, f32, env)
+DEF_HELPER_2(vfp_tosizd, f32, f64, env)
+
+DEF_HELPER_3(vfp_toshs, f32, f32, i32, env)
+DEF_HELPER_3(vfp_tosls, f32, f32, i32, env)
+DEF_HELPER_3(vfp_touhs, f32, f32, i32, env)
+DEF_HELPER_3(vfp_touls, f32, f32, i32, env)
+DEF_HELPER_3(vfp_toshd, f64, f64, i32, env)
+DEF_HELPER_3(vfp_tosld, f64, f64, i32, env)
+DEF_HELPER_3(vfp_touhd, f64, f64, i32, env)
+DEF_HELPER_3(vfp_tould, f64, f64, i32, env)
+DEF_HELPER_3(vfp_shtos, f32, f32, i32, env)
+DEF_HELPER_3(vfp_sltos, f32, f32, i32, env)
+DEF_HELPER_3(vfp_uhtos, f32, f32, i32, env)
+DEF_HELPER_3(vfp_ultos, f32, f32, i32, env)
+DEF_HELPER_3(vfp_shtod, f64, f64, i32, env)
+DEF_HELPER_3(vfp_sltod, f64, f64, i32, env)
+DEF_HELPER_3(vfp_uhtod, f64, f64, i32, env)
+DEF_HELPER_3(vfp_ultod, f64, f64, i32, env)
+
+DEF_HELPER_3(recps_f32, f32, f32, f32, env)
+DEF_HELPER_3(rsqrts_f32, f32, f32, f32, env)
+DEF_HELPER_2(recpe_f32, f32, f32, env)
+DEF_HELPER_2(rsqrte_f32, f32, f32, env)
+DEF_HELPER_2(recpe_u32, i32, i32, env)
+DEF_HELPER_2(rsqrte_u32, i32, i32, env)
+DEF_HELPER_4(neon_tbl, i32, i32, i32, i32, i32)
+DEF_HELPER_2(neon_add_saturate_u64, i64, i64, i64)
+DEF_HELPER_2(neon_add_saturate_s64, i64, i64, i64)
+DEF_HELPER_2(neon_sub_saturate_u64, i64, i64, i64)
+DEF_HELPER_2(neon_sub_saturate_s64, i64, i64, i64)
+
+DEF_HELPER_2(add_cc, i32, i32, i32)
+DEF_HELPER_2(adc_cc, i32, i32, i32)
+DEF_HELPER_2(sub_cc, i32, i32, i32)
+DEF_HELPER_2(sbc_cc, i32, i32, i32)
+
+DEF_HELPER_2(shl, i32, i32, i32)
+DEF_HELPER_2(shr, i32, i32, i32)
+DEF_HELPER_2(sar, i32, i32, i32)
+DEF_HELPER_2(ror, i32, i32, i32)
+DEF_HELPER_2(shl_cc, i32, i32, i32)
+DEF_HELPER_2(shr_cc, i32, i32, i32)
+DEF_HELPER_2(sar_cc, i32, i32, i32)
+DEF_HELPER_2(ror_cc, i32, i32, i32)
/* neon_helper.c */
-DEF_HELPER_1_3(neon_qadd_u8, uint32_t, (CPUState *, uint32_t, uint32_t))
-DEF_HELPER_1_3(neon_qadd_s8, uint32_t, (CPUState *, uint32_t, uint32_t))
-DEF_HELPER_1_3(neon_qadd_u16, uint32_t, (CPUState *, uint32_t, uint32_t))
-DEF_HELPER_1_3(neon_qadd_s16, uint32_t, (CPUState *, uint32_t, uint32_t))
-DEF_HELPER_1_3(neon_qsub_u8, uint32_t, (CPUState *, uint32_t, uint32_t))
-DEF_HELPER_1_3(neon_qsub_s8, uint32_t, (CPUState *, uint32_t, uint32_t))
-DEF_HELPER_1_3(neon_qsub_u16, uint32_t, (CPUState *, uint32_t, uint32_t))
-DEF_HELPER_1_3(neon_qsub_s16, uint32_t, (CPUState *, uint32_t, uint32_t))
-
-DEF_HELPER_1_2(neon_hadd_s8, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_hadd_u8, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_hadd_s16, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_hadd_u16, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_hadd_s32, int32_t, (int32_t, int32_t))
-DEF_HELPER_1_2(neon_hadd_u32, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_rhadd_s8, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_rhadd_u8, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_rhadd_s16, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_rhadd_u16, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_rhadd_s32, int32_t, (int32_t, int32_t))
-DEF_HELPER_1_2(neon_rhadd_u32, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_hsub_s8, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_hsub_u8, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_hsub_s16, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_hsub_u16, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_hsub_s32, int32_t, (int32_t, int32_t))
-DEF_HELPER_1_2(neon_hsub_u32, uint32_t, (uint32_t, uint32_t))
-
-DEF_HELPER_1_2(neon_cgt_u8, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_cgt_s8, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_cgt_u16, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_cgt_s16, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_cgt_u32, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_cgt_s32, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_cge_u8, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_cge_s8, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_cge_u16, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_cge_s16, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_cge_u32, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_cge_s32, uint32_t, (uint32_t, uint32_t))
-
-DEF_HELPER_1_2(neon_min_u8, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_min_s8, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_min_u16, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_min_s16, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_min_u32, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_min_s32, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_max_u8, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_max_s8, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_max_u16, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_max_s16, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_max_u32, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_max_s32, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_pmin_u8, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_pmin_s8, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_pmin_u16, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_pmin_s16, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_pmin_u32, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_pmin_s32, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_pmax_u8, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_pmax_s8, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_pmax_u16, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_pmax_s16, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_pmax_u32, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_pmax_s32, uint32_t, (uint32_t, uint32_t))
-
-DEF_HELPER_1_2(neon_abd_u8, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_abd_s8, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_abd_u16, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_abd_s16, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_abd_u32, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_abd_s32, uint32_t, (uint32_t, uint32_t))
-
-DEF_HELPER_1_2(neon_shl_u8, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_shl_s8, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_shl_u16, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_shl_s16, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_shl_u32, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_shl_s32, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_shl_u64, uint64_t, (uint64_t, uint64_t))
-DEF_HELPER_1_2(neon_shl_s64, uint64_t, (uint64_t, uint64_t))
-DEF_HELPER_1_2(neon_rshl_u8, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_rshl_s8, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_rshl_u16, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_rshl_s16, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_rshl_u32, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_rshl_s32, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_rshl_u64, uint64_t, (uint64_t, uint64_t))
-DEF_HELPER_1_2(neon_rshl_s64, uint64_t, (uint64_t, uint64_t))
-DEF_HELPER_1_3(neon_qshl_u8, uint32_t, (CPUState *, uint32_t, uint32_t))
-DEF_HELPER_1_3(neon_qshl_s8, uint32_t, (CPUState *, uint32_t, uint32_t))
-DEF_HELPER_1_3(neon_qshl_u16, uint32_t, (CPUState *, uint32_t, uint32_t))
-DEF_HELPER_1_3(neon_qshl_s16, uint32_t, (CPUState *, uint32_t, uint32_t))
-DEF_HELPER_1_3(neon_qshl_u32, uint32_t, (CPUState *, uint32_t, uint32_t))
-DEF_HELPER_1_3(neon_qshl_s32, uint32_t, (CPUState *, uint32_t, uint32_t))
-DEF_HELPER_1_3(neon_qshl_u64, uint64_t, (CPUState *, uint64_t, uint64_t))
-DEF_HELPER_1_3(neon_qshl_s64, uint64_t, (CPUState *, uint64_t, uint64_t))
-DEF_HELPER_1_3(neon_qrshl_u8, uint32_t, (CPUState *, uint32_t, uint32_t))
-DEF_HELPER_1_3(neon_qrshl_s8, uint32_t, (CPUState *, uint32_t, uint32_t))
-DEF_HELPER_1_3(neon_qrshl_u16, uint32_t, (CPUState *, uint32_t, uint32_t))
-DEF_HELPER_1_3(neon_qrshl_s16, uint32_t, (CPUState *, uint32_t, uint32_t))
-DEF_HELPER_1_3(neon_qrshl_u32, uint32_t, (CPUState *, uint32_t, uint32_t))
-DEF_HELPER_1_3(neon_qrshl_s32, uint32_t, (CPUState *, uint32_t, uint32_t))
-DEF_HELPER_1_3(neon_qrshl_u64, uint64_t, (CPUState *, uint64_t, uint64_t))
-DEF_HELPER_1_3(neon_qrshl_s64, uint64_t, (CPUState *, uint64_t, uint64_t))
-
-DEF_HELPER_1_2(neon_add_u8, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_add_u16, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_padd_u8, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_padd_u16, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_sub_u8, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_sub_u16, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_mul_u8, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_mul_u16, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_mul_p8, uint32_t, (uint32_t, uint32_t))
-
-DEF_HELPER_1_2(neon_tst_u8, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_tst_u16, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_tst_u32, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_ceq_u8, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_ceq_u16, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_ceq_u32, uint32_t, (uint32_t, uint32_t))
-
-DEF_HELPER_1_1(neon_abs_s8, uint32_t, (uint32_t))
-DEF_HELPER_1_1(neon_abs_s16, uint32_t, (uint32_t))
-DEF_HELPER_1_1(neon_clz_u8, uint32_t, (uint32_t))
-DEF_HELPER_1_1(neon_clz_u16, uint32_t, (uint32_t))
-DEF_HELPER_1_1(neon_cls_s8, uint32_t, (uint32_t))
-DEF_HELPER_1_1(neon_cls_s16, uint32_t, (uint32_t))
-DEF_HELPER_1_1(neon_cls_s32, uint32_t, (uint32_t))
-DEF_HELPER_1_1(neon_cnt_u8, uint32_t, (uint32_t))
-
-DEF_HELPER_1_3(neon_qdmulh_s16, uint32_t, (CPUState *, uint32_t, uint32_t))
-DEF_HELPER_1_3(neon_qrdmulh_s16, uint32_t, (CPUState *, uint32_t, uint32_t))
-DEF_HELPER_1_3(neon_qdmulh_s32, uint32_t, (CPUState *, uint32_t, uint32_t))
-DEF_HELPER_1_3(neon_qrdmulh_s32, uint32_t, (CPUState *, uint32_t, uint32_t))
-
-DEF_HELPER_1_1(neon_narrow_u8, uint32_t, (uint64_t))
-DEF_HELPER_1_1(neon_narrow_u16, uint32_t, (uint64_t))
-DEF_HELPER_1_2(neon_narrow_sat_u8, uint32_t, (CPUState *, uint64_t))
-DEF_HELPER_1_2(neon_narrow_sat_s8, uint32_t, (CPUState *, uint64_t))
-DEF_HELPER_1_2(neon_narrow_sat_u16, uint32_t, (CPUState *, uint64_t))
-DEF_HELPER_1_2(neon_narrow_sat_s16, uint32_t, (CPUState *, uint64_t))
-DEF_HELPER_1_2(neon_narrow_sat_u32, uint32_t, (CPUState *, uint64_t))
-DEF_HELPER_1_2(neon_narrow_sat_s32, uint32_t, (CPUState *, uint64_t))
-DEF_HELPER_1_1(neon_narrow_high_u8, uint32_t, (uint64_t))
-DEF_HELPER_1_1(neon_narrow_high_u16, uint32_t, (uint64_t))
-DEF_HELPER_1_1(neon_narrow_round_high_u8, uint32_t, (uint64_t))
-DEF_HELPER_1_1(neon_narrow_round_high_u16, uint32_t, (uint64_t))
-DEF_HELPER_1_1(neon_widen_u8, uint64_t, (uint32_t))
-DEF_HELPER_1_1(neon_widen_s8, uint64_t, (uint32_t))
-DEF_HELPER_1_1(neon_widen_u16, uint64_t, (uint32_t))
-DEF_HELPER_1_1(neon_widen_s16, uint64_t, (uint32_t))
-
-DEF_HELPER_1_2(neon_addl_u16, uint64_t, (uint64_t, uint64_t))
-DEF_HELPER_1_2(neon_addl_u32, uint64_t, (uint64_t, uint64_t))
-DEF_HELPER_1_2(neon_paddl_u16, uint64_t, (uint64_t, uint64_t))
-DEF_HELPER_1_2(neon_paddl_u32, uint64_t, (uint64_t, uint64_t))
-DEF_HELPER_1_2(neon_subl_u16, uint64_t, (uint64_t, uint64_t))
-DEF_HELPER_1_2(neon_subl_u32, uint64_t, (uint64_t, uint64_t))
-DEF_HELPER_1_3(neon_addl_saturate_s32, uint64_t, (CPUState *, uint64_t, uint64_t))
-DEF_HELPER_1_3(neon_addl_saturate_s64, uint64_t, (CPUState *, uint64_t, uint64_t))
-DEF_HELPER_1_2(neon_abdl_u16, uint64_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_abdl_s16, uint64_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_abdl_u32, uint64_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_abdl_s32, uint64_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_abdl_u64, uint64_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_abdl_s64, uint64_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_mull_u8, uint64_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_mull_s8, uint64_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_mull_u16, uint64_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_mull_s16, uint64_t, (uint32_t, uint32_t))
-
-DEF_HELPER_1_1(neon_negl_u16, uint64_t, (uint64_t))
-DEF_HELPER_1_1(neon_negl_u32, uint64_t, (uint64_t))
-DEF_HELPER_1_1(neon_negl_u64, uint64_t, (uint64_t))
-
-DEF_HELPER_1_2(neon_qabs_s8, uint32_t, (CPUState *, uint32_t))
-DEF_HELPER_1_2(neon_qabs_s16, uint32_t, (CPUState *, uint32_t))
-DEF_HELPER_1_2(neon_qabs_s32, uint32_t, (CPUState *, uint32_t))
-DEF_HELPER_1_2(neon_qneg_s8, uint32_t, (CPUState *, uint32_t))
-DEF_HELPER_1_2(neon_qneg_s16, uint32_t, (CPUState *, uint32_t))
-DEF_HELPER_1_2(neon_qneg_s32, uint32_t, (CPUState *, uint32_t))
-
-DEF_HELPER_0_0(neon_trn_u8, void, (void))
-DEF_HELPER_0_0(neon_trn_u16, void, (void))
-DEF_HELPER_0_0(neon_unzip_u8, void, (void))
-DEF_HELPER_0_0(neon_zip_u8, void, (void))
-DEF_HELPER_0_0(neon_zip_u16, void, (void))
-
-DEF_HELPER_1_2(neon_min_f32, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_max_f32, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_abd_f32, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_add_f32, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_sub_f32, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_mul_f32, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_ceq_f32, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_cge_f32, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_cgt_f32, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_acge_f32, uint32_t, (uint32_t, uint32_t))
-DEF_HELPER_1_2(neon_acgt_f32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_3(neon_qadd_u8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qadd_s8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qadd_u16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qadd_s16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qsub_u8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qsub_s8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qsub_u16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qsub_s16, i32, env, i32, i32)
+
+DEF_HELPER_2(neon_hadd_s8, i32, i32, i32)
+DEF_HELPER_2(neon_hadd_u8, i32, i32, i32)
+DEF_HELPER_2(neon_hadd_s16, i32, i32, i32)
+DEF_HELPER_2(neon_hadd_u16, i32, i32, i32)
+DEF_HELPER_2(neon_hadd_s32, s32, s32, s32)
+DEF_HELPER_2(neon_hadd_u32, i32, i32, i32)
+DEF_HELPER_2(neon_rhadd_s8, i32, i32, i32)
+DEF_HELPER_2(neon_rhadd_u8, i32, i32, i32)
+DEF_HELPER_2(neon_rhadd_s16, i32, i32, i32)
+DEF_HELPER_2(neon_rhadd_u16, i32, i32, i32)
+DEF_HELPER_2(neon_rhadd_s32, s32, s32, s32)
+DEF_HELPER_2(neon_rhadd_u32, i32, i32, i32)
+DEF_HELPER_2(neon_hsub_s8, i32, i32, i32)
+DEF_HELPER_2(neon_hsub_u8, i32, i32, i32)
+DEF_HELPER_2(neon_hsub_s16, i32, i32, i32)
+DEF_HELPER_2(neon_hsub_u16, i32, i32, i32)
+DEF_HELPER_2(neon_hsub_s32, s32, s32, s32)
+DEF_HELPER_2(neon_hsub_u32, i32, i32, i32)
+
+DEF_HELPER_2(neon_cgt_u8, i32, i32, i32)
+DEF_HELPER_2(neon_cgt_s8, i32, i32, i32)
+DEF_HELPER_2(neon_cgt_u16, i32, i32, i32)
+DEF_HELPER_2(neon_cgt_s16, i32, i32, i32)
+DEF_HELPER_2(neon_cgt_u32, i32, i32, i32)
+DEF_HELPER_2(neon_cgt_s32, i32, i32, i32)
+DEF_HELPER_2(neon_cge_u8, i32, i32, i32)
+DEF_HELPER_2(neon_cge_s8, i32, i32, i32)
+DEF_HELPER_2(neon_cge_u16, i32, i32, i32)
+DEF_HELPER_2(neon_cge_s16, i32, i32, i32)
+DEF_HELPER_2(neon_cge_u32, i32, i32, i32)
+DEF_HELPER_2(neon_cge_s32, i32, i32, i32)
+
+DEF_HELPER_2(neon_min_u8, i32, i32, i32)
+DEF_HELPER_2(neon_min_s8, i32, i32, i32)
+DEF_HELPER_2(neon_min_u16, i32, i32, i32)
+DEF_HELPER_2(neon_min_s16, i32, i32, i32)
+DEF_HELPER_2(neon_min_u32, i32, i32, i32)
+DEF_HELPER_2(neon_min_s32, i32, i32, i32)
+DEF_HELPER_2(neon_max_u8, i32, i32, i32)
+DEF_HELPER_2(neon_max_s8, i32, i32, i32)
+DEF_HELPER_2(neon_max_u16, i32, i32, i32)
+DEF_HELPER_2(neon_max_s16, i32, i32, i32)
+DEF_HELPER_2(neon_max_u32, i32, i32, i32)
+DEF_HELPER_2(neon_max_s32, i32, i32, i32)
+DEF_HELPER_2(neon_pmin_u8, i32, i32, i32)
+DEF_HELPER_2(neon_pmin_s8, i32, i32, i32)
+DEF_HELPER_2(neon_pmin_u16, i32, i32, i32)
+DEF_HELPER_2(neon_pmin_s16, i32, i32, i32)
+DEF_HELPER_2(neon_pmax_u8, i32, i32, i32)
+DEF_HELPER_2(neon_pmax_s8, i32, i32, i32)
+DEF_HELPER_2(neon_pmax_u16, i32, i32, i32)
+DEF_HELPER_2(neon_pmax_s16, i32, i32, i32)
+
+DEF_HELPER_2(neon_abd_u8, i32, i32, i32)
+DEF_HELPER_2(neon_abd_s8, i32, i32, i32)
+DEF_HELPER_2(neon_abd_u16, i32, i32, i32)
+DEF_HELPER_2(neon_abd_s16, i32, i32, i32)
+DEF_HELPER_2(neon_abd_u32, i32, i32, i32)
+DEF_HELPER_2(neon_abd_s32, i32, i32, i32)
+
+DEF_HELPER_2(neon_shl_u8, i32, i32, i32)
+DEF_HELPER_2(neon_shl_s8, i32, i32, i32)
+DEF_HELPER_2(neon_shl_u16, i32, i32, i32)
+DEF_HELPER_2(neon_shl_s16, i32, i32, i32)
+DEF_HELPER_2(neon_shl_u32, i32, i32, i32)
+DEF_HELPER_2(neon_shl_s32, i32, i32, i32)
+DEF_HELPER_2(neon_shl_u64, i64, i64, i64)
+DEF_HELPER_2(neon_shl_s64, i64, i64, i64)
+DEF_HELPER_2(neon_rshl_u8, i32, i32, i32)
+DEF_HELPER_2(neon_rshl_s8, i32, i32, i32)
+DEF_HELPER_2(neon_rshl_u16, i32, i32, i32)
+DEF_HELPER_2(neon_rshl_s16, i32, i32, i32)
+DEF_HELPER_2(neon_rshl_u32, i32, i32, i32)
+DEF_HELPER_2(neon_rshl_s32, i32, i32, i32)
+DEF_HELPER_2(neon_rshl_u64, i64, i64, i64)
+DEF_HELPER_2(neon_rshl_s64, i64, i64, i64)
+DEF_HELPER_3(neon_qshl_u8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_s8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_u16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_s16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_u32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_s32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_u64, i64, env, i64, i64)
+DEF_HELPER_3(neon_qshl_s64, i64, env, i64, i64)
+DEF_HELPER_3(neon_qrshl_u8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_s8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_u16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_s16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_u32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_s32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_u64, i64, env, i64, i64)
+DEF_HELPER_3(neon_qrshl_s64, i64, env, i64, i64)
+
+DEF_HELPER_2(neon_add_u8, i32, i32, i32)
+DEF_HELPER_2(neon_add_u16, i32, i32, i32)
+DEF_HELPER_2(neon_padd_u8, i32, i32, i32)
+DEF_HELPER_2(neon_padd_u16, i32, i32, i32)
+DEF_HELPER_2(neon_sub_u8, i32, i32, i32)
+DEF_HELPER_2(neon_sub_u16, i32, i32, i32)
+DEF_HELPER_2(neon_mul_u8, i32, i32, i32)
+DEF_HELPER_2(neon_mul_u16, i32, i32, i32)
+DEF_HELPER_2(neon_mul_p8, i32, i32, i32)
+
+DEF_HELPER_2(neon_tst_u8, i32, i32, i32)
+DEF_HELPER_2(neon_tst_u16, i32, i32, i32)
+DEF_HELPER_2(neon_tst_u32, i32, i32, i32)
+DEF_HELPER_2(neon_ceq_u8, i32, i32, i32)
+DEF_HELPER_2(neon_ceq_u16, i32, i32, i32)
+DEF_HELPER_2(neon_ceq_u32, i32, i32, i32)
+
+DEF_HELPER_1(neon_abs_s8, i32, i32)
+DEF_HELPER_1(neon_abs_s16, i32, i32)
+DEF_HELPER_1(neon_clz_u8, i32, i32)
+DEF_HELPER_1(neon_clz_u16, i32, i32)
+DEF_HELPER_1(neon_cls_s8, i32, i32)
+DEF_HELPER_1(neon_cls_s16, i32, i32)
+DEF_HELPER_1(neon_cls_s32, i32, i32)
+DEF_HELPER_1(neon_cnt_u8, i32, i32)
+
+DEF_HELPER_3(neon_qdmulh_s16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrdmulh_s16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qdmulh_s32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrdmulh_s32, i32, env, i32, i32)
+
+DEF_HELPER_1(neon_narrow_u8, i32, i64)
+DEF_HELPER_1(neon_narrow_u16, i32, i64)
+DEF_HELPER_2(neon_narrow_sat_u8, i32, env, i64)
+DEF_HELPER_2(neon_narrow_sat_s8, i32, env, i64)
+DEF_HELPER_2(neon_narrow_sat_u16, i32, env, i64)
+DEF_HELPER_2(neon_narrow_sat_s16, i32, env, i64)
+DEF_HELPER_2(neon_narrow_sat_u32, i32, env, i64)
+DEF_HELPER_2(neon_narrow_sat_s32, i32, env, i64)
+DEF_HELPER_1(neon_narrow_high_u8, i32, i64)
+DEF_HELPER_1(neon_narrow_high_u16, i32, i64)
+DEF_HELPER_1(neon_narrow_round_high_u8, i32, i64)
+DEF_HELPER_1(neon_narrow_round_high_u16, i32, i64)
+DEF_HELPER_1(neon_widen_u8, i64, i32)
+DEF_HELPER_1(neon_widen_s8, i64, i32)
+DEF_HELPER_1(neon_widen_u16, i64, i32)
+DEF_HELPER_1(neon_widen_s16, i64, i32)
+
+DEF_HELPER_2(neon_addl_u16, i64, i64, i64)
+DEF_HELPER_2(neon_addl_u32, i64, i64, i64)
+DEF_HELPER_2(neon_paddl_u16, i64, i64, i64)
+DEF_HELPER_2(neon_paddl_u32, i64, i64, i64)
+DEF_HELPER_2(neon_subl_u16, i64, i64, i64)
+DEF_HELPER_2(neon_subl_u32, i64, i64, i64)
+DEF_HELPER_3(neon_addl_saturate_s32, i64, env, i64, i64)
+DEF_HELPER_3(neon_addl_saturate_s64, i64, env, i64, i64)
+DEF_HELPER_2(neon_abdl_u16, i64, i32, i32)
+DEF_HELPER_2(neon_abdl_s16, i64, i32, i32)
+DEF_HELPER_2(neon_abdl_u32, i64, i32, i32)
+DEF_HELPER_2(neon_abdl_s32, i64, i32, i32)
+DEF_HELPER_2(neon_abdl_u64, i64, i32, i32)
+DEF_HELPER_2(neon_abdl_s64, i64, i32, i32)
+DEF_HELPER_2(neon_mull_u8, i64, i32, i32)
+DEF_HELPER_2(neon_mull_s8, i64, i32, i32)
+DEF_HELPER_2(neon_mull_u16, i64, i32, i32)
+DEF_HELPER_2(neon_mull_s16, i64, i32, i32)
+
+DEF_HELPER_1(neon_negl_u16, i64, i64)
+DEF_HELPER_1(neon_negl_u32, i64, i64)
+DEF_HELPER_1(neon_negl_u64, i64, i64)
+
+DEF_HELPER_2(neon_qabs_s8, i32, env, i32)
+DEF_HELPER_2(neon_qabs_s16, i32, env, i32)
+DEF_HELPER_2(neon_qabs_s32, i32, env, i32)
+DEF_HELPER_2(neon_qneg_s8, i32, env, i32)
+DEF_HELPER_2(neon_qneg_s16, i32, env, i32)
+DEF_HELPER_2(neon_qneg_s32, i32, env, i32)
+
+DEF_HELPER_0(neon_trn_u8, void)
+DEF_HELPER_0(neon_trn_u16, void)
+DEF_HELPER_0(neon_unzip_u8, void)
+DEF_HELPER_0(neon_zip_u8, void)
+DEF_HELPER_0(neon_zip_u16, void)
+
+DEF_HELPER_2(neon_min_f32, i32, i32, i32)
+DEF_HELPER_2(neon_max_f32, i32, i32, i32)
+DEF_HELPER_2(neon_abd_f32, i32, i32, i32)
+DEF_HELPER_2(neon_add_f32, i32, i32, i32)
+DEF_HELPER_2(neon_sub_f32, i32, i32, i32)
+DEF_HELPER_2(neon_mul_f32, i32, i32, i32)
+DEF_HELPER_2(neon_ceq_f32, i32, i32, i32)
+DEF_HELPER_2(neon_cge_f32, i32, i32, i32)
+DEF_HELPER_2(neon_cgt_f32, i32, i32, i32)
+DEF_HELPER_2(neon_acge_f32, i32, i32, i32)
+DEF_HELPER_2(neon_acgt_f32, i32, i32, i32)
/* iwmmxt_helper.c */
-DEF_HELPER_1_2(iwmmxt_maddsq, uint64_t, (uint64_t, uint64_t))
-DEF_HELPER_1_2(iwmmxt_madduq, uint64_t, (uint64_t, uint64_t))
-DEF_HELPER_1_2(iwmmxt_sadb, uint64_t, (uint64_t, uint64_t))
-DEF_HELPER_1_2(iwmmxt_sadw, uint64_t, (uint64_t, uint64_t))
-DEF_HELPER_1_2(iwmmxt_mulslw, uint64_t, (uint64_t, uint64_t))
-DEF_HELPER_1_2(iwmmxt_mulshw, uint64_t, (uint64_t, uint64_t))
-DEF_HELPER_1_2(iwmmxt_mululw, uint64_t, (uint64_t, uint64_t))
-DEF_HELPER_1_2(iwmmxt_muluhw, uint64_t, (uint64_t, uint64_t))
-DEF_HELPER_1_2(iwmmxt_macsw, uint64_t, (uint64_t, uint64_t))
-DEF_HELPER_1_2(iwmmxt_macuw, uint64_t, (uint64_t, uint64_t))
-DEF_HELPER_1_1(iwmmxt_setpsr_nz, uint32_t, (uint64_t))
+DEF_HELPER_2(iwmmxt_maddsq, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_madduq, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_sadb, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_sadw, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_mulslw, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_mulshw, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_mululw, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_muluhw, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_macsw, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_macuw, i64, i64, i64)
+DEF_HELPER_1(iwmmxt_setpsr_nz, i32, i64)
#define DEF_IWMMXT_HELPER_SIZE_ENV(name) \
-DEF_HELPER_1_3(iwmmxt_##name##b, uint64_t, (CPUState *, uint64_t, uint64_t)) \
-DEF_HELPER_1_3(iwmmxt_##name##w, uint64_t, (CPUState *, uint64_t, uint64_t)) \
-DEF_HELPER_1_3(iwmmxt_##name##l, uint64_t, (CPUState *, uint64_t, uint64_t)) \
+DEF_HELPER_3(iwmmxt_##name##b, i64, env, i64, i64) \
+DEF_HELPER_3(iwmmxt_##name##w, i64, env, i64, i64) \
+DEF_HELPER_3(iwmmxt_##name##l, i64, env, i64, i64) \
DEF_IWMMXT_HELPER_SIZE_ENV(unpackl)
DEF_IWMMXT_HELPER_SIZE_ENV(unpackh)
-DEF_HELPER_1_2(iwmmxt_unpacklub, uint64_t, (CPUState *, uint64_t))
-DEF_HELPER_1_2(iwmmxt_unpackluw, uint64_t, (CPUState *, uint64_t))
-DEF_HELPER_1_2(iwmmxt_unpacklul, uint64_t, (CPUState *, uint64_t))
-DEF_HELPER_1_2(iwmmxt_unpackhub, uint64_t, (CPUState *, uint64_t))
-DEF_HELPER_1_2(iwmmxt_unpackhuw, uint64_t, (CPUState *, uint64_t))
-DEF_HELPER_1_2(iwmmxt_unpackhul, uint64_t, (CPUState *, uint64_t))
-DEF_HELPER_1_2(iwmmxt_unpacklsb, uint64_t, (CPUState *, uint64_t))
-DEF_HELPER_1_2(iwmmxt_unpacklsw, uint64_t, (CPUState *, uint64_t))
-DEF_HELPER_1_2(iwmmxt_unpacklsl, uint64_t, (CPUState *, uint64_t))
-DEF_HELPER_1_2(iwmmxt_unpackhsb, uint64_t, (CPUState *, uint64_t))
-DEF_HELPER_1_2(iwmmxt_unpackhsw, uint64_t, (CPUState *, uint64_t))
-DEF_HELPER_1_2(iwmmxt_unpackhsl, uint64_t, (CPUState *, uint64_t))
+DEF_HELPER_2(iwmmxt_unpacklub, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackluw, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpacklul, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhub, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhuw, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhul, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpacklsb, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpacklsw, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpacklsl, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhsb, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhsw, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhsl, i64, env, i64)
DEF_IWMMXT_HELPER_SIZE_ENV(cmpeq)
DEF_IWMMXT_HELPER_SIZE_ENV(cmpgtu)
@@ -488,61 +417,53 @@ DEF_IWMMXT_HELPER_SIZE_ENV(addu)
DEF_IWMMXT_HELPER_SIZE_ENV(subs)
DEF_IWMMXT_HELPER_SIZE_ENV(adds)
-DEF_HELPER_1_3(iwmmxt_avgb0, uint64_t, (CPUState *, uint64_t, uint64_t))
-DEF_HELPER_1_3(iwmmxt_avgb1, uint64_t, (CPUState *, uint64_t, uint64_t))
-DEF_HELPER_1_3(iwmmxt_avgw0, uint64_t, (CPUState *, uint64_t, uint64_t))
-DEF_HELPER_1_3(iwmmxt_avgw1, uint64_t, (CPUState *, uint64_t, uint64_t))
-
-DEF_HELPER_1_2(iwmmxt_msadb, uint64_t, (uint64_t, uint64_t))
-
-DEF_HELPER_1_3(iwmmxt_align, uint64_t, (uint64_t, uint64_t, uint32_t))
-DEF_HELPER_1_4(iwmmxt_insr, uint64_t, (uint64_t, uint32_t, uint32_t, uint32_t))
-
-DEF_HELPER_1_1(iwmmxt_bcstb, uint64_t, (uint32_t))
-DEF_HELPER_1_1(iwmmxt_bcstw, uint64_t, (uint32_t))
-DEF_HELPER_1_1(iwmmxt_bcstl, uint64_t, (uint32_t))
-
-DEF_HELPER_1_1(iwmmxt_addcb, uint64_t, (uint64_t))
-DEF_HELPER_1_1(iwmmxt_addcw, uint64_t, (uint64_t))
-DEF_HELPER_1_1(iwmmxt_addcl, uint64_t, (uint64_t))
-
-DEF_HELPER_1_1(iwmmxt_msbb, uint32_t, (uint64_t))
-DEF_HELPER_1_1(iwmmxt_msbw, uint32_t, (uint64_t))
-DEF_HELPER_1_1(iwmmxt_msbl, uint32_t, (uint64_t))
-
-DEF_HELPER_1_3(iwmmxt_srlw, uint64_t, (CPUState *, uint64_t, uint32_t))
-DEF_HELPER_1_3(iwmmxt_srll, uint64_t, (CPUState *, uint64_t, uint32_t))
-DEF_HELPER_1_3(iwmmxt_srlq, uint64_t, (CPUState *, uint64_t, uint32_t))
-DEF_HELPER_1_3(iwmmxt_sllw, uint64_t, (CPUState *, uint64_t, uint32_t))
-DEF_HELPER_1_3(iwmmxt_slll, uint64_t, (CPUState *, uint64_t, uint32_t))
-DEF_HELPER_1_3(iwmmxt_sllq, uint64_t, (CPUState *, uint64_t, uint32_t))
-DEF_HELPER_1_3(iwmmxt_sraw, uint64_t, (CPUState *, uint64_t, uint32_t))
-DEF_HELPER_1_3(iwmmxt_sral, uint64_t, (CPUState *, uint64_t, uint32_t))
-DEF_HELPER_1_3(iwmmxt_sraq, uint64_t, (CPUState *, uint64_t, uint32_t))
-DEF_HELPER_1_3(iwmmxt_rorw, uint64_t, (CPUState *, uint64_t, uint32_t))
-DEF_HELPER_1_3(iwmmxt_rorl, uint64_t, (CPUState *, uint64_t, uint32_t))
-DEF_HELPER_1_3(iwmmxt_rorq, uint64_t, (CPUState *, uint64_t, uint32_t))
-DEF_HELPER_1_3(iwmmxt_shufh, uint64_t, (CPUState *, uint64_t, uint32_t))
-
-DEF_HELPER_1_3(iwmmxt_packuw, uint64_t, (CPUState *, uint64_t, uint64_t))
-DEF_HELPER_1_3(iwmmxt_packul, uint64_t, (CPUState *, uint64_t, uint64_t))
-DEF_HELPER_1_3(iwmmxt_packuq, uint64_t, (CPUState *, uint64_t, uint64_t))
-DEF_HELPER_1_3(iwmmxt_packsw, uint64_t, (CPUState *, uint64_t, uint64_t))
-DEF_HELPER_1_3(iwmmxt_packsl, uint64_t, (CPUState *, uint64_t, uint64_t))
-DEF_HELPER_1_3(iwmmxt_packsq, uint64_t, (CPUState *, uint64_t, uint64_t))
-
-DEF_HELPER_1_3(iwmmxt_muladdsl, uint64_t, (uint64_t, uint32_t, uint32_t))
-DEF_HELPER_1_3(iwmmxt_muladdsw, uint64_t, (uint64_t, uint32_t, uint32_t))
-DEF_HELPER_1_3(iwmmxt_muladdswl, uint64_t, (uint64_t, uint32_t, uint32_t))
-
-#undef DEF_HELPER
-#undef DEF_HELPER_0_0
-#undef DEF_HELPER_0_1
-#undef DEF_HELPER_0_2
-#undef DEF_HELPER_0_3
-#undef DEF_HELPER_1_0
-#undef DEF_HELPER_1_1
-#undef DEF_HELPER_1_2
-#undef DEF_HELPER_1_3
-#undef DEF_HELPER_1_4
-#undef GEN_HELPER
+DEF_HELPER_3(iwmmxt_avgb0, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_avgb1, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_avgw0, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_avgw1, i64, env, i64, i64)
+
+DEF_HELPER_2(iwmmxt_msadb, i64, i64, i64)
+
+DEF_HELPER_3(iwmmxt_align, i64, i64, i64, i32)
+DEF_HELPER_4(iwmmxt_insr, i64, i64, i32, i32, i32)
+
+DEF_HELPER_1(iwmmxt_bcstb, i64, i32)
+DEF_HELPER_1(iwmmxt_bcstw, i64, i32)
+DEF_HELPER_1(iwmmxt_bcstl, i64, i32)
+
+DEF_HELPER_1(iwmmxt_addcb, i64, i64)
+DEF_HELPER_1(iwmmxt_addcw, i64, i64)
+DEF_HELPER_1(iwmmxt_addcl, i64, i64)
+
+DEF_HELPER_1(iwmmxt_msbb, i32, i64)
+DEF_HELPER_1(iwmmxt_msbw, i32, i64)
+DEF_HELPER_1(iwmmxt_msbl, i32, i64)
+
+DEF_HELPER_3(iwmmxt_srlw, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_srll, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_srlq, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_sllw, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_slll, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_sllq, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_sraw, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_sral, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_sraq, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_rorw, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_rorl, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_rorq, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_shufh, i64, env, i64, i32)
+
+DEF_HELPER_3(iwmmxt_packuw, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_packul, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_packuq, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_packsw, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_packsl, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_packsq, i64, env, i64, i64)
+
+DEF_HELPER_3(iwmmxt_muladdsl, i64, i64, i32, i32)
+DEF_HELPER_3(iwmmxt_muladdsw, i64, i64, i32, i32)
+DEF_HELPER_3(iwmmxt_muladdswl, i64, i64, i32, i32)
+
+DEF_HELPER_2(set_teecr, void, env, i32)
+
+#include "def-helper.h"
diff --git a/target-arm/iwmmxt_helper.c b/target-arm/iwmmxt_helper.c
index 6e801c8..2e4193e 100644
--- a/target-arm/iwmmxt_helper.c
+++ b/target-arm/iwmmxt_helper.c
@@ -17,7 +17,7 @@
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
*/
#include <stdlib.h>
diff --git a/target-arm/machine.c b/target-arm/machine.c
index 3368741..b1deacb 100644
--- a/target-arm/machine.c
+++ b/target-arm/machine.c
@@ -1,31 +1,6 @@
#include "hw/hw.h"
#include "hw/boards.h"
-void register_machines(void)
-{
-#if 0 /* ANDROID */
- qemu_register_machine(&integratorcp_machine);
- qemu_register_machine(&versatilepb_machine);
- qemu_register_machine(&versatileab_machine);
- qemu_register_machine(&realview_machine);
- qemu_register_machine(&akitapda_machine);
- qemu_register_machine(&spitzpda_machine);
- qemu_register_machine(&borzoipda_machine);
- qemu_register_machine(&terrierpda_machine);
- qemu_register_machine(&palmte_machine);
- qemu_register_machine(&n800_machine);
- qemu_register_machine(&n810_machine);
- qemu_register_machine(&lm3s811evb_machine);
- qemu_register_machine(&lm3s6965evb_machine);
- qemu_register_machine(&connex_machine);
- qemu_register_machine(&verdex_machine);
- qemu_register_machine(&mainstone2_machine);
- qemu_register_machine(&musicpal_machine);
- qemu_register_machine(&tosapda_machine);
-#endif
- qemu_register_machine(&android_arm_machine);
-}
-
void cpu_save(QEMUFile *f, void *opaque)
{
int i;
@@ -214,5 +189,3 @@ int cpu_load(QEMUFile *f, void *opaque, int version_id)
return 0;
}
-
-
diff --git a/target-arm/neon_helper.c b/target-arm/neon_helper.c
index 4ee5658..35fbaf5 100644
--- a/target-arm/neon_helper.c
+++ b/target-arm/neon_helper.c
@@ -456,11 +456,11 @@ uint64_t HELPER(neon_shl_s64)(uint64_t valop, uint64_t shiftop)
if (tmp >= sizeof(src1) * 8) { \
dest = 0; \
} else if (tmp < -sizeof(src1) * 8) { \
- dest >>= sizeof(src1) * 8 - 1; \
+ dest = src1 >> (sizeof(src1) * 8 - 1); \
} else if (tmp == -sizeof(src1) * 8) { \
dest = src1 >> (tmp - 1); \
dest++; \
- src2 >>= 1; \
+ dest >>= 1; \
} else if (tmp < 0) { \
dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
} else { \
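
The two corrections above only affect the boundary shift counts; the in-range negative-shift branch shown unchanged below them is an ordinary rounding right shift. A minimal scalar sketch of that rounding, assuming a single unsigned 8-bit lane and 0 < s < 8 (function name is illustrative):

#include <stdint.h>

static inline uint8_t rshift_round_u8(uint8_t v, int s)
{
    /* add half of the discarded range before shifting, i.e. round to nearest */
    return (uint8_t)(((unsigned)v + (1u << (s - 1))) >> s);
}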
diff --git a/target-arm/op_addsub.h b/target-arm/op_addsub.h
index 376ee27..29f77ba 100644
--- a/target-arm/op_addsub.h
+++ b/target-arm/op_addsub.h
@@ -8,9 +8,9 @@
*/
#ifdef ARITH_GE
-#define GE_ARG , uint32_t *gep
+#define GE_ARG , void *gep
#define DECLARE_GE uint32_t ge = 0
-#define SET_GE *gep = ge
+#define SET_GE *(uint32_t *)gep = ge
#else
#define GE_ARG
#define DECLARE_GE do{}while(0)
diff --git a/target-arm/op_helper.c b/target-arm/op_helper.c
index 36de55b..64bab2b 100644
--- a/target-arm/op_helper.c
+++ b/target-arm/op_helper.c
@@ -15,7 +15,7 @@
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
*/
#include "exec.h"
#include "helpers.h"
@@ -31,7 +31,7 @@ void raise_exception(int tt)
/* thread support */
-spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
void cpu_lock(void)
{
@@ -56,7 +56,7 @@ uint32_t HELPER(neon_tbl)(uint32_t ireg, uint32_t def,
for (shift = 0; shift < 32; shift += 8) {
index = (ireg >> shift) & 0xff;
if (index < maxindex) {
- tmp = (table[index >> 3] >> (index & 7)) & 0xff;
+ tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
val |= tmp << shift;
} else {
val |= def & (0xff << shift);
@@ -129,117 +129,20 @@ void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
env = saved_env;
}
-#if 1
-#include <string.h>
-/*
- * The following functions are address translation helper functions
- * for fast memory access in QEMU.
- */
-static target_phys_addr_t v2p_mmu(target_ulong addr, int mmu_idx)
-{
- int index;
- target_ulong tlb_addr;
- target_phys_addr_t physaddr;
- void *retaddr;
-
- index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-redo:
- tlb_addr = env->tlb_table[mmu_idx][index].addr_read;
- if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
- physaddr = addr + env->tlb_table[mmu_idx][index].addend;
- } else {
- /* the page is not in the TLB : fill it */
- retaddr = GETPC();
- tlb_fill(addr, 0, mmu_idx, retaddr);
- goto redo;
- }
- return physaddr;
-}
-
-/*
- * translation from virtual address of simulated OS
- * to the address of simulation host (not the physical
- * address of simulated OS.
- */
-target_phys_addr_t v2p(target_ulong ptr, int mmu_idx)
-{
- CPUState *saved_env;
- int index;
- target_ulong addr;
- target_phys_addr_t physaddr;
-
- saved_env = env;
- env = cpu_single_env;
- addr = ptr;
- index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- if (__builtin_expect(env->tlb_table[mmu_idx][index].addr_read !=
- (addr & TARGET_PAGE_MASK), 0))
- {
- physaddr = v2p_mmu(addr, mmu_idx);
- } else {
- physaddr = (target_phys_addr_t)addr + env->tlb_table[mmu_idx][index].addend;
- }
- env = saved_env;
- return physaddr;
-}
-
-#define MINSIZE(x,y) ((x) < (y) ? (x) : (y))
-/* copy memory from the simulated virtual space to a buffer in QEMU */
-void vmemcpy(target_ulong ptr, char *buf, int size)
-{
- if (buf == NULL) return;
- while (size) {
- int page_remain = TARGET_PAGE_SIZE - (ptr & ~TARGET_PAGE_MASK);
- int to_copy = MINSIZE(size, page_remain);
- char *phys = (char *)v2p(ptr, 0);
- if (phys == NULL) return;
- memcpy(buf, phys, to_copy);
- ptr += to_copy;
- buf += to_copy;
- size -= to_copy;
- }
-}
-
-/* copy memory from the QEMU buffer to simulated virtual space */
-void pmemcpy(target_ulong ptr, const char *buf, int size)
-{
- if (buf == NULL) return;
- while (size) {
- int page_remain = TARGET_PAGE_SIZE - (ptr & ~TARGET_PAGE_MASK);
- int to_copy = MINSIZE(size, page_remain);
- char *phys = (char *)v2p(ptr, 0);
- if (phys == NULL) return;
- memcpy(phys, buf, to_copy);
- ptr += to_copy;
- buf += to_copy;
- size -= to_copy;
- }
-}
-
/* copy a string from the simulated virtual space to a buffer in QEMU */
void vstrcpy(target_ulong ptr, char *buf, int max)
{
- char *phys = 0;
- unsigned long page = 0;
+ int index;
if (buf == NULL) return;
- while (max) {
- if ((ptr & TARGET_PAGE_MASK) != page) {
- phys = (char *)v2p(ptr, 0);
- page = ptr & TARGET_PAGE_MASK;
- }
- *buf = *phys;
- if (*phys == '\0')
- return;
- ptr ++;
- buf ++;
- phys ++;
- max --;
+ for (index = 0; index < max; index += 1) {
+ cpu_physical_memory_read(ptr + index, buf + index, 1);
+ if (buf[index] == 0)
+ break;
}
}
#endif
-#endif
 /* FIXME: Pass an explicit pointer to QF to CPUState, and move saturating
instructions into helper.c */
@@ -434,7 +337,7 @@ void HELPER(set_user_reg)(uint32_t regno, uint32_t val)
uint32_t HELPER (add_cc)(uint32_t a, uint32_t b)
{
uint32_t result;
- result = T0 + T1;
+ result = a + b;
env->NF = env->ZF = result;
env->CF = result < a;
env->VF = (a ^ b ^ -1) & (a ^ result);
@@ -636,7 +539,6 @@ void HELPER(neon_trn_u8)(void)
rm = ((T1 & 0xff00ff00) >> 8) | (T0 & 0xff00ff00);
T0 = rd;
T1 = rm;
- FORCE_RET();
}
void HELPER(neon_trn_u16)(void)
@@ -647,7 +549,6 @@ void HELPER(neon_trn_u16)(void)
rm = (T1 >> 16) | (T0 & 0xffff0000);
T0 = rd;
T1 = rm;
- FORCE_RET();
}
/* Worker routines for zip and unzip. */
@@ -661,7 +562,6 @@ void HELPER(neon_unzip_u8)(void)
| ((T1 << 8) & 0xff0000) | (T1 & 0xff000000);
T0 = rd;
T1 = rm;
- FORCE_RET();
}
void HELPER(neon_zip_u8)(void)
@@ -674,7 +574,6 @@ void HELPER(neon_zip_u8)(void)
| ((T0 >> 8) & 0xff0000) | (T1 & 0xff000000);
T0 = rd;
T1 = rm;
- FORCE_RET();
}
void HELPER(neon_zip_u16)(void)
@@ -684,5 +583,4 @@ void HELPER(neon_zip_u16)(void)
tmp = (T0 & 0xffff) | (T1 << 16);
T1 = (T1 & 0xffff0000) | (T0 >> 16);
T0 = tmp;
- FORCE_RET();
}
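
Worth noting for the add_cc fix above (result = a + b instead of the stale T0 + T1): the flag expressions themselves are unchanged. A small self-contained sketch of what they test, assuming plain 32-bit operands; the function names below are made up for illustration:

#include <stdint.h>

/* Carry out of a 32-bit add: the unsigned sum wraps below an operand. */
static inline int add_carries(uint32_t a, uint32_t b)
{
    return (uint32_t)(a + b) < a;
}

/* Signed overflow: both operands share a sign and the result does not.
   The translator keeps the whole expression (a ^ b ^ -1) & (a ^ result)
   in env->VF and reads the flag from its top bit. */
static inline int add_overflows(uint32_t a, uint32_t b)
{
    uint32_t result = a + b;
    return (((a ^ b ^ ~0u) & (a ^ result)) >> 31) & 1;
}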
diff --git a/target-arm/translate.c b/target-arm/translate.c
index 23db9f7..ad3ab44 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -17,7 +17,7 @@
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
*/
#include <stdarg.h>
#include <stdlib.h>
@@ -35,6 +35,7 @@
#include "trace.h"
#endif
+#include "helpers.h"
#define GEN_HELPER 1
#include "helpers.h"
@@ -44,7 +45,7 @@
#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
-#define ARCH(x) if (!ENABLE_ARCH_##x) goto illegal_op;
+#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
/* internal defines */
typedef struct DisasContext {
@@ -60,7 +61,6 @@ typedef struct DisasContext {
struct TranslationBlock *tb;
int singlestep_enabled;
int thumb;
- int is_mem;
#if !defined(CONFIG_USER_ONLY)
int user;
#endif
@@ -74,20 +74,30 @@ typedef struct DisasContext {
#ifdef CONFIG_TRACE
#include "helpers.h"
+#if HOST_LONG_BITS == 32
+# define gen_helper_traceBB(num,tb) \
+ gen_helper_traceBB32((uint32_t)((num) >> 32), (uint32_t)(num), (tb))
+#elif HOST_LONG_BITS == 64
+# define gen_helper_traceBB gen_helper_traceBB64
+#else
+# error Unsupported HOST_LONG_BITS value
#endif
+#
+#endif /* CONFIG_TRACE */
/* These instructions trap after executing, so defer them until after the
    conditional execution state has been updated. */
#define DISAS_WFI 4
#define DISAS_SWI 5
-static TCGv cpu_env;
+static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency. */
-static TCGv cpu_V0, cpu_V1, cpu_M0;
+static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
/* FIXME: These should be removed. */
static TCGv cpu_T[2];
-static TCGv cpu_F0s, cpu_F1s, cpu_F0d, cpu_F1d;
+static TCGv cpu_F0s, cpu_F1s;
+static TCGv_i64 cpu_F0d, cpu_F1d;
#define ICOUNT_TEMP cpu_T[0]
#include "gen-icount.h"
@@ -95,10 +105,13 @@ static TCGv cpu_F0s, cpu_F1s, cpu_F0d, cpu_F1d;
/* initialize TCG globals. */
void arm_translate_init(void)
{
- cpu_env = tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, "env");
+ cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+
+ cpu_T[0] = tcg_global_reg_new_i32(TCG_AREG1, "T0");
+ cpu_T[1] = tcg_global_reg_new_i32(TCG_AREG2, "T1");
- cpu_T[0] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG1, "T0");
- cpu_T[1] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG2, "T1");
+#define GEN_HELPER 2
+#include "helpers.h"
}
/* The code generator doesn't like lots of temporaries, so maintain our own
@@ -108,16 +121,16 @@ static int num_temps;
static TCGv temps[MAX_TEMPS];
/* Allocate a temporary variable. */
-static TCGv new_tmp(void)
+static TCGv_i32 new_tmp(void)
{
TCGv tmp;
if (num_temps == MAX_TEMPS)
abort();
- if (GET_TCGV(temps[num_temps]))
+ if (GET_TCGV_I32(temps[num_temps]))
return temps[num_temps++];
- tmp = tcg_temp_new(TCG_TYPE_I32);
+ tmp = tcg_temp_new_i32();
temps[num_temps++] = tmp;
return tmp;
}
@@ -128,11 +141,11 @@ static void dead_tmp(TCGv tmp)
int i;
num_temps--;
i = num_temps;
- if (GET_TCGV(temps[i]) == GET_TCGV(tmp))
+ if (TCGV_EQUAL(temps[i], tmp))
return;
/* Shuffle this temp to the last slot. */
- while (GET_TCGV(temps[i]) != GET_TCGV(tmp))
+ while (!TCGV_EQUAL(temps[i], tmp))
i--;
while (i < num_temps) {
temps[i] = temps[i + 1];
@@ -198,7 +211,6 @@ static void store_reg(DisasContext *s, int reg, TCGv var)
/* Basic operations. */
#define gen_op_movl_T0_T1() tcg_gen_mov_i32(cpu_T[0], cpu_T[1])
-#define gen_op_movl_T1_T0() tcg_gen_mov_i32(cpu_T[1], cpu_T[0])
#define gen_op_movl_T0_im(im) tcg_gen_movi_i32(cpu_T[0], im)
#define gen_op_movl_T1_im(im) tcg_gen_movi_i32(cpu_T[1], im)
@@ -212,7 +224,6 @@ static void store_reg(DisasContext *s, int reg, TCGv var)
#define gen_op_subl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_sbcl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_rsbl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[1], cpu_T[0])
-#define gen_op_rscl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[1], cpu_T[0])
#define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_xorl_T0_T1() tcg_gen_xor_i32(cpu_T[0], cpu_T[0], cpu_T[1])
@@ -222,11 +233,8 @@ static void store_reg(DisasContext *s, int reg, TCGv var)
#define gen_op_logic_T0_cc() gen_logic_CC(cpu_T[0]);
#define gen_op_logic_T1_cc() gen_logic_CC(cpu_T[1]);
-#define gen_op_shll_T0_im(im) tcg_gen_shli_i32(cpu_T[0], cpu_T[0], im)
#define gen_op_shll_T1_im(im) tcg_gen_shli_i32(cpu_T[1], cpu_T[1], im)
#define gen_op_shrl_T1_im(im) tcg_gen_shri_i32(cpu_T[1], cpu_T[1], im)
-#define gen_op_sarl_T1_im(im) tcg_gen_sari_i32(cpu_T[1], cpu_T[1], im)
-#define gen_op_rorl_T1_im(im) tcg_gen_rori_i32(cpu_T[1], cpu_T[1], im)
/* Value extensions. */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
@@ -243,38 +251,6 @@ static void store_reg(DisasContext *s, int reg, TCGv var)
/* Set NZCV flags from the high 4 bits of var. */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
-#ifdef CONFIG_TRACE
-static void gen_traceTicks(int count)
-{
- TCGv t0 = new_tmp();
- tcg_gen_movi_i32(t0, count);
- gen_helper_traceTicks(t0);
- dead_tmp(t0);
-}
-
-static void gen_traceBB(uint64_t bb_num, target_phys_addr_t tb)
-{
-#if HOST_LONG_BITS ==64
- TCGv t0 = tcg_const_i64(bb_num);
- TCGv t1 = tcg_const_i64(tb);
- gen_helper_traceBB64(t0, t1);
- tcg_temp_free(t1);
- tcg_temp_free(t0);
-#else
- TCGv t0 = new_tmp();
- TCGv t1 = new_tmp();
- TCGv t2 = new_tmp();
- tcg_gen_movi_i32(t0, (int32_t)(bb_num >> 32));
- tcg_gen_movi_i32(t1, (int32_t)(bb_num));
- tcg_gen_movi_i32(t2, (int32_t)tb);
- gen_helper_traceBB32(t0, t1, t2);
- dead_tmp(t2);
- dead_tmp(t1);
- dead_tmp(t0);
-#endif
-}
-#endif /* CONFIG_TRACE */
-
static void gen_exception(int excp)
{
TCGv tmp = new_tmp();
@@ -364,10 +340,10 @@ static void gen_roundqd(TCGv a, TCGv b)
/* FIXME: Most targets have native widening multiplication.
It would be good to use that instead of a full wide multiply. */
/* 32x32->64 multiply. Marks inputs as dead. */
-static TCGv gen_mulu_i64_i32(TCGv a, TCGv b)
+static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
- TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
- TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
+ TCGv_i64 tmp1 = tcg_temp_new_i64();
+ TCGv_i64 tmp2 = tcg_temp_new_i64();
tcg_gen_extu_i32_i64(tmp1, a);
dead_tmp(a);
@@ -377,10 +353,10 @@ static TCGv gen_mulu_i64_i32(TCGv a, TCGv b)
return tmp1;
}
-static TCGv gen_muls_i64_i32(TCGv a, TCGv b)
+static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
- TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
- TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
+ TCGv_i64 tmp1 = tcg_temp_new_i64();
+ TCGv_i64 tmp2 = tcg_temp_new_i64();
tcg_gen_ext_i32_i64(tmp1, a);
dead_tmp(a);
@@ -393,8 +369,8 @@ static TCGv gen_muls_i64_i32(TCGv a, TCGv b)
/* Unsigned 32x32->64 multiply. */
static void gen_op_mull_T0_T1(void)
{
- TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
- TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
+ TCGv_i64 tmp1 = tcg_temp_new_i64();
+ TCGv_i64 tmp2 = tcg_temp_new_i64();
tcg_gen_extu_i32_i64(tmp1, cpu_T[0]);
tcg_gen_extu_i32_i64(tmp2, cpu_T[1]);
@@ -407,8 +383,8 @@ static void gen_op_mull_T0_T1(void)
/* Signed 32x32->64 multiply. */
static void gen_imull(TCGv a, TCGv b)
{
- TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
- TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
+ TCGv_i64 tmp1 = tcg_temp_new_i64();
+ TCGv_i64 tmp2 = tcg_temp_new_i64();
tcg_gen_ext_i32_i64(tmp1, a);
tcg_gen_ext_i32_i64(tmp2, b);
@@ -456,7 +432,7 @@ static void gen_set_CF_bit31(TCGv var)
{
TCGv tmp = new_tmp();
tcg_gen_shri_i32(tmp, var, 31);
- gen_set_CF(var);
+ gen_set_CF(tmp);
dead_tmp(tmp);
}
@@ -477,6 +453,16 @@ static void gen_adc_T0_T1(void)
dead_tmp(tmp);
}
+/* dest = T0 + T1 + CF. */
+static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
+{
+ TCGv tmp;
+ tcg_gen_add_i32(dest, t0, t1);
+ tmp = load_cpu_field(CF);
+ tcg_gen_add_i32(dest, dest, tmp);
+ dead_tmp(tmp);
+}
+
/* dest = T0 - T1 + CF - 1. */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
@@ -531,7 +517,7 @@ static void shifter_out_im(TCGv var, int shift)
tcg_gen_andi_i32(tmp, var, 1);
} else {
tcg_gen_shri_i32(tmp, var, shift);
- if (shift != 31);
+ if (shift != 31)
tcg_gen_andi_i32(tmp, tmp, 1);
}
gen_set_CF(tmp);
@@ -620,17 +606,17 @@ static inline void gen_arm_shift_reg(TCGv var, int shiftop,
}
static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
- TCGv tmp;
+ TCGv_ptr tmp;
switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
case 1:
- tmp = tcg_temp_new(TCG_TYPE_PTR);
+ tmp = tcg_temp_new_ptr();
tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
PAS_OP(s)
break;
case 5:
- tmp = tcg_temp_new(TCG_TYPE_PTR);
+ tmp = tcg_temp_new_ptr();
tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
PAS_OP(u)
break;
@@ -665,17 +651,17 @@ static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
}
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
- TCGv tmp;
+ TCGv_ptr tmp;
switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
case 0:
- tmp = tcg_temp_new(TCG_TYPE_PTR);
+ tmp = tcg_temp_new_ptr();
tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
PAS_OP(s)
break;
case 4:
- tmp = tcg_temp_new(TCG_TYPE_PTR);
+ tmp = tcg_temp_new_ptr();
tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
PAS_OP(u)
break;
@@ -796,7 +782,7 @@ static void gen_test_cc(int cc, int label)
dead_tmp(tmp);
}
-const uint8_t table_logic_cc[16] = {
+static const uint8_t table_logic_cc[16] = {
1, /* and */
1, /* xor */
0, /* sub */
@@ -852,17 +838,19 @@ static inline void gen_bx_T0(DisasContext *s)
gen_bx(s, tmp);
}
-#if defined(CONFIG_USER_ONLY)
-#define gen_ldst(name, s) gen_op_##name##_raw()
-#else
-#define gen_ldst(name, s) do { \
- s->is_mem = 1; \
- if (IS_USER(s)) \
- gen_op_##name##_user(); \
- else \
- gen_op_##name##_kernel(); \
- } while (0)
-#endif
+/* Variant of store_reg which uses branch&exchange logic when storing
+ to r15 in ARM architecture v7 and above. The source must be a temporary
+ and will be marked as dead. */
+static inline void store_reg_bx(CPUState *env, DisasContext *s,
+ int reg, TCGv var)
+{
+ if (reg == 15 && ENABLE_ARCH_7) {
+ gen_bx(s, var);
+ } else {
+ store_reg(s, reg, var);
+ }
+}
+
static inline TCGv gen_ld8s(TCGv addr, int index)
{
TCGv tmp = new_tmp();
@@ -1030,15 +1018,6 @@ static inline void gen_vfp_##name(int dp) \
gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
}
-#define VFP_OP1(name) \
-static inline void gen_vfp_##name(int dp, int arg) \
-{ \
- if (dp) \
- gen_op_vfp_##name##d(arg); \
- else \
- gen_op_vfp_##name##s(arg); \
-}
-
VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
@@ -1221,12 +1200,12 @@ static void neon_store_reg(int reg, int pass, TCGv var)
dead_tmp(var);
}
-static inline void neon_load_reg64(TCGv var, int reg)
+static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
-static inline void neon_store_reg64(TCGv var, int reg)
+static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
@@ -1262,12 +1241,12 @@ static inline void gen_mov_vreg_F0(int dp, int reg)
#define ARM_CP_RW_BIT (1 << 20)
-static inline void iwmmxt_load_reg(TCGv var, int reg)
+static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}
-static inline void iwmmxt_store_reg(TCGv var, int reg)
+static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}
@@ -1487,10 +1466,7 @@ static void gen_iwmmxt_movl_T0_T1_wRn(int rn)
static void gen_iwmmxt_movl_wRn_T0_T1(int rn)
{
- tcg_gen_extu_i32_i64(cpu_V0, cpu_T[0]);
- tcg_gen_extu_i32_i64(cpu_V1, cpu_T[0]);
- tcg_gen_shli_i64(cpu_V1, cpu_V1, 32);
- tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
+ tcg_gen_concat_i32_i64(cpu_V0, cpu_T[0], cpu_T[1]);
iwmmxt_store_reg(cpu_V0, rn);
}
@@ -2847,10 +2823,12 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
} else if (size == 1) {
gen_neon_dup_low16(tmp);
}
- tmp2 = new_tmp();
- tcg_gen_mov_i32(tmp2, tmp);
- neon_store_reg(rn, 0, tmp2);
- neon_store_reg(rn, 0, tmp);
+ for (n = 0; n <= pass * 2; n++) {
+ tmp2 = new_tmp();
+ tcg_gen_mov_i32(tmp2, tmp);
+ neon_store_reg(rn, n, tmp2);
+ }
+ neon_store_reg(rn, n, tmp);
} else {
/* VMOV */
switch (size) {
@@ -3065,6 +3043,10 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
case 21:
case 22:
case 23:
+ case 28:
+ case 29:
+ case 30:
+ case 31:
/* Source and destination the same. */
gen_mov_F0_vreg(dp, rd);
break;
@@ -3100,9 +3082,9 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
break;
case 3: /* nmsc: -fd - (fn * fm) */
gen_vfp_mul(dp);
- gen_mov_F1_vreg(dp, rd);
- gen_vfp_add(dp);
gen_vfp_neg(dp);
+ gen_mov_F1_vreg(dp, rd);
+ gen_vfp_sub(dp);
break;
case 4: /* mul: fn * fm */
gen_vfp_mul(dp);
@@ -3184,22 +3166,22 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
case 20: /* fshto */
if (!arm_feature(env, ARM_FEATURE_VFP3))
return 1;
- gen_vfp_shto(dp, rm);
+ gen_vfp_shto(dp, 16 - rm);
break;
case 21: /* fslto */
if (!arm_feature(env, ARM_FEATURE_VFP3))
return 1;
- gen_vfp_slto(dp, rm);
+ gen_vfp_slto(dp, 32 - rm);
break;
case 22: /* fuhto */
if (!arm_feature(env, ARM_FEATURE_VFP3))
return 1;
- gen_vfp_uhto(dp, rm);
+ gen_vfp_uhto(dp, 16 - rm);
break;
case 23: /* fulto */
if (!arm_feature(env, ARM_FEATURE_VFP3))
return 1;
- gen_vfp_ulto(dp, rm);
+ gen_vfp_ulto(dp, 32 - rm);
break;
case 24: /* ftoui */
gen_vfp_toui(dp);
@@ -3216,22 +3198,22 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
case 28: /* ftosh */
if (!arm_feature(env, ARM_FEATURE_VFP3))
return 1;
- gen_vfp_tosh(dp, rm);
+ gen_vfp_tosh(dp, 16 - rm);
break;
case 29: /* ftosl */
if (!arm_feature(env, ARM_FEATURE_VFP3))
return 1;
- gen_vfp_tosl(dp, rm);
+ gen_vfp_tosl(dp, 32 - rm);
break;
case 30: /* ftouh */
if (!arm_feature(env, ARM_FEATURE_VFP3))
return 1;
- gen_vfp_touh(dp, rm);
+ gen_vfp_touh(dp, 16 - rm);
break;
case 31: /* ftoul */
if (!arm_feature(env, ARM_FEATURE_VFP3))
return 1;
- gen_vfp_toul(dp, rm);
+ gen_vfp_toul(dp, 32 - rm);
break;
default: /* undefined */
printf ("rn:%d\n", rn);
@@ -3504,11 +3486,11 @@ static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
return 0;
}
-/* Generate an old-style exception return. */
-static void gen_exception_return(DisasContext *s)
+/* Generate an old-style exception return. Marks pc as dead. */
+static void gen_exception_return(DisasContext *s, TCGv pc)
{
TCGv tmp;
- gen_movl_reg_T0(s, 15);
+ store_reg(s, 15, pc);
tmp = load_cpu_field(spsr);
gen_set_cpsr(tmp, 0xffffffff);
dead_tmp(tmp);
@@ -3857,7 +3839,7 @@ static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
tmp2 = new_tmp();
tcg_gen_mov_i32(tmp2, tmp);
neon_store_reg(rd, 0, tmp2);
- neon_store_reg(rd, 0, tmp);
+ neon_store_reg(rd, 1, tmp);
rd += stride;
}
stride = (1 << size) * nregs;
@@ -3950,7 +3932,7 @@ static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
tcg_gen_or_i32(dest, t, f);
}
-static inline void gen_neon_narrow(int size, TCGv dest, TCGv src)
+static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
{
switch (size) {
case 0: gen_helper_neon_narrow_u8(dest, src); break;
@@ -3960,7 +3942,7 @@ static inline void gen_neon_narrow(int size, TCGv dest, TCGv src)
}
}
-static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv src)
+static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
{
switch (size) {
case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
@@ -3970,7 +3952,7 @@ static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv src)
}
}
-static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv src)
+static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
{
switch (size) {
case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
@@ -4014,7 +3996,7 @@ static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
}
}
-static inline void gen_neon_widen(TCGv dest, TCGv src, int size, int u)
+static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
{
if (u) {
switch (size) {
@@ -4054,7 +4036,7 @@ static inline void gen_neon_subl(int size)
}
}
-static inline void gen_neon_negl(TCGv var, int size)
+static inline void gen_neon_negl(TCGv_i64 var, int size)
{
switch (size) {
case 0: gen_helper_neon_negl_u16(var, var); break;
@@ -4064,7 +4046,7 @@ static inline void gen_neon_negl(TCGv var, int size)
}
}
-static inline void gen_neon_addl_saturate(TCGv op0, TCGv op1, int size)
+static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
switch (size) {
case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
@@ -4073,9 +4055,9 @@ static inline void gen_neon_addl_saturate(TCGv op0, TCGv op1, int size)
}
}
-static inline void gen_neon_mull(TCGv dest, TCGv a, TCGv b, int size, int u)
+static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
{
- TCGv tmp;
+ TCGv_i64 tmp;
switch ((size << 1) | u) {
case 0: gen_helper_neon_mull_s8(dest, a, b); break;
@@ -4119,6 +4101,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
TCGv tmp;
TCGv tmp2;
TCGv tmp3;
+ TCGv_i64 tmp64;
if (!vfp_enabled(env))
return 1;
@@ -4675,12 +4658,16 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
imm = (uint16_t)shift;
imm |= imm << 16;
tmp2 = tcg_const_i32(imm);
+ TCGV_UNUSED_I64(tmp64);
break;
case 2:
imm = (uint32_t)shift;
tmp2 = tcg_const_i32(imm);
+ TCGV_UNUSED_I64(tmp64);
+ break;
case 3:
- tmp2 = tcg_const_i64(shift);
+ tmp64 = tcg_const_i64(shift);
+ TCGV_UNUSED(tmp2);
break;
default:
abort();
@@ -4691,26 +4678,23 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
neon_load_reg64(cpu_V0, rm + pass);
if (q) {
if (u)
- gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp2);
+ gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64);
else
- gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp2);
+ gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64);
} else {
if (u)
- gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp2);
+ gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64);
else
- gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp2);
+ gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64);
}
} else {
tmp = neon_load_reg(rm + pass, 0);
gen_neon_shift_narrow(size, tmp, tmp2, q, u);
- tcg_gen_extu_i32_i64(cpu_V0, tmp);
- dead_tmp(tmp);
- tmp = neon_load_reg(rm + pass, 1);
- gen_neon_shift_narrow(size, tmp, tmp2, q, u);
- tcg_gen_extu_i32_i64(cpu_V1, tmp);
+ tmp3 = neon_load_reg(rm + pass, 1);
+ gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
+ tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
dead_tmp(tmp);
- tcg_gen_shli_i64(cpu_V1, cpu_V1, 32);
- tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
+ dead_tmp(tmp3);
}
tmp = new_tmp();
if (op == 8 && !u) {
@@ -4854,7 +4838,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
neon_store_reg(rd, pass, tmp);
}
}
- } else { /* (insn & 0x00800010 == 0x00800010) */
+ } else { /* (insn & 0x00800010 == 0x00800000) */
if (size != 3) {
op = (insn >> 8) & 0xf;
if ((insn & (1 << 6)) == 0) {
@@ -5176,16 +5160,16 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
neon_load_reg64(cpu_V1, rm);
}
} else if (q) {
- tmp = tcg_temp_new(TCG_TYPE_I64);
+ tmp64 = tcg_temp_new_i64();
if (imm < 8) {
neon_load_reg64(cpu_V0, rn);
- neon_load_reg64(tmp, rn + 1);
+ neon_load_reg64(tmp64, rn + 1);
} else {
neon_load_reg64(cpu_V0, rn + 1);
- neon_load_reg64(tmp, rm);
+ neon_load_reg64(tmp64, rm);
}
tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
- tcg_gen_shli_i64(cpu_V1, tmp, 64 - ((imm & 7) * 8));
+ tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
if (imm < 8) {
neon_load_reg64(cpu_V1, rm);
@@ -5194,13 +5178,14 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
imm -= 8;
}
tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
- tcg_gen_shri_i64(tmp, tmp, imm * 8);
- tcg_gen_or_i64(cpu_V1, cpu_V1, tmp);
+ tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
+ tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
} else {
+ /* BUGFIX */
neon_load_reg64(cpu_V0, rn);
- tcg_gen_shri_i32(cpu_V0, cpu_V0, imm * 8);
+ tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
neon_load_reg64(cpu_V1, rm);
- tcg_gen_shli_i32(cpu_V1, cpu_V1, 64 - (imm * 8));
+ tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
}
neon_store_reg64(cpu_V0, rd);
@@ -5219,7 +5204,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
NEON_GET_REG(T0, rm, pass * 2);
NEON_GET_REG(T1, rm, pass * 2 + 1);
switch (size) {
- case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
+ case 0: tcg_gen_bswap32_i32(cpu_T[0], cpu_T[0]); break;
case 1: gen_swap_half(cpu_T[0]); break;
case 2: /* no-op */ break;
default: abort();
@@ -5230,7 +5215,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
} else {
gen_op_movl_T0_T1();
switch (size) {
- case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
+ case 0: tcg_gen_bswap32_i32(cpu_T[0], cpu_T[0]); break;
case 1: gen_swap_half(cpu_T[0]); break;
default: abort();
}
@@ -5372,7 +5357,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
switch (op) {
case 1: /* VREV32 */
switch (size) {
- case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
+ case 0: tcg_gen_bswap32_i32(cpu_T[0], cpu_T[0]); break;
case 1: gen_swap_half(cpu_T[0]); break;
default: return 1;
}
@@ -5544,7 +5529,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
}
} else if ((insn & (1 << 10)) == 0) {
/* VTBL, VTBX. */
- n = (insn >> 5) & 0x18;
+ n = ((insn >> 5) & 0x18) + 8;
if (insn & (1 << 6)) {
tmp = neon_load_reg(rd, 0);
} else {
@@ -5554,6 +5539,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
tmp2 = neon_load_reg(rm, 0);
gen_helper_neon_tbl(tmp2, tmp2, tmp, tcg_const_i32(rn),
tcg_const_i32(n));
+ dead_tmp(tmp);
if (insn & (1 << 6)) {
tmp = neon_load_reg(rd, 1);
} else {
@@ -5564,7 +5550,8 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
gen_helper_neon_tbl(tmp3, tmp3, tmp, tcg_const_i32(rn),
tcg_const_i32(n));
neon_store_reg(rd, 0, tmp2);
- neon_store_reg(rd, 1, tmp2);
+ neon_store_reg(rd, 1, tmp3);
+ dead_tmp(tmp);
} else if ((insn & 0x380) == 0) {
/* VDUP */
if (insn & (1 << 19)) {
@@ -5591,6 +5578,71 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
return 0;
}
+static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
+{
+ int crn = (insn >> 16) & 0xf;
+ int crm = insn & 0xf;
+ int op1 = (insn >> 21) & 7;
+ int op2 = (insn >> 5) & 7;
+ int rt = (insn >> 12) & 0xf;
+ TCGv tmp;
+
+ if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
+ if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
+ /* TEECR */
+ if (IS_USER(s))
+ return 1;
+ tmp = load_cpu_field(teecr);
+ store_reg(s, rt, tmp);
+ return 0;
+ }
+ if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
+ /* TEEHBR */
+ if (IS_USER(s) && (env->teecr & 1))
+ return 1;
+ tmp = load_cpu_field(teehbr);
+ store_reg(s, rt, tmp);
+ return 0;
+ }
+ }
+ fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
+ op1, crn, crm, op2);
+ return 1;
+}
+
+static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
+{
+ int crn = (insn >> 16) & 0xf;
+ int crm = insn & 0xf;
+ int op1 = (insn >> 21) & 7;
+ int op2 = (insn >> 5) & 7;
+ int rt = (insn >> 12) & 0xf;
+ TCGv tmp;
+
+ if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
+ if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
+ /* TEECR */
+ if (IS_USER(s))
+ return 1;
+ tmp = load_reg(s, rt);
+ gen_helper_set_teecr(cpu_env, tmp);
+ dead_tmp(tmp);
+ return 0;
+ }
+ if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
+ /* TEEHBR */
+ if (IS_USER(s) && (env->teecr & 1))
+ return 1;
+ tmp = load_reg(s, rt);
+ store_cpu_field(tmp, teehbr);
+ return 0;
+ }
+ }
+ fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
+ op1, crn, crm, op2);
+ return 1;
+}
+
static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
int cpnum;
@@ -5612,9 +5664,19 @@ static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
case 10:
case 11:
return disas_vfp_insn (env, s, insn);
+ case 14:
+ /* Coprocessors 7-15 are architecturally reserved by ARM.
+ Unfortunately Intel decided to ignore this. */
+ if (arm_feature(env, ARM_FEATURE_XSCALE))
+ goto board;
+ if (insn & (1 << 20))
+ return disas_cp14_read(env, s, insn);
+ else
+ return disas_cp14_write(env, s, insn);
case 15:
return disas_cp15_insn (env, s, insn);
default:
+ board:
/* Unknown coprocessor. See if the board has hooked it. */
return disas_cp_insn (env, s, insn);
}
@@ -5622,7 +5684,7 @@ static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
/* Store a 64-bit value to a register pair. Clobbers val. */
-static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv val)
+static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
{
TCGv tmp;
tmp = new_tmp();
@@ -5635,13 +5697,13 @@ static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv val)
}
/* load a 32-bit value from a register and perform a 64-bit accumulate. */
-static void gen_addq_lo(DisasContext *s, TCGv val, int rlow)
+static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
{
- TCGv tmp;
+ TCGv_i64 tmp;
TCGv tmp2;
- /* Load 64-bit value rd:rn. */
- tmp = tcg_temp_new(TCG_TYPE_I64);
+ /* Load value and extend to 64 bits. */
+ tmp = tcg_temp_new_i64();
tmp2 = load_reg(s, rlow);
tcg_gen_extu_i32_i64(tmp, tmp2);
dead_tmp(tmp2);
@@ -5649,27 +5711,24 @@ static void gen_addq_lo(DisasContext *s, TCGv val, int rlow)
}
/* load and add a 64-bit value from a register pair. */
-static void gen_addq(DisasContext *s, TCGv val, int rlow, int rhigh)
+static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
{
- TCGv tmp;
- TCGv tmp2;
+ TCGv_i64 tmp;
+ TCGv tmpl;
+ TCGv tmph;
/* Load 64-bit value rd:rn. */
- tmp = tcg_temp_new(TCG_TYPE_I64);
- tmp2 = load_reg(s, rhigh);
- tcg_gen_extu_i32_i64(tmp, tmp2);
- dead_tmp(tmp2);
- tcg_gen_shli_i64(tmp, tmp, 32);
- tcg_gen_add_i64(val, val, tmp);
-
- tmp2 = load_reg(s, rlow);
- tcg_gen_extu_i32_i64(tmp, tmp2);
- dead_tmp(tmp2);
+ tmpl = load_reg(s, rlow);
+ tmph = load_reg(s, rhigh);
+ tmp = tcg_temp_new_i64();
+ tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
+ dead_tmp(tmpl);
+ dead_tmp(tmph);
tcg_gen_add_i64(val, val, tmp);
}
/* Set N and Z flags from a 64-bit value. */
-static void gen_logicq_cc(TCGv val)
+static void gen_logicq_cc(TCGv_i64 val)
{
TCGv tmp = new_tmp();
gen_helper_logicq_cc(tmp, val);
@@ -5680,13 +5739,14 @@ static void gen_logicq_cc(TCGv val)
static void disas_arm_insn(CPUState * env, DisasContext *s)
{
unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
+#ifdef CONFIG_TRACE
+ int ticks;
+#endif
TCGv tmp;
TCGv tmp2;
TCGv tmp3;
TCGv addr;
-#ifdef CONFIG_TRACE
- int ticks = 0;
-#endif
+ TCGv_i64 tmp64;
insn = ldl_code(s->pc);
#ifdef CONFIG_TRACE
@@ -5705,7 +5765,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
if (cond == 0xf){
#ifdef CONFIG_TRACE
if (tracing) {
- gen_traceTicks(ticks);
+ gen_helper_traceTicks(ticks);
}
#endif
/* Unconditional instructions. */
@@ -5899,7 +5959,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
if (tracing) {
/* a non-executed conditional instruction takes */
/* only 1 cycle */
- gen_traceTicks(1);
+ gen_helper_traceTicks(1);
ticks -= 1;
}
#endif
@@ -5911,7 +5971,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
}
#ifdef CONFIG_TRACE
if (tracing && ticks > 0) {
- gen_traceTicks(ticks);
+ gen_helper_traceTicks(ticks);
}
#endif
if ((insn & 0x0f900000) == 0x03000000) {
@@ -6046,10 +6106,10 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
tcg_gen_sari_i32(tmp2, tmp2, 16);
else
gen_sxth(tmp2);
- tmp2 = gen_muls_i64_i32(tmp, tmp2);
- tcg_gen_shri_i64(tmp2, tmp2, 16);
+ tmp64 = gen_muls_i64_i32(tmp, tmp2);
+ tcg_gen_shri_i64(tmp64, tmp64, 16);
tmp = new_tmp();
- tcg_gen_trunc_i64_i32(tmp, tmp2);
+ tcg_gen_trunc_i64_i32(tmp, tmp64);
if ((sh & 2) == 0) {
tmp2 = load_reg(s, rn);
gen_helper_add_setq(tmp, tmp, tmp2);
@@ -6063,11 +6123,11 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
dead_tmp(tmp2);
if (op1 == 2) {
- tmp2 = tcg_temp_new(TCG_TYPE_I64);
- tcg_gen_ext_i32_i64(tmp2, tmp);
+ tmp64 = tcg_temp_new_i64();
+ tcg_gen_ext_i32_i64(tmp64, tmp);
dead_tmp(tmp);
- gen_addq(s, tmp2, rn, rd);
- gen_storeq_reg(s, rn, rd, tmp2);
+ gen_addq(s, tmp64, rn, rd);
+ gen_storeq_reg(s, rn, rd, tmp64);
} else {
if (op1 == 0) {
tmp2 = load_reg(s, rn);
@@ -6095,148 +6155,173 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
/* immediate operand */
val = insn & 0xff;
shift = ((insn >> 8) & 0xf) * 2;
- if (shift)
+ if (shift) {
val = (val >> shift) | (val << (32 - shift));
- gen_op_movl_T1_im(val);
- if (logic_cc && shift)
- gen_set_CF_bit31(cpu_T[1]);
+ }
+ tmp2 = new_tmp();
+ tcg_gen_movi_i32(tmp2, val);
+ if (logic_cc && shift) {
+ gen_set_CF_bit31(tmp2);
+ }
} else {
/* register */
rm = (insn) & 0xf;
- gen_movl_T1_reg(s, rm);
+ tmp2 = load_reg(s, rm);
shiftop = (insn >> 5) & 3;
if (!(insn & (1 << 4))) {
shift = (insn >> 7) & 0x1f;
- gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
+ gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
} else {
rs = (insn >> 8) & 0xf;
tmp = load_reg(s, rs);
- gen_arm_shift_reg(cpu_T[1], shiftop, tmp, logic_cc);
+ gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
}
}
if (op1 != 0x0f && op1 != 0x0d) {
rn = (insn >> 16) & 0xf;
- gen_movl_T0_reg(s, rn);
+ tmp = load_reg(s, rn);
+ } else {
+ TCGV_UNUSED(tmp);
}
rd = (insn >> 12) & 0xf;
switch(op1) {
case 0x00:
- gen_op_andl_T0_T1();
- gen_movl_reg_T0(s, rd);
- if (logic_cc)
- gen_op_logic_T0_cc();
+ tcg_gen_and_i32(tmp, tmp, tmp2);
+ if (logic_cc) {
+ gen_logic_CC(tmp);
+ }
+ store_reg_bx(env, s, rd, tmp);
break;
case 0x01:
- gen_op_xorl_T0_T1();
- gen_movl_reg_T0(s, rd);
- if (logic_cc)
- gen_op_logic_T0_cc();
+ tcg_gen_xor_i32(tmp, tmp, tmp2);
+ if (logic_cc) {
+ gen_logic_CC(tmp);
+ }
+ store_reg_bx(env, s, rd, tmp);
break;
case 0x02:
if (set_cc && rd == 15) {
/* SUBS r15, ... is used for exception return. */
- if (IS_USER(s))
+ if (IS_USER(s)) {
goto illegal_op;
- gen_op_subl_T0_T1_cc();
- gen_exception_return(s);
+ }
+ gen_helper_sub_cc(tmp, tmp, tmp2);
+ gen_exception_return(s, tmp);
} else {
- if (set_cc)
- gen_op_subl_T0_T1_cc();
- else
- gen_op_subl_T0_T1();
- gen_movl_reg_T0(s, rd);
+ if (set_cc) {
+ gen_helper_sub_cc(tmp, tmp, tmp2);
+ } else {
+ tcg_gen_sub_i32(tmp, tmp, tmp2);
+ }
+ store_reg_bx(env, s, rd, tmp);
}
break;
case 0x03:
- if (set_cc)
- gen_op_rsbl_T0_T1_cc();
- else
- gen_op_rsbl_T0_T1();
- gen_movl_reg_T0(s, rd);
+ if (set_cc) {
+ gen_helper_sub_cc(tmp, tmp2, tmp);
+ } else {
+ tcg_gen_sub_i32(tmp, tmp2, tmp);
+ }
+ store_reg_bx(env, s, rd, tmp);
break;
case 0x04:
- if (set_cc)
- gen_op_addl_T0_T1_cc();
- else
- gen_op_addl_T0_T1();
- gen_movl_reg_T0(s, rd);
+ if (set_cc) {
+ gen_helper_add_cc(tmp, tmp, tmp2);
+ } else {
+ tcg_gen_add_i32(tmp, tmp, tmp2);
+ }
+ store_reg_bx(env, s, rd, tmp);
break;
case 0x05:
- if (set_cc)
- gen_op_adcl_T0_T1_cc();
- else
- gen_adc_T0_T1();
- gen_movl_reg_T0(s, rd);
+ if (set_cc) {
+ gen_helper_adc_cc(tmp, tmp, tmp2);
+ } else {
+ gen_add_carry(tmp, tmp, tmp2);
+ }
+ store_reg_bx(env, s, rd, tmp);
break;
case 0x06:
- if (set_cc)
- gen_op_sbcl_T0_T1_cc();
- else
- gen_sbc_T0_T1();
- gen_movl_reg_T0(s, rd);
+ if (set_cc) {
+ gen_helper_sbc_cc(tmp, tmp, tmp2);
+ } else {
+ gen_sub_carry(tmp, tmp, tmp2);
+ }
+ store_reg_bx(env, s, rd, tmp);
break;
case 0x07:
- if (set_cc)
- gen_op_rscl_T0_T1_cc();
- else
- gen_rsc_T0_T1();
- gen_movl_reg_T0(s, rd);
+ if (set_cc) {
+ gen_helper_sbc_cc(tmp, tmp2, tmp);
+ } else {
+ gen_sub_carry(tmp, tmp2, tmp);
+ }
+ store_reg_bx(env, s, rd, tmp);
break;
case 0x08:
if (set_cc) {
- gen_op_andl_T0_T1();
- gen_op_logic_T0_cc();
+ tcg_gen_and_i32(tmp, tmp, tmp2);
+ gen_logic_CC(tmp);
}
+ dead_tmp(tmp);
break;
case 0x09:
if (set_cc) {
- gen_op_xorl_T0_T1();
- gen_op_logic_T0_cc();
+ tcg_gen_xor_i32(tmp, tmp, tmp2);
+ gen_logic_CC(tmp);
}
+ dead_tmp(tmp);
break;
case 0x0a:
if (set_cc) {
- gen_op_subl_T0_T1_cc();
+ gen_helper_sub_cc(tmp, tmp, tmp2);
}
+ dead_tmp(tmp);
break;
case 0x0b:
if (set_cc) {
- gen_op_addl_T0_T1_cc();
+ gen_helper_add_cc(tmp, tmp, tmp2);
}
+ dead_tmp(tmp);
break;
case 0x0c:
- gen_op_orl_T0_T1();
- gen_movl_reg_T0(s, rd);
- if (logic_cc)
- gen_op_logic_T0_cc();
+ tcg_gen_or_i32(tmp, tmp, tmp2);
+ if (logic_cc) {
+ gen_logic_CC(tmp);
+ }
+ store_reg_bx(env, s, rd, tmp);
break;
case 0x0d:
if (logic_cc && rd == 15) {
/* MOVS r15, ... is used for exception return. */
- if (IS_USER(s))
+ if (IS_USER(s)) {
goto illegal_op;
- gen_op_movl_T0_T1();
- gen_exception_return(s);
+ }
+ gen_exception_return(s, tmp2);
} else {
- gen_movl_reg_T1(s, rd);
- if (logic_cc)
- gen_op_logic_T1_cc();
+ if (logic_cc) {
+ gen_logic_CC(tmp2);
+ }
+ store_reg_bx(env, s, rd, tmp2);
}
break;
case 0x0e:
- gen_op_bicl_T0_T1();
- gen_movl_reg_T0(s, rd);
- if (logic_cc)
- gen_op_logic_T0_cc();
+ tcg_gen_bic_i32(tmp, tmp, tmp2);
+ if (logic_cc) {
+ gen_logic_CC(tmp);
+ }
+ store_reg_bx(env, s, rd, tmp);
break;
default:
case 0x0f:
- gen_op_notl_T1();
- gen_movl_reg_T1(s, rd);
- if (logic_cc)
- gen_op_logic_T1_cc();
+ tcg_gen_not_i32(tmp2, tmp2);
+ if (logic_cc) {
+ gen_logic_CC(tmp2);
+ }
+ store_reg_bx(env, s, rd, tmp2);
break;
}
+ if (op1 != 0x0f && op1 != 0x0d) {
+ dead_tmp(tmp2);
+ }
} else {
/* other instructions */
op1 = (insn >> 24) & 0xf;
@@ -6280,19 +6365,19 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
tmp = load_reg(s, rs);
tmp2 = load_reg(s, rm);
if (insn & (1 << 22))
- tmp = gen_muls_i64_i32(tmp, tmp2);
+ tmp64 = gen_muls_i64_i32(tmp, tmp2);
else
- tmp = gen_mulu_i64_i32(tmp, tmp2);
+ tmp64 = gen_mulu_i64_i32(tmp, tmp2);
if (insn & (1 << 21)) /* mult accumulate */
- gen_addq(s, tmp, rn, rd);
+ gen_addq(s, tmp64, rn, rd);
if (!(insn & (1 << 23))) { /* double accumulate */
ARCH(6);
- gen_addq_lo(s, tmp, rn);
- gen_addq_lo(s, tmp, rd);
+ gen_addq_lo(s, tmp64, rn);
+ gen_addq_lo(s, tmp64, rd);
}
if (insn & (1 << 20))
- gen_logicq_cc(tmp);
- gen_storeq_reg(s, rn, rd, tmp);
+ gen_logicq_cc(tmp64);
+ gen_storeq_reg(s, rn, rd, tmp64);
break;
}
} else {
@@ -6300,11 +6385,35 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
rd = (insn >> 12) & 0xf;
if (insn & (1 << 23)) {
/* load/store exclusive */
+ op1 = (insn >> 21) & 0x3;
+ if (op1)
+ ARCH(6K);
+ else
+ ARCH(6);
gen_movl_T1_reg(s, rn);
addr = cpu_T[1];
if (insn & (1 << 20)) {
gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
- tmp = gen_ld32(addr, IS_USER(s));
+ switch (op1) {
+ case 0: /* ldrex */
+ tmp = gen_ld32(addr, IS_USER(s));
+ break;
+ case 1: /* ldrexd */
+ tmp = gen_ld32(addr, IS_USER(s));
+ store_reg(s, rd, tmp);
+ tcg_gen_addi_i32(addr, addr, 4);
+ tmp = gen_ld32(addr, IS_USER(s));
+ rd++;
+ break;
+ case 2: /* ldrexb */
+ tmp = gen_ld8u(addr, IS_USER(s));
+ break;
+ case 3: /* ldrexh */
+ tmp = gen_ld16u(addr, IS_USER(s));
+ break;
+ default:
+ abort();
+ }
store_reg(s, rd, tmp);
} else {
int label = gen_new_label();
@@ -6313,7 +6422,25 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
0, label);
tmp = load_reg(s,rm);
- gen_st32(tmp, cpu_T[1], IS_USER(s));
+ switch (op1) {
+ case 0: /* strex */
+ gen_st32(tmp, addr, IS_USER(s));
+ break;
+ case 1: /* strexd */
+ gen_st32(tmp, addr, IS_USER(s));
+ tcg_gen_addi_i32(addr, addr, 4);
+ tmp = load_reg(s, rm + 1);
+ gen_st32(tmp, addr, IS_USER(s));
+ break;
+ case 2: /* strexb */
+ gen_st8(tmp, addr, IS_USER(s));
+ break;
+ case 3: /* strexh */
+ gen_st16(tmp, addr, IS_USER(s));
+ break;
+ default:
+ abort();
+ }
gen_set_label(label);
gen_movl_reg_T0(s, rd);
}
@@ -6536,7 +6663,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
if (insn & (1 << 7))
gen_rev16(tmp);
else
- tcg_gen_bswap_i32(tmp, tmp);
+ tcg_gen_bswap32_i32(tmp, tmp);
}
store_reg(s, rd, tmp);
} else {
@@ -6548,14 +6675,14 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
tmp2 = load_reg(s, rs);
if (insn & (1 << 20)) {
/* Signed multiply most significant [accumulate]. */
- tmp2 = gen_muls_i64_i32(tmp, tmp2);
+ tmp64 = gen_muls_i64_i32(tmp, tmp2);
if (insn & (1 << 5))
- tcg_gen_addi_i64(tmp2, tmp2, 0x80000000u);
- tcg_gen_shri_i64(tmp2, tmp2, 32);
+ tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
+ tcg_gen_shri_i64(tmp64, tmp64, 32);
tmp = new_tmp();
- tcg_gen_trunc_i64_i32(tmp, tmp2);
- if (rn != 15) {
- tmp2 = load_reg(s, rn);
+ tcg_gen_trunc_i64_i32(tmp, tmp64);
+ if (rd != 15) {
+ tmp2 = load_reg(s, rd);
if (insn & (1 << 6)) {
tcg_gen_sub_i32(tmp, tmp, tmp2);
} else {
@@ -6563,7 +6690,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
}
dead_tmp(tmp2);
}
- store_reg(s, rd, tmp);
+ store_reg(s, rn, tmp);
} else {
if (insn & (1 << 5))
gen_swap_half(tmp2);
@@ -6577,11 +6704,11 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
dead_tmp(tmp2);
if (insn & (1 << 22)) {
/* smlald, smlsld */
- tmp2 = tcg_temp_new(TCG_TYPE_I64);
- tcg_gen_ext_i32_i64(tmp2, tmp);
+ tmp64 = tcg_temp_new_i64();
+ tcg_gen_ext_i32_i64(tmp64, tmp);
dead_tmp(tmp);
- gen_addq(s, tmp2, rd, rn);
- gen_storeq_reg(s, rd, rn, tmp2);
+ gen_addq(s, tmp64, rd, rn);
+ gen_storeq_reg(s, rd, rn, tmp64);
} else {
/* smuad, smusd, smlad, smlsd */
if (rd != 15)
@@ -6603,12 +6730,12 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
tmp2 = load_reg(s, rs);
gen_helper_usad8(tmp, tmp, tmp2);
dead_tmp(tmp2);
- if (rn != 15) {
- tmp2 = load_reg(s, rn);
+ if (rd != 15) {
+ tmp2 = load_reg(s, rd);
tcg_gen_add_i32(tmp, tmp, tmp2);
dead_tmp(tmp2);
}
- store_reg(s, rd, tmp);
+ store_reg(s, rn, tmp);
break;
case 0x20: case 0x24: case 0x28: case 0x2c:
/* Bitfield insert/clear. */
@@ -6631,6 +6758,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
break;
case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
+ ARCH(6T2);
tmp = load_reg(s, rm);
shift = (insn >> 7) & 0x1f;
i = ((insn >> 16) & 0x1f) + 1;
@@ -6671,7 +6799,6 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
gen_add_data_offset(s, insn, tmp2);
if (insn & (1 << 20)) {
/* load */
- s->is_mem = 1;
if (insn & (1 << 22)) {
tmp = gen_ld8u(tmp2, i);
} else {
@@ -6950,6 +7077,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
TCGv tmp2;
TCGv tmp3;
TCGv addr;
+ TCGv_i64 tmp64;
int op;
int shiftop;
int conds;
@@ -7003,7 +7131,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
int ticks = get_insn_ticks_thumb(insn);
trace_add_insn( insn_wrap_thumb(insn), 1 );
gen_helper_traceInsn();
- gen_traceTicks(ticks);
+ gen_helper_traceTicks(ticks);
}
#endif
s->pc += 2;
@@ -7298,7 +7426,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
if (logic_cc)
gen_logic_CC(tmp);
- store_reg(s, rd, tmp);
+ store_reg_bx(env, s, rd, tmp);
break;
case 1: /* Sign/zero extend. */
tmp = load_reg(s, rm);
@@ -7359,7 +7487,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
gen_helper_rbit(tmp, tmp);
break;
case 0x08: /* rev */
- tcg_gen_bswap_i32(tmp, tmp);
+ tcg_gen_bswap32_i32(tmp, tmp);
break;
case 0x09: /* rev16 */
gen_rev16(tmp);
@@ -7434,10 +7562,10 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
tcg_gen_sari_i32(tmp2, tmp2, 16);
else
gen_sxth(tmp2);
- tmp2 = gen_muls_i64_i32(tmp, tmp2);
- tcg_gen_shri_i64(tmp2, tmp2, 16);
+ tmp64 = gen_muls_i64_i32(tmp, tmp2);
+ tcg_gen_shri_i64(tmp64, tmp64, 16);
tmp = new_tmp();
- tcg_gen_trunc_i64_i32(tmp, tmp2);
+ tcg_gen_trunc_i64_i32(tmp, tmp64);
if (rs != 15)
{
tmp2 = load_reg(s, rs);
@@ -7501,36 +7629,38 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
tcg_gen_add_i32(tmp, tmp, tmp2);
}
dead_tmp(tmp2);
- tmp2 = tcg_temp_new(TCG_TYPE_I64);
- gen_addq(s, tmp, rs, rd);
- gen_storeq_reg(s, rs, rd, tmp);
+ /* BUGFIX */
+ tmp64 = tcg_temp_new_i64();
+ tcg_gen_ext_i32_i64(tmp64, tmp);
+ dead_tmp(tmp);
+ gen_addq(s, tmp64, rs, rd);
+ gen_storeq_reg(s, rs, rd, tmp64);
} else {
if (op & 0x20) {
/* Unsigned 64-bit multiply */
- tmp = gen_mulu_i64_i32(tmp, tmp2);
+ tmp64 = gen_mulu_i64_i32(tmp, tmp2);
} else {
if (op & 8) {
/* smlalxy */
gen_mulxy(tmp, tmp2, op & 2, op & 1);
dead_tmp(tmp2);
- tmp2 = tcg_temp_new(TCG_TYPE_I64);
- tcg_gen_ext_i32_i64(tmp2, tmp);
+ tmp64 = tcg_temp_new_i64();
+ tcg_gen_ext_i32_i64(tmp64, tmp);
dead_tmp(tmp);
- tmp = tmp2;
} else {
/* Signed 64-bit multiply */
- tmp = gen_muls_i64_i32(tmp, tmp2);
+ tmp64 = gen_muls_i64_i32(tmp, tmp2);
}
}
if (op & 4) {
/* umaal */
- gen_addq_lo(s, tmp, rs);
- gen_addq_lo(s, tmp, rd);
+ gen_addq_lo(s, tmp64, rs);
+ gen_addq_lo(s, tmp64, rd);
} else if (op & 0x40) {
/* 64-bit accumulate. */
- gen_addq(s, tmp, rs, rd);
+ gen_addq(s, tmp64, rs, rd);
}
- gen_storeq_reg(s, rs, rd, tmp);
+ gen_storeq_reg(s, rs, rd, tmp64);
}
break;
}
@@ -7988,7 +8118,7 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
int ticks = get_insn_ticks_thumb(insn);
trace_add_insn( insn_wrap_thumb(insn), 1 );
gen_helper_traceInsn();
- gen_traceTicks(ticks);
+ gen_helper_traceTicks(ticks);
}
#endif
s->pc += 2;
@@ -8499,7 +8629,7 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
rd = insn & 0x7;
tmp = load_reg(s, rn);
switch ((insn >> 6) & 3) {
- case 0: tcg_gen_bswap_i32(tmp, tmp); break;
+ case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
case 1: gen_rev16(tmp); break;
case 3: gen_revsh(tmp); break;
default: goto illegal_op;
@@ -8635,6 +8765,7 @@ static inline void gen_intermediate_code_internal(CPUState *env,
int search_pc)
{
DisasContext dc1, *dc = &dc1;
+ CPUBreakpoint *bp;
uint16_t *gen_opc_end;
int j, lj;
target_ulong pc_start;
@@ -8659,7 +8790,6 @@ static inline void gen_intermediate_code_internal(CPUState *env,
dc->thumb = env->thumb;
dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
dc->condexec_cond = env->condexec_bits >> 4;
- dc->is_mem = 0;
#if !defined(CONFIG_USER_ONLY)
if (IS_M(env)) {
dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
@@ -8667,14 +8797,14 @@ static inline void gen_intermediate_code_internal(CPUState *env,
dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
}
#endif
- cpu_F0s = tcg_temp_new(TCG_TYPE_I32);
- cpu_F1s = tcg_temp_new(TCG_TYPE_I32);
- cpu_F0d = tcg_temp_new(TCG_TYPE_I64);
- cpu_F1d = tcg_temp_new(TCG_TYPE_I64);
+ cpu_F0s = tcg_temp_new_i32();
+ cpu_F1s = tcg_temp_new_i32();
+ cpu_F0d = tcg_temp_new_i64();
+ cpu_F1d = tcg_temp_new_i64();
cpu_V0 = cpu_F0d;
cpu_V1 = cpu_F1d;
/* FIXME: cpu_M0 can probably be the same as cpu_V0. */
- cpu_M0 = tcg_temp_new(TCG_TYPE_I64);
+ cpu_M0 = tcg_temp_new_i64();
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
lj = -1;
num_insns = 0;
@@ -8693,7 +8823,7 @@ static inline void gen_intermediate_code_internal(CPUState *env,
}
#ifdef CONFIG_TRACE
if (tracing) {
- gen_traceBB(trace_static.bb_num, (target_phys_addr_t)tb );
+ gen_helper_traceBB(trace_static.bb_num, (target_phys_addr_t)tb );
trace_bb_start(dc->pc);
}
#endif
@@ -8718,9 +8848,9 @@ static inline void gen_intermediate_code_internal(CPUState *env,
}
#endif
- if (env->nb_breakpoints > 0) {
- for(j = 0; j < env->nb_breakpoints; j++) {
- if (env->breakpoints[j] == dc->pc) {
+ if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
+ TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (bp->pc == dc->pc) {
gen_set_condexec(dc);
gen_set_pc_im(dc->pc);
gen_exception(EXCP_DEBUG);
@@ -8770,19 +8900,14 @@ static inline void gen_intermediate_code_internal(CPUState *env,
gen_set_label(dc->condlabel);
dc->condjmp = 0;
}
- /* Terminate the TB on memory ops if watchpoints are present. */
- /* FIXME: This should be replacd by the deterministic execution
- * IRQ raising bits. */
- if (dc->is_mem && env->nb_watchpoints)
- break;
-
- /* Translation stops when a conditional branch is enoutered.
+ /* Translation stops when a conditional branch is encountered.
* Otherwise the subsequent code could get translated several times.
* Also stop translation when a page boundary is reached. This
* ensures prefetch aborts occur at the right place. */
num_insns ++;
} while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
!env->singlestep_enabled &&
+ !singlestep &&
dc->pc < next_page_start &&
num_insns < max_insns);
@@ -8870,11 +8995,11 @@ done_generating:
*gen_opc_ptr = INDEX_op_end;
#ifdef DEBUG_DISAS
- if (loglevel & CPU_LOG_TB_IN_ASM) {
- fprintf(logfile, "----------------\n");
- fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
- target_disas(logfile, pc_start, dc->pc - pc_start, env->thumb);
- fprintf(logfile, "\n");
+ if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
+ qemu_log("----------------\n");
+ qemu_log("IN: %s\n", lookup_symbol(pc_start));
+ log_target_disas(pc_start, dc->pc - pc_start, env->thumb);
+ qemu_log("\n");
}
#endif
if (search_pc) {